osism 0.20250602.0__py3-none-any.whl → 0.20250605.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/commands/baremetal.py +36 -30
- osism/commands/vault.py +9 -1
- osism/core/enums.py +1 -0
- osism/tasks/conductor/__init__.py +54 -0
- osism/tasks/conductor/config.py +92 -0
- osism/tasks/conductor/ironic.py +323 -0
- osism/tasks/conductor/netbox.py +50 -0
- osism/tasks/conductor/utils.py +79 -0
- osism/tasks/conductor.py +13 -470
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/METADATA +2 -2
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/RECORD +17 -12
- osism-0.20250605.0.dist-info/pbr.json +1 -0
- osism-0.20250602.0.dist-info/pbr.json +0 -1
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/WHEEL +0 -0
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/entry_points.txt +0 -0
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/licenses/AUTHORS +0 -0
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250602.0.dist-info → osism-0.20250605.0.dist-info}/top_level.txt +0 -0

osism/commands/baremetal.py
CHANGED
@@ -70,19 +70,18 @@ class BaremetalDeploy(Command):
     def get_parser(self, prog_name):
         parser = super(BaremetalDeploy, self).get_parser(prog_name)
 
-
-
+        parser.add_argument(
+            "name",
+            nargs="?",
+            type=str,
+            help="Deploy given baremetal node when in provision state available",
+        )
+        parser.add_argument(
             "--all",
             default=False,
             help="Deploy all baremetal nodes in provision state available",
             action="store_true",
         )
-        parser_exc_group.add_argument(
-            "--name",
-            default=[],
-            help="Deploy given baremetal node when in provision state available. May be specified multiple times",
-            action="append",
-        )
         parser.add_argument(
             "--rebuild",
             default=False,
@@ -99,10 +98,14 @@ class BaremetalDeploy(Command):
 
     def take_action(self, parsed_args):
         all_nodes = parsed_args.all
-
+        name = parsed_args.name
         rebuild = parsed_args.rebuild
         yes_i_really_really_mean_it = parsed_args.yes_i_really_really_mean_it
 
+        if not all_nodes and not name:
+            logger.error("Please specify a node name or use --all")
+            return
+
         if all_nodes and rebuild and not yes_i_really_really_mean_it:
             logger.error(
                 "Please confirm that you wish to rebuild all nodes by specifying '--yes-i-really-really-mean-it'"
@@ -114,14 +117,14 @@ class BaremetalDeploy(Command):
         if all_nodes:
             deploy_nodes = list(conn.baremetal.nodes(details=True))
         else:
-
-
-
-
+            node = conn.baremetal.find_node(name, ignore_missing=True, details=True)
+            if not node:
+                logger.warning(f"Could not find node {name}")
+                return
+            deploy_nodes = [node]
 
-        for
+        for node in deploy_nodes:
             if not node:
-                logger.warning(f"Could not find node {names[node_idx]}")
                 continue
 
             if node.provision_state in ["available", "deploy failed"]:
@@ -176,19 +179,18 @@ class BaremetalUndeploy(Command):
     def get_parser(self, prog_name):
         parser = super(BaremetalUndeploy, self).get_parser(prog_name)
 
-
-
+        parser.add_argument(
+            "name",
+            nargs="?",
+            type=str,
+            help="Undeploy given baremetal node",
+        )
+        parser.add_argument(
             "--all",
             default=False,
             help="Undeploy all baremetal nodes",
             action="store_true",
         )
-        parser_exc_group.add_argument(
-            "--name",
-            default=[],
-            help="Undeploy given baremetal node. May be specified multiple times",
-            action="append",
-        )
         parser.add_argument(
             "--yes-i-really-really-mean-it",
             default=False,
@@ -199,9 +201,13 @@ class BaremetalUndeploy(Command):
 
     def take_action(self, parsed_args):
         all_nodes = parsed_args.all
-
+        name = parsed_args.name
         yes_i_really_really_mean_it = parsed_args.yes_i_really_really_mean_it
 
+        if not all_nodes and not name:
+            logger.error("Please specify a node name or use --all")
+            return
+
         if all_nodes and not yes_i_really_really_mean_it:
             logger.error(
                 "Please confirm that you wish to undeploy all nodes by specifying '--yes-i-really-really-mean-it'"
@@ -213,14 +219,14 @@ class BaremetalUndeploy(Command):
         if all_nodes:
             deploy_nodes = list(conn.baremetal.nodes())
         else:
-
-
-
-
+            node = conn.baremetal.find_node(name, ignore_missing=True, details=False)
+            if not node:
+                logger.warning(f"Could not find node {name}")
+                return
+            deploy_nodes = [node]
 
-        for
+        for node in deploy_nodes:
             if not node:
-                logger.warning(f"Could not find node {names[node_idx]}")
                 continue
 
             if node.provision_state in ["active", "deploy failed", "error"]:
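
The repeatable `--name` option (`action="append"`) is replaced by a single optional positional `name` argument in both `BaremetalDeploy` and `BaremetalUndeploy`, so one node is targeted per invocation and `--all` covers bulk operations; calling the command with neither now logs an error and returns. A minimal standalone argparse sketch of the new behaviour (the real commands build their parser via cliff, and `node-001` is just a placeholder name):

```python
# Standalone sketch of the new argument handling; not the cliff-based command itself.
import argparse

parser = argparse.ArgumentParser(prog="baremetal deploy")
parser.add_argument(
    "name",
    nargs="?",
    type=str,
    help="Deploy given baremetal node when in provision state available",
)
parser.add_argument("--all", default=False, action="store_true")

print(parser.parse_args(["node-001"]))  # Namespace(name='node-001', all=False)
print(parser.parse_args(["--all"]))     # Namespace(name=None, all=True)
print(parser.parse_args([]))            # Namespace(name=None, all=False) -> command logs an error
```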

osism/commands/vault.py
CHANGED
@@ -5,6 +5,7 @@
 
 import os
 import subprocess
+import sys
 
 from cliff.command import Command
 from cryptography.fernet import Fernet
@@ -31,7 +32,14 @@ class SetPassword(Command):
 
         f = Fernet(key)
 
-
+        # Check if password is being piped from STDIN
+        if not sys.stdin.isatty():
+            ansible_vault_password = sys.stdin.read().strip()
+        else:
+            ansible_vault_password = prompt(
+                "Ansible Vault password: ", is_password=True
+            )
+
         redis.set(
            "ansible_vault_password", f.encrypt(ansible_vault_password.encode("utf-8"))
         )

osism/core/enums.py
CHANGED
@@ -102,6 +102,7 @@ VALIDATE_PLAYBOOKS = {
     "ceph-osds": {"environment": "ceph", "runtime": "osism-ansible"},
     "container-status": {"environment": "generic", "runtime": "osism-ansible"},
     "kernel-version": {"environment": "generic", "runtime": "osism-ansible"},
+    "docker-version": {"environment": "generic", "runtime": "osism-ansible"},
     "kolla-connectivity": {"environment": "kolla", "runtime": "osism-ansible"},
     "mysql-open-files-limit": {"environment": "generic", "runtime": "osism-ansible"},
     "ntp": {"environment": "generic", "runtime": "osism-ansible"},

osism/tasks/conductor/__init__.py
ADDED
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import copy
+from celery import Celery
+from celery.signals import worker_process_init
+from loguru import logger
+
+from osism.tasks import Config
+from osism.tasks.conductor.config import get_configuration
+from osism.tasks.conductor.ironic import sync_ironic as _sync_ironic
+
+
+# App configuration
+app = Celery("conductor")
+app.config_from_object(Config)
+
+
+@worker_process_init.connect
+def celery_init_worker(**kwargs):
+    pass
+
+
+@app.on_after_configure.connect
+def setup_periodic_tasks(sender, **kwargs):
+    pass
+
+
+# Tasks
+@app.task(bind=True, name="osism.tasks.conductor.get_ironic_parameters")
+def get_ironic_parameters(self):
+    configuration = get_configuration()
+    if "ironic_parameters" in configuration:
+        # NOTE: Do not pass by reference, everybody gets their own copy to work with
+        return copy.deepcopy(configuration["ironic_parameters"])
+
+    return {}
+
+
+@app.task(bind=True, name="osism.tasks.conductor.sync_netbox")
+def sync_netbox(self, force_update=False):
+    logger.info("Not implemented")
+
+
+@app.task(bind=True, name="osism.tasks.conductor.sync_ironic")
+def sync_ironic(self, force_update=False):
+    _sync_ironic(get_ironic_parameters, force_update)
+
+
+__all__ = [
+    "app",
+    "get_ironic_parameters",
+    "sync_netbox",
+    "sync_ironic",
+]
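
The conductor tasks now live in a package (`osism/tasks/conductor/`) instead of a single module; the Celery app and the `get_ironic_parameters`, `sync_netbox`, and `sync_ironic` tasks are registered here and re-exported via `__all__`, while the Ironic logic itself moves to `osism.tasks.conductor.ironic`. A rough caller-side sketch, assuming a Celery broker reachable through the settings in `osism.tasks.Config`:

```python
# Rough caller-side sketch; assumes the Celery broker configured via osism.tasks.Config is reachable.
from osism.tasks.conductor import sync_ironic

# Enqueue a full Ironic synchronisation under its registered task name
# "osism.tasks.conductor.sync_ironic".
result = sync_ironic.delay(force_update=True)
result.get(timeout=600)  # optionally wait for the worker to finish
```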

osism/tasks/conductor/config.py
ADDED
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import uuid
+
+from loguru import logger
+import yaml
+
+from osism.tasks import Config, openstack
+
+
+def is_uuid(value):
+    """Check if a string is a valid UUID."""
+    try:
+        uuid.UUID(value)
+        return True
+    except (ValueError, AttributeError):
+        return False
+
+
+def get_configuration():
+    with open("/etc/conductor.yml") as fp:
+        configuration = yaml.load(fp, Loader=yaml.SafeLoader)
+
+    if not configuration:
+        logger.warning(
+            "The conductor configuration is empty. That's probably wrong"
+        )
+        return {}
+
+    if Config.enable_ironic.lower() not in ["true", "yes"]:
+        return configuration
+
+    if "ironic_parameters" not in configuration:
+        logger.error("ironic_parameters not found in the conductor configuration")
+        return configuration
+
+    if "instance_info" in configuration["ironic_parameters"]:
+        if "image_source" in configuration["ironic_parameters"]["instance_info"]:
+            image_source = configuration["ironic_parameters"]["instance_info"][
+                "image_source"
+            ]
+            if not is_uuid(image_source):
+                result = openstack.image_get(image_source)
+                configuration["ironic_parameters"]["instance_info"][
+                    "image_source"
+                ] = result.id
+
+    if "driver_info" in configuration["ironic_parameters"]:
+        if "deploy_kernel" in configuration["ironic_parameters"]["driver_info"]:
+            deploy_kernel = configuration["ironic_parameters"]["driver_info"][
+                "deploy_kernel"
+            ]
+            if not is_uuid(deploy_kernel):
+                result = openstack.image_get(deploy_kernel)
+                configuration["ironic_parameters"]["driver_info"][
+                    "deploy_kernel"
+                ] = result.id
+
+        if "deploy_ramdisk" in configuration["ironic_parameters"]["driver_info"]:
+            deploy_ramdisk = configuration["ironic_parameters"]["driver_info"][
+                "deploy_ramdisk"
+            ]
+            if not is_uuid(deploy_ramdisk):
+                result = openstack.image_get(deploy_ramdisk)
+                configuration["ironic_parameters"]["driver_info"][
+                    "deploy_ramdisk"
+                ] = result.id
+
+        if "cleaning_network" in configuration["ironic_parameters"]["driver_info"]:
+            result = openstack.network_get(
+                configuration["ironic_parameters"]["driver_info"][
+                    "cleaning_network"
+                ]
+            )
+            configuration["ironic_parameters"]["driver_info"][
+                "cleaning_network"
+            ] = result.id
+
+        if (
+            "provisioning_network"
+            in configuration["ironic_parameters"]["driver_info"]
+        ):
+            result = openstack.network_get(
+                configuration["ironic_parameters"]["driver_info"][
+                    "provisioning_network"
+                ]
+            )
+            configuration["ironic_parameters"]["driver_info"][
+                "provisioning_network"
+            ] = result.id
+
+    return configuration
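
`get_configuration()` loads `/etc/conductor.yml` and, when Ironic support is enabled, replaces any `image_source`, `deploy_kernel`, or `deploy_ramdisk` given by name with the corresponding image ID, and resolves `cleaning_network`/`provisioning_network` names via the network lookup, so Ironic only ever receives UUIDs. The `is_uuid()` helper decides whether a lookup is needed; a small usage sketch (the UUID and image name below are illustrative):

```python
# Usage sketch for the is_uuid() helper defined above.
import uuid

def is_uuid(value):
    """Check if a string is a valid UUID."""
    try:
        uuid.UUID(value)
        return True
    except (ValueError, AttributeError):
        return False

print(is_uuid("5c9d1a3e-6a6e-4a3b-9a57-0c6c5f4a1d2f"))  # True  -> used as-is
print(is_uuid("ubuntu-22.04-baremetal"))                # False -> resolved via openstack.image_get()
```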

osism/tasks/conductor/ironic.py
ADDED
@@ -0,0 +1,323 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import ipaddress
+import json
+
+import jinja2
+from loguru import logger
+from pottery import Redlock
+
+from osism import utils as osism_utils
+from osism.tasks import netbox, openstack
+from osism.tasks.conductor.netbox import get_nb_device_query_list
+from osism.tasks.conductor.utils import (
+    deep_compare,
+    deep_decrypt,
+    deep_merge,
+    get_vault,
+)
+
+
+driver_params = {
+    "ipmi": {
+        "address": "ipmi_address",
+        "port": "ipmi_port",
+        "password": "ipmi_password",
+        "username": "ipmi_username",
+    },
+    "redfish": {
+        "address": "redfish_address",
+        "password": "redfish_password",
+        "username": "redfish_username",
+    },
+}
+
+
+def sync_ironic(get_ironic_parameters, force_update=False):
+    devices = set()
+    nb_device_query_list = get_nb_device_query_list()
+    for nb_device_query in nb_device_query_list:
+        devices |= set(netbox.get_devices(**nb_device_query))
+
+    # NOTE: Find nodes in Ironic which are no longer present in netbox and remove them
+    device_names = {dev.name for dev in devices}
+    nodes = openstack.baremetal_node_list()
+    for node in nodes:
+        logger.info(f"Looking for {node['Name']} in netbox")
+        if node["Name"] not in device_names:
+            if (
+                not node["Instance UUID"]
+                and node["Provisioning State"] in ["enroll", "manageable", "available"]
+                and node["Power State"] in ["power off", None]
+            ):
+                logger.info(
+                    f"Cleaning up baremetal node not found in netbox: {node['Name']}"
+                )
+                for port in openstack.baremetal_port_list(
+                    details=False, attributes=dict(node_uuid=node["UUID"])
+                ):
+                    openstack.baremetal_port_delete(port.id)
+                openstack.baremetal_node_delete(node["UUID"])
+            else:
+                logger.error(
+                    f"Cannot remove baremetal node because it is still provisioned or running: {node}"
+                )
+
+    # NOTE: Find nodes in netbox which are not present in Ironic and add them
+    for device in devices:
+        logger.info(f"Looking for {device.name} in ironic")
+        logger.info(device)
+
+        node_interfaces = list(netbox.get_interfaces_by_device(device.name))
+
+        node_attributes = get_ironic_parameters()
+        if (
+            "ironic_parameters" in device.custom_fields
+            and device.custom_fields["ironic_parameters"]
+        ):
+            # NOTE: Update node attributes with overrides from netbox device
+            deep_merge(node_attributes, device.custom_fields["ironic_parameters"])
+
+        # NOTE: Decrypt ansible vaulted secrets
+        vault = get_vault()
+        deep_decrypt(node_attributes, vault)
+
+        node_secrets = device.custom_fields.get("secrets", {})
+        if node_secrets is None:
+            node_secrets = {}
+        deep_decrypt(node_secrets, vault)
+
+        if (
+            "driver" in node_attributes
+            and node_attributes["driver"] in driver_params.keys()
+        ):
+            if "driver_info" in node_attributes:
+                # NOTE: Remove all fields belonging to a different driver
+                unused_drivers = [
+                    driver
+                    for driver in driver_params.keys()
+                    if driver != node_attributes["driver"]
+                ]
+                for key in list(node_attributes["driver_info"].keys()):
+                    for driver in unused_drivers:
+                        if key.startswith(driver + "_"):
+                            node_attributes["driver_info"].pop(key, None)
+
+                # NOTE: Render driver username field
+                username_key = driver_params[node_attributes["driver"]]["username"]
+                if username_key in node_attributes["driver_info"]:
+                    node_attributes["driver_info"][username_key] = (
+                        jinja2.Environment(loader=jinja2.BaseLoader())
+                        .from_string(node_attributes["driver_info"][username_key])
+                        .render(
+                            remote_board_username=str(
+                                node_secrets.get("remote_board_username", "admin")
+                            )
+                        )
+                    )
+
+                # NOTE: Render driver password field
+                password_key = driver_params[node_attributes["driver"]]["password"]
+                if password_key in node_attributes["driver_info"]:
+                    node_attributes["driver_info"][password_key] = (
+                        jinja2.Environment(loader=jinja2.BaseLoader())
+                        .from_string(node_attributes["driver_info"][password_key])
+                        .render(
+                            remote_board_password=str(
+                                node_secrets.get("remote_board_password", "password")
+                            )
+                        )
+                    )
+
+                # NOTE: Render driver address field
+                address_key = driver_params[node_attributes["driver"]]["address"]
+                if address_key in node_attributes["driver_info"]:
+                    if device.oob_ip and "address" in device.oob_ip:
+                        node_mgmt_address = device.oob_ip["address"]
+                    else:
+                        node_mgmt_addresses = [
+                            interface["address"]
+                            for interface in node_interfaces
+                            if interface.mgmt_only
+                            and "address" in interface
+                            and interface["address"]
+                        ]
+                        if len(node_mgmt_addresses) > 0:
+                            node_mgmt_address = node_mgmt_addresses[0]
+                        else:
+                            node_mgmt_address = None
+                    if node_mgmt_address:
+                        node_attributes["driver_info"][address_key] = (
+                            jinja2.Environment(loader=jinja2.BaseLoader())
+                            .from_string(node_attributes["driver_info"][address_key])
+                            .render(
+                                remote_board_address=str(
+                                    ipaddress.ip_interface(node_mgmt_address).ip
+                                )
+                            )
+                        )
+        node_attributes.update({"resource_class": device.name})
+        # NOTE: Write metadata used for provisioning into 'extra' field, so that it is available during node deploy without querying the netbox again
+        if "extra" not in node_attributes:
+            node_attributes["extra"] = {}
+        if (
+            "netplan_parameters" in device.custom_fields
+            and device.custom_fields["netplan_parameters"]
+        ):
+            node_attributes["extra"].update(
+                {
+                    "netplan_parameters": json.dumps(
+                        device.custom_fields["netplan_parameters"]
+                    )
+                }
+            )
+        if (
+            "frr_parameters" in device.custom_fields
+            and device.custom_fields["frr_parameters"]
+        ):
+            node_attributes["extra"].update(
+                {"frr_parameters": json.dumps(device.custom_fields["frr_parameters"])}
+            )
+        ports_attributes = [
+            dict(address=interface.mac_address)
+            for interface in node_interfaces
+            if interface.enabled and not interface.mgmt_only and interface.mac_address
+        ]
+
+        lock = Redlock(
+            key=f"lock_osism_tasks_conductor_sync_ironic-{device.name}",
+            masters={osism_utils.redis},
+            auto_release_time=600,
+        )
+        if lock.acquire(timeout=120):
+            try:
+                logger.info(f"Processing device {device.name}")
+                node = openstack.baremetal_node_show(device.name, ignore_missing=True)
+                if not node:
+                    logger.info(f"Creating baremetal node for {device.name}")
+                    node = openstack.baremetal_node_create(device.name, node_attributes)
+                else:
+                    # NOTE: The listener service only reacts to changes in the baremetal node. Explicitly sync provision and power state in case updates were missed by the listener.
+                    if (
+                        device.custom_fields["provision_state"]
+                        != node["provision_state"]
+                    ):
+                        netbox.set_provision_state(device.name, node["provision_state"])
+                    if device.custom_fields["power_state"] != node["power_state"]:
+                        netbox.set_power_state(device.name, node["power_state"])
+                    # NOTE: Check whether the baremetal node needs to be updated
+                    node_updates = {}
+                    deep_compare(node_attributes, node, node_updates)
+                    if "driver_info" in node_updates:
+                        # NOTE: The password is not returned by ironic, so we cannot make a comparision and it would always be updated. Therefore we pop it from the dictionary
+                        password_key = driver_params[node_attributes["driver"]][
+                            "password"
+                        ]
+                        if password_key in node_updates["driver_info"]:
+                            node_updates["driver_info"].pop(password_key, None)
+                        if not node_updates["driver_info"]:
+                            node_updates.pop("driver_info", None)
+                    if node_updates or force_update:
+                        logger.info(
+                            f"Updating baremetal node for {device.name} with {node_updates}"
+                        )
+                        # NOTE: Do the actual updates with all values in node_attributes. Otherwise nested dicts like e.g. driver_info will be overwritten as a whole and contain only changed values
+                        node = openstack.baremetal_node_update(
+                            node["uuid"], node_attributes
+                        )
+
+                node_ports = openstack.baremetal_port_list(
+                    details=False, attributes=dict(node_uuid=node["uuid"])
+                )
+                # NOTE: Baremetal ports are only required for (i)pxe boot
+                if node["boot_interface"] in ["pxe", "ipxe"]:
+                    for port_attributes in ports_attributes:
+                        port_attributes.update({"node_id": node["uuid"]})
+                        port = [
+                            port
+                            for port in node_ports
+                            if port_attributes["address"].upper()
+                            == port["address"].upper()
+                        ]
+                        if not port:
+                            logger.info(
+                                f"Creating baremetal port with MAC address {port_attributes['address']} for {device.name}"
+                            )
+                            openstack.baremetal_port_create(port_attributes)
+                        else:
+                            node_ports.remove(port[0])
+                    for node_port in node_ports:
+                        # NOTE: Delete remaining ports not found in netbox
+                        logger.info(
+                            f"Deleting baremetal port with MAC address {node_port['address']} for {device.name}"
+                        )
+                        openstack.baremetal_port_delete(node_port["id"])
+
+                node_validation = openstack.baremetal_node_validate(node["uuid"])
+                if node_validation["management"].result:
+                    logger.info(
+                        f"Validation of management interface successful for baremetal node for {device.name}"
+                    )
+                    if node["provision_state"] == "enroll":
+                        logger.info(
+                            f"Transitioning baremetal node to manageable state for {device.name}"
+                        )
+                        node = openstack.baremetal_node_set_provision_state(
+                            node["uuid"], "manage"
+                        )
+                        node = openstack.baremetal_node_wait_for_nodes_provision_state(
+                            node["uuid"], "manageable"
+                        )
+                        logger.info(f"Baremetal node for {device.name} is manageable")
+                    if node_validation["boot"].result:
+                        logger.info(
+                            f"Validation of boot interface successful for baremetal node for {device.name}"
+                        )
+                        if node["provision_state"] == "manageable":
+                            logger.info(
+                                f"Transitioning baremetal node to available state for {device.name}"
+                            )
+                            node = openstack.baremetal_node_set_provision_state(
+                                node["uuid"], "provide"
+                            )
+                            node = (
+                                openstack.baremetal_node_wait_for_nodes_provision_state(
+                                    node["uuid"], "available"
+                                )
+                            )
+                            logger.info(
+                                f"Baremetal node for {device.name} is available"
+                            )
+                    else:
+                        logger.info(
+                            f"Validation of boot interface failed for baremetal node for {device.name}\nReason: {node_validation['boot'].reason}"
+                        )
+                        if node["provision_state"] == "available":
+                            # NOTE: Demote node to manageable
+                            logger.info(
+                                f"Transitioning baremetal node to manageable state for {device.name}"
+                            )
+                            node = openstack.baremetal_node_set_provision_state(
+                                node["uuid"], "manage"
+                            )
+                            node = (
+                                openstack.baremetal_node_wait_for_nodes_provision_state(
+                                    node["uuid"], "manageable"
+                                )
+                            )
+                            logger.info(
+                                f"Baremetal node for {device.name} is manageable"
+                            )
+                else:
+                    logger.info(
+                        f"Validation of management interface failed for baremetal node for {device.name}\nReason: {node_validation['management'].reason}"
+                    )
+            except Exception as exc:
+                logger.info(
+                    f"Could not fully synchronize device {device.name} with ironic: {exc}"
+                )
+            finally:
+                lock.release()
+
+        else:
+            logger.error("Could not acquire lock for node {device.name}")

osism/tasks/conductor/netbox.py
ADDED
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from loguru import logger
+import yaml
+
+from osism import settings
+from osism.tasks import netbox
+
+
+def get_nb_device_query_list():
+    try:
+        supported_nb_device_filters = [
+            "site",
+            "region",
+            "site_group",
+            "location",
+            "rack",
+            "tag",
+            "state",
+        ]
+        nb_device_query_list = yaml.safe_load(settings.NETBOX_FILTER_CONDUCTOR)
+        if type(nb_device_query_list) is not list:
+            raise TypeError
+        for nb_device_query in nb_device_query_list:
+            if type(nb_device_query) is not dict:
+                raise TypeError
+            for key in list(nb_device_query.keys()):
+                if key not in supported_nb_device_filters:
+                    raise ValueError
+                # NOTE: Only "location_id" and "rack_id" are supported by netbox
+                if key in ["location", "rack"]:
+                    value_name = nb_device_query.pop(key, "")
+                    if key == "location":
+                        value_id = netbox.get_location_id(value_name)
+                    elif key == "rack":
+                        value_id = netbox.get_rack_id(value_name)
+                    if value_id:
+                        nb_device_query.update({key + "_id": value_id})
+                    else:
+                        raise ValueError(f"Invalid name {value_name} for {key}")
+    except (yaml.YAMLError, TypeError):
+        logger.error(
+            f"Setting NETBOX_FILTER_CONDUCTOR needs to be an array of mappings containing supported netbox device filters: {supported_nb_device_filters}"
+        )
+        nb_device_query_list = []
+    except ValueError as exc:
+        logger.error(f"Unknown value in NETBOX_FILTER_CONDUCTOR: {exc}")
+        nb_device_query_list = []
+
+    return nb_device_query_list
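
`get_nb_device_query_list()` parses the `NETBOX_FILTER_CONDUCTOR` setting, which must be a YAML list of mappings using only the supported device filters (`site`, `region`, `site_group`, `location`, `rack`, `tag`, `state`); `location` and `rack` names are converted to `location_id`/`rack_id` before the NetBox query. An illustrative value (the site and tag names are placeholders):

```python
# Illustrative NETBOX_FILTER_CONDUCTOR value; site/tag names are placeholders.
import yaml

NETBOX_FILTER_CONDUCTOR = """
- site: site-a
  state: active
- tag: managed-by-ironic
"""

print(yaml.safe_load(NETBOX_FILTER_CONDUCTOR))
# [{'site': 'site-a', 'state': 'active'}, {'tag': 'managed-by-ironic'}]
```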