osism 0.20250331.0__py3-none-any.whl → 0.20250425.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
osism/tasks/netbox.py CHANGED
@@ -1,126 +1,154 @@
  # SPDX-License-Identifier: Apache-2.0

  from celery import Celery
- from celery.signals import worker_process_init
- import json
- import pynetbox
+ from loguru import logger
+ from pottery import Redlock

  from osism import settings, utils
- from osism.actions import manage_device, manage_interface
- from osism.tasks import Config, openstack, run_command
+ from osism.tasks import Config, run_command

  app = Celery("netbox")
  app.config_from_object(Config)


- @worker_process_init.connect
- def celery_init_worker(**kwargs):
-     if settings.NETBOX_URL and settings.NETBOX_TOKEN:
-         utils.nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
-
-         if settings.IGNORE_SSL_ERRORS:
-             import requests
-
-             requests.packages.urllib3.disable_warnings()
-             session = requests.Session()
-             session.verify = False
-             utils.nb.http_session = session
-
-
  @app.on_after_configure.connect
  def setup_periodic_tasks(sender, **kwargs):
      pass


- @app.task(bind=True, name="osism.tasks.netbox.periodic_synchronize_ironic")
- def periodic_synchronize_ironic(self):
-     """Synchronize the state of Ironic with Netbox"""
-     openstack.baremetal_node_list.apply_async((), link=synchronize_device_state.s())
-
-
  @app.task(bind=True, name="osism.tasks.netbox.run")
  def run(self, action, arguments):
      pass


- @app.task(bind=True, name="osism.tasks.netbox.update_network_interface_name")
- def update_network_interface_name(self, mac_address, network_interface_name):
-     manage_interface.update_network_interface_name(mac_address, network_interface_name)
-
-
- @app.task(bind=True, name="osism.tasks.netbox.synchronize_device_state")
- def synchronize_device_state(self, data):
-     """Synchronize the state of Ironic with Netbox"""
-
-     if type(data) == str:
-         data = json.loads(data)
-
-     if not data:
-         return
-
-     for device in data:
-         manage_device.set_provision_state(device["Name"], device["Provisioning State"])
-         manage_device.set_power_state(device["Name"], device["Power State"])
-
-
- @app.task(bind=True, name="osism.tasks.netbox.states")
- def states(self, data):
-     result = manage_device.get_states(data.keys())
-     return result
-
-
- @app.task(bind=True, name="osism.tasks.netbox.set_state")
- def set_state(self, device=None, state=None, state_type=None):
-     manage_device.set_state(device, state, state_type)
+ # NOTE: While `get_*` tasks only operate on the Netbox configured in NETBOX_URL, `set_*` tasks additionally operate on all Netbox instances listed in NETBOX_SECONDARIES


  @app.task(bind=True, name="osism.tasks.netbox.set_maintenance")
- def set_maintenance(self, device=None, state=None):
-     manage_device.set_maintenance(device, state)
-
+ def set_maintenance(self, device_name, state=True):
+     """Set the maintenance state for a device in the Netbox."""

- @app.task(bind=True, name="osism.tasks.netbox.diff")
- @app.task(bind=True, name="osism.tasks.netbox.get_devices_not_yet_registered_in_ironic")
- def get_devices_not_yet_registered_in_ironic(
-     self, status="active", tags=["managed-by-ironic"], ironic_enabled=True
- ):
-     devices = utils.nb.dcim.devices.filter(
-         tag=tags, status=status, cf_ironic_enabled=[ironic_enabled]
+     lock = Redlock(
+         key=f"lock_osism_tasks_netbox_set_maintenance_{device_name}",
+         masters={utils.redis},
+         auto_release_time=60,
      )
-
-     result = []
-
-     for device in devices:
-         if (
-             "ironic_state" in device.custom_fields
-             and device.custom_fields["ironic_state"] != "registered"
-         ):
-             result.append(device.name)
-
-     return result
-
-
- @app.task(
-     bind=True,
-     name="osism.tasks.netbox.get_devices_that_should_have_an_allocation_in_ironic",
- )
- def get_devices_that_should_have_an_allocation_in_ironic(self):
-     devices = utils.nb.dcim.devices.filter(
-         tag=["managed-by-ironic", "managed-by-osism"],
-         status="active",
-         cf_ironic_enabled=[True],
-         cf_ironic_state=["registered"],
-         cf_provision_state=["available"],
-         cf_introspection_state=["introspected"],
-         cf_device_type=["server"],
+     if lock.acquire(timeout=20):
+         try:
+             for nb in [utils.nb] + utils.secondary_nb_list:
+                 logger.info(
+                     f"Set maintenance state of device {device_name} = {state} on {nb.base_url}"
+                 )
+                 device = nb.dcim.devices.get(name=device_name)
+                 if device:
+                     device.custom_fields.update({"maintenance": state})
+                     device.save()
+                 else:
+                     logger.error(
+                         f"Could not set maintenance for {device_name} on {nb.base_url}"
+                     )
+         finally:
+             lock.release()
+     else:
+         logger.error(f"Could not acquire lock for node {device_name}")
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.set_provision_state")
+ def set_provision_state(self, device_name, state):
+     """Set the provision state for a device in the Netbox."""
+
+     lock = Redlock(
+         key=f"lock_osism_tasks_netbox_set_provision_state_{device_name}",
+         masters={utils.redis},
+         auto_release_time=60,
      )
-
-     result = []
-
-     for device in devices:
-         result.append(device.name)
-
-     return result
+     if lock.acquire(timeout=20):
+         try:
+             for nb in [utils.nb] + utils.secondary_nb_list:
+                 logger.info(
+                     f"Set provision state of device {device_name} = {state} on {nb.base_url}"
+                 )
+                 device = nb.dcim.devices.get(name=device_name)
+                 if device:
+                     device.custom_fields.update({"provision_state": state})
+                     device.save()
+                 else:
+                     logger.error(
+                         f"Could not set provision state for {device_name} on {nb.base_url}"
+                     )
+         finally:
+             lock.release()
+     else:
+         logger.error(f"Could not acquire lock for node {device_name}")
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.set_power_state")
+ def set_power_state(self, device_name, state):
+     """Set the power state for a device in the Netbox."""
+
+     lock = Redlock(
+         key=f"lock_osism_tasks_netbox_set_power_state_{device_name}",
+         masters={utils.redis},
+         auto_release_time=60,
+     )
+     if lock.acquire(timeout=20):
+         try:
+             for nb in [utils.nb] + utils.secondary_nb_list:
+                 logger.info(
+                     f"Set power state of device {device_name} = {state} on {nb.base_url}"
+                 )
+                 device = nb.dcim.devices.get(name=device_name)
+                 if device:
+                     device.custom_fields.update({"power_state": state})
+                     device.save()
+                 else:
+                     logger.error(
+                         f"Could not set power state for {device_name} on {nb.base_url}"
+                     )
+         finally:
+             lock.release()
+     else:
+         logger.error(f"Could not acquire lock for node {device_name}")
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.get_location_id")
+ def get_location_id(self, location_name):
+     try:
+         location = utils.nb.dcim.locations.get(name=location_name)
+     except ValueError:
+         return None
+     if location:
+         return location.id
+     else:
+         return None
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.get_rack_id")
+ def get_rack_id(self, rack_name):
+     try:
+         rack = utils.nb.dcim.racks.get(name=rack_name)
+     except ValueError:
+         return None
+     if rack:
+         return rack.id
+     else:
+         return None
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.get_devices")
+ def get_devices(self, **query):
+     return utils.nb.dcim.devices.filter(**query)
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.get_device_by_name")
+ def get_device_by_name(self, name):
+     return utils.nb.dcim.devices.get(name=name)
+
+
+ @app.task(bind=True, name="osism.tasks.netbox.get_interfaces_by_device")
+ def get_interfaces_by_device(self, device_name):
+     return utils.nb.dcim.interfaces.filter(device=device_name)


  @app.task(bind=True, name="osism.tasks.netbox.manage")
@@ -139,7 +167,7 @@ def manage(self, *arguments, publish=True, locking=False, auto_release_time=3600
          *arguments,
          publish=publish,
          locking=locking,
-         auto_release_time=auto_release_time
+         auto_release_time=auto_release_time,
      )

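The new set_* tasks above replace the removed manage_device helpers with direct pynetbox writes to device custom fields, each guarded by a per-device Redlock, and, per the NOTE in the diff, they fan out to the primary NetBox plus every instance in NETBOX_SECONDARIES, while the get_* tasks read only from the primary. A minimal usage sketch, assuming a running Celery broker and netbox worker; the device name is illustrative and not taken from this package:

# Minimal sketch, assuming a configured broker and a running netbox worker.
from osism.tasks import netbox

# The worker acquires the per-device Redlock, then updates the
# "provision_state" custom field on the primary NetBox and on every
# instance listed in NETBOX_SECONDARIES.
task = netbox.set_provision_state.delay("testbed-node-0", "active")  # illustrative name
task.wait(timeout=60)  # block until the task finishes (or 60s elapse)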
 
osism/tasks/openstack.py CHANGED
@@ -1,16 +1,10 @@
  # SPDX-License-Identifier: Apache-2.0

- import copy
- import ipaddress
-
  from celery import Celery
- import jinja2
- from openstack.exceptions import ConflictException, ResourceNotFound, ResourceFailure
- from pottery import Redlock
  import tempfile

  from osism import utils
- from osism.tasks import Config, conductor, netbox, run_command
+ from osism.tasks import Config, run_command

  app = Celery("openstack")
  app.config_from_object(Config)
@@ -25,20 +19,46 @@ def setup_periodic_tasks(sender, **kwargs):
  def image_get(self, image_name):
      conn = utils.get_openstack_connection()
      result = conn.image.find_image(image_name)
-     return result.id
+     return result


  @app.task(bind=True, name="osism.tasks.openstack.network_get")
  def network_get(self, network_name):
      conn = utils.get_openstack_connection()
      result = conn.network.find_network(network_name)
-     return result.id
+     return result
+
+
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_create")
+ def baremetal_node_create(self, node_name, attributes=None):
+     if attributes is None:
+         attributes = {}
+     attributes.update({"name": node_name})
+     conn = utils.get_openstack_connection()
+     result = conn.baremetal.create_node(**attributes)
+     return result
+
+
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_delete")
+ def baremetal_node_delete(self, node_or_id):
+     conn = utils.get_openstack_connection()
+     result = conn.baremetal.delete_node(node_or_id)
+     return result
+
+
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_update")
+ def baremetal_node_update(self, node_id_or_name, attributes=None):
+     if attributes is None:
+         attributes = {}
+     conn = utils.get_openstack_connection()
+     result = conn.baremetal.update_node(node_id_or_name, **attributes)
+     return result


  @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_show")
- def baremetal_node_show(self, node_id_or_name):
+ def baremetal_node_show(self, node_id_or_name, ignore_missing=False):
      conn = utils.get_openstack_connection()
-     result = conn.baremetal.find_node(node_id_or_name)
+     result = conn.baremetal.find_node(node_id_or_name, ignore_missing)
      return result


@@ -64,182 +84,108 @@ def baremetal_node_list(self):
      return result


- @app.task(
-     bind=True, name="osism.tasks.openstack.baremetal_introspection_interface_list"
- )
- def baremetal_introspection_interface_list(self, node_id_or_name):
-     pass
-
-
- @app.task(bind=True, name="osism.tasks.openstack.baremetal_introspection_status")
- def baremetal_introspection_status(self, node_id_or_name):
-     result = None
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_validate")
+ def baremetal_node_validate(self, node_id_or_name):
+     conn = utils.get_openstack_connection()
+     result = conn.baremetal.validate_node(node_id_or_name, required=())
      return result


- @app.task(bind=True, name="osism.tasks.openstack.baremetal_get_network_interface_name")
- def baremetal_get_network_interface_name(self, node_name, mac_address):
+ @app.task(
+     bind=True,
+     name="osism.tasks.openstack.baremetal_node_wait_for_nodes_provision_state",
+ )
+ def baremetal_node_wait_for_nodes_provision_state(self, node_id_or_name, state):
      conn = utils.get_openstack_connection()
+     result = conn.baremetal.wait_for_nodes_provision_state([node_id_or_name], state)
+     if len(result) > 0:
+         return result[0]
+     else:
+         return None

-     introspection = conn.baremetal_introspection.get_introspection(node_name)
-
-     # Wait up to 5 minutes for the completion of a running introspection
-     conn.baremetal_introspection.wait_for_introspection(introspection, timeout=30)
-
-     introspection_data = conn.baremetal_introspection.get_introspection_data(
-         introspection
-     )
-     interfaces = introspection_data["inventory"]["interfaces"]
-
-     result = None
-     for interface in interfaces:
-         if interface["mac_address"].lower() == mac_address.lower():
-             result = interface["name"]

+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_set_provision_state")
+ def baremetal_node_set_provision_state(self, node, state):
+     conn = utils.get_openstack_connection()
+     result = conn.baremetal.set_node_provision_state(node, state)
      return result


- @app.task(bind=True, name="osism.tasks.openstack.baremetal_set_node_provision_state")
- def baremetal_set_node_provision_state(self, node, state):
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_port_list")
+ def baremetal_port_list(self, details=False, attributes=None):
+     if attributes is None:
+         attributes = {}
      conn = utils.get_openstack_connection()
-     conn.baremetal.set_node_provision_state(node, state)
+     result = conn.baremetal.ports(details=details, **attributes)
+     return list(result)


- @app.task(bind=True, name="osism.tasks.openstack.baremetal_create_allocations")
- def baremetal_create_allocations(self, nodes):
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_port_create")
+ def baremetal_port_create(self, attributes=None):
+     if attributes is None:
+         attributes = {}
      conn = utils.get_openstack_connection()
-
-     for node in nodes:
-         try:
-             allocation_a = conn.baremetal.get_allocation(allocation=node)
-         except ResourceNotFound:
-             allocation_a = None
-
-         if not allocation_a:
-             # Get Ironic parameters from the conductor
-             task = conductor.get_ironic_parameters.delay()
-             task.wait(timeout=None, interval=0.5)
-             ironic_parameters = task.get()
-
-             allocation_a = conn.baremetal.create_allocation(
-                 name=node,
-                 candidate_nodes=[node],
-                 resource_class=ironic_parameters["resource_class"],
-             )
-             conn.baremetal.wait_for_allocation(allocation=node, timeout=30)
+     result = conn.baremetal.create_port(**attributes)
+     return result


- @app.task(bind=True, name="osism.tasks.openstack.baremetal_create_nodes")
- def baremetal_create_nodes(self, nodes, ironic_parameters):
+ @app.task(bind=True, name="osism.tasks.openstack.baremetal_port_delete")
+ def baremetal_port_delete(self, port_or_id):
      conn = utils.get_openstack_connection()
+     result = conn.baremetal.delete_port(port_or_id)
+     return result

-
-     for node in nodes:
-         # TODO: Filter on mgmt_only
-         address_a = utils.nb.ipam.ip_addresses.get(device=node, interface="Ethernet0")
-
-         node_parameters = copy.deepcopy(ironic_parameters)
-
-         if node_parameters["driver"] == "redfish":
-             remote_board_address = str(ipaddress.ip_interface(address_a["address"]).ip)
-             t = jinja2.Environment(loader=jinja2.BaseLoader()).from_string(
-                 node_parameters["driver_info"]["redfish_address"]
-             )
-             node_parameters["driver_info"]["redfish_address"] = t.render(
-                 remote_board_address=remote_board_address
-             )
-
-         elif node_parameters["driver"] == "ipmi":
-             remote_board_address = str(ipaddress.ip_interface(address_a["address"]).ip)
-             t = jinja2.Environment(loader=jinja2.BaseLoader()).from_string(
-                 node_parameters["driver_info"]["ipmi_address"]
-             )
-             node_parameters["driver_info"]["ipmi_address"] = t.render(
-                 remote_board_address=remote_board_address
-             )
-
-         try:
-             device_a = utils.nb.dcim.devices.get(name=node)
-             tags = [str(tag) for tag in device_a.tags]
-
-             # NOTE: Internally used nodes are identified by their unique name via the resource class.
-             # The actual resource class is explicitly overwritten.
-             if "Managed by Ironic" in tags and "Managed by OSISM" in tags:
-                 node_parameters["resource_class"] = f"osism-{node}"
-                 baremetal_create_internal_flavor(node)
-
-             conn.baremetal.create_node(
-                 name=node, provision_state="manageable", **node_parameters
-             )
-             conn.baremetal.wait_for_nodes_provision_state([node], "manageable")
-
-             if "Managed by Ironic" in tags and "Managed by OSISM" not in tags:
-                 conn.baremetal.set_node_traits(node, ["CUSTOM_GENERAL_USE"])
-             elif "Managed by Ironic" in tags and "Managed by OSISM" in tags:
-                 conn.baremetal.set_node_traits(node, ["CUSTOM_OSISM_USE"])

-             conn.baremetal.set_node_provision_state(node, "inspect")
+ @app.task(bind=True, name="osism.tasks.openstack.compute_flavor_get")
+ def compute_flavor_get(self, name_or_id):
+     conn = utils.get_openstack_connection()
+     result = conn.compute.find_flavor(
+         name_or_id, ignore_missing=True, get_extra_specs=True
+     )
+     return result

-             # TODO: Check if the system has been registered correctly
-             device_a.custom_fields = {
-                 "ironic_state": "registered",
-             }
-             device_a.save()
-
-         except ResourceFailure:
-             # TODO: Do something useful here
-             pass
-         except ConflictException:
-             # The node already exists and has a wrong state in the Netbox
-             device_a = utils.nb.dcim.devices.get(name=node)
-             device_a.custom_fields = {
-                 "ironic_state": "registered",
-             }
-             device_a.save()

+ @app.task(bind=True, name="osism.tasks.openstack.compute_flavor_create")
+ def compute_flavor_create(self, name, attributes=None):
+     if attributes is None:
+         attributes = {}
+     attributes.update({"name": name})
+     extra_specs = attributes.pop("extra_specs", None)
+     conn = utils.get_openstack_connection()
+     flavor = conn.compute.create_flavor(**attributes)
+     if extra_specs:
+         flavor = conn.compute.create_flavor_extra_specs(flavor, extra_specs)
+     return flavor

- @app.task(bind=True, name="osism.tasks.openstack.baremetal_check_allocations")
- def baremetal_check_allocations(self):
-     lock = Redlock(
-         key="lock_osism_tasks_openstack_baremetal_check_allocations",
-         masters={utils.redis},
-         auto_release_time=60,
-     )

-     if lock.acquire(timeout=20):
-         netbox.get_devices_that_should_have_an_allocation_in_ironic.apply_async(
-             (), link=baremetal_create_allocations.s()
-         )
-         lock.release()
+ @app.task(bind=True, name="osism.tasks.openstack.compute_flavor_delete")
+ def compute_flavor_delete(self, flavor):
+     conn = utils.get_openstack_connection()
+     conn.compute.delete_flavor(flavor, ignore_missing=True)


- @app.task(bind=True, name="osism.tasks.openstack.baremetal_create_internal_flavor")
- def baremetal_create_internal_flavor(self, node):
+ @app.task(bind=True, name="osism.tasks.openstack.compute_flavor_update_extra_specs")
+ def compute_flavor_update_extra_specs(self, flavor, extra_specs={}):
      conn = utils.get_openstack_connection()
+     for key, value in extra_specs.items():
+         conn.compute.update_flavor_extra_specs_property(flavor, key, value)

-     flavor_a = conn.compute.create_flavor(
-         name=f"osism-{node}", ram=1, vcpus=1, disk=1, is_public=False
-     )
-     specs = {
-         f"resources:CUSTOM_RESOURCE_CLASS_OSISM_{node.upper()}": 1,
-         "resources:VCPU": 0,
-         "resources:MEMORY_MB": 0,
-         "resources:DISK_GB": 0,
-         "trait:CUSTOM_OSISM_USE": "required",
-     }
-     conn.compute.create_flavor_extra_specs(flavor_a, specs)
-
-
- @app.task(bind=True, name="osism.tasks.openstack.baremetal_delete_internal_flavor")
- def baremetal_delete_internal_flavor(self, node):
-     conn = utils.get_openstack_connection()

-     flavor = conn.compute.get_flavor(f"osism-{node}")
-     conn.compute.delete_flavor(flavor)
+ @app.task(bind=True, name="osism.tasks.openstack.compute_flavor_delete_extra_specs")
+ def compute_flavor_delete_extra_specs_property(self, flavor, prop):
+     conn = utils.get_openstack_connection()
+     conn.compute.delete_flavor_extra_specs_property(flavor, prop)


  @app.task(bind=True, name="osism.tasks.openstack.image_manager")
  def image_manager(
-     self, *arguments, configs=None, publish=True, locking=False, auto_release_time=3600
+     self,
+     *arguments,
+     configs=None,
+     publish=True,
+     locking=False,
+     auto_release_time=3600,
+     ignore_env=False
  ):
      command = "/usr/local/bin/openstack-image-manager"
      if configs:
@@ -269,6 +215,7 @@ def image_manager(
              publish=publish,
              locking=locking,
              auto_release_time=auto_release_time,
+             ignore_env=ignore_env,
          )
          return rc
      else:
@@ -280,6 +227,7 @@ def image_manager(
              publish=publish,
              locking=locking,
              auto_release_time=auto_release_time,
+             ignore_env=ignore_env,
          )

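The rewritten openstack.py drops the monolithic baremetal_create_nodes and allocation workflow in favor of thin, composable wrappers around individual openstacksdk calls (node, port, and flavor CRUD plus validate and wait-for-state), leaving orchestration to the caller. A hedged sketch of how the new tasks could be chained from a client; the node name and driver attributes are illustrative assumptions, not values from this package:

# Composition sketch, assuming a reachable OpenStack cloud and a running
# openstack worker; the node name and attributes are illustrative only.
from osism.tasks import openstack

attributes = {"driver": "redfish", "provision_state": "manageable"}  # hypothetical
node = openstack.baremetal_node_create.delay("testbed-node-0", attributes).get()

# Returns the node once Ironic reports the requested provision state,
# or None if wait_for_nodes_provision_state returns an empty list.
node = openstack.baremetal_node_wait_for_nodes_provision_state.delay(
    "testbed-node-0", "manageable"
).get()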