osism 0.20250326.0__py3-none-any.whl → 0.20250407.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
osism/tasks/conductor.py CHANGED
@@ -2,41 +2,26 @@
 
 from celery import Celery
 from celery.signals import worker_process_init
-import keystoneauth1
+import copy
+import ipaddress
+import jinja2
 from loguru import logger
-import openstack
-from redis import Redis
+from pottery import Redlock
 import yaml
 
-from osism import settings
-from osism.tasks import Config
+from osism import utils
+from osism.tasks import Config, netbox, openstack
 
 app = Celery("conductor")
 app.config_from_object(Config)
 
 
 configuration = {}
-redis = None
 
 
 @worker_process_init.connect
 def celery_init_worker(**kwargs):
     global configuration
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
-
-    # Parameters come from the environment, OS_*
-    try:
-        conn = openstack.connect()
-    except keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions:
-        pass
 
     with open("/etc/conductor.yml") as fp:
         configuration = yaml.load(fp, Loader=yaml.SafeLoader)
@@ -45,20 +30,20 @@ def celery_init_worker(**kwargs):
         logger.warning(
             "The conductor configuration is empty. That's probably wrong"
         )
+        configuration = {}
         return
 
     # Resolve all IDs in the conductor.yml
-    if Config.enable_ironic in ["True", "true", "Yes", "yes"]:
+    if Config.enable_ironic.lower() in ["true", "yes"]:
         if "ironic_parameters" not in configuration:
             logger.error(
                 "ironic_parameters not found in the conductor configuration"
             )
             return
 
-        # TODO: use osism.tasks.openstack in the future
        if "driver_info" in configuration["ironic_parameters"]:
             if "deploy_kernel" in configuration["ironic_parameters"]["driver_info"]:
-                result = conn.image.find_image(
+                result = openstack.image_get(
                     configuration["ironic_parameters"]["driver_info"][
                         "deploy_kernel"
                     ]
@@ -71,7 +56,7 @@ def celery_init_worker(**kwargs):
                 "deploy_ramdisk"
                 in configuration["ironic_parameters"]["driver_info"]
             ):
-                result = conn.image.find_image(
+                result = openstack.image_get(
                     configuration["ironic_parameters"]["driver_info"][
                         "deploy_ramdisk"
                     ]
@@ -84,7 +69,7 @@ def celery_init_worker(**kwargs):
                 "cleaning_network"
                 in configuration["ironic_parameters"]["driver_info"]
             ):
-                result = conn.network.find_network(
+                result = openstack.network_get(
                     configuration["ironic_parameters"]["driver_info"][
                         "cleaning_network"
                     ]
@@ -97,7 +82,7 @@ def celery_init_worker(**kwargs):
                 "provisioning_network"
                 in configuration["ironic_parameters"]["driver_info"]
             ):
-                result = conn.network.find_network(
+                result = openstack.network_get(
                     configuration["ironic_parameters"]["driver_info"][
                         "provisioning_network"
                     ]
@@ -115,6 +100,320 @@ def setup_periodic_tasks(sender, **kwargs):
 @app.task(bind=True, name="osism.tasks.conductor.get_ironic_parameters")
 def get_ironic_parameters(self):
     if "ironic_parameters" in configuration:
-        return configuration["ironic_parameters"]
+        # NOTE: Do not pass by reference, everybody gets their own copy to work with
+        return copy.deepcopy(configuration["ironic_parameters"])
 
     return {}
+
+
+@app.task(bind=True, name="osism.tasks.conductor.sync_netbox_with_ironic")
+def sync_netbox_with_ironic(self, force_update=False):
+    def deep_compare(a, b, updates):
+        """
+        Find items in a that do not exist in b or are different.
+        Write required changes into updates
+        """
+        for key, value in a.items():
+            if type(value) is not dict:
+                if key not in b or b[key] != value:
+                    updates[key] = value
+            else:
+                updates[key] = {}
+                deep_compare(a[key], b[key], updates[key])
+                if not updates[key]:
+                    updates.pop(key)
+
+    driver_params = {
+        "ipmi": {
+            "address": "ipmi_address",
+            "port": "ipmi_port",
+            "password": "ipmi_password",
+        },
+        "redfish": {
+            "address": "redfish_address",
+            "password": "redfish_password",
+        },
+    }
+
+    devices = list(netbox.get_devices_by_tags(["managed-by-ironic"]))
+
+    # NOTE: Find nodes in Ironic which are no longer present in netbox and remove them
+    device_names = [dev.name for dev in devices]
+    nodes = openstack.baremetal_node_list()
+    for node in nodes:
+        logger.info(f"Looking for {node['Name']} in netbox")
+        if node["Name"] not in device_names:
+            if (
+                not node["Instance UUID"]
+                and node["Provisioning State"] in ["enroll", "manageable", "available"]
+                and node["Power State"] in ["power off", None]
+            ):
+                logger.info(
+                    f"Cleaning up baremetal node not found in netbox: {node['Name']}"
+                )
+                flavor_name = "osism-" + node["Name"]
+                flavor = openstack.compute_flavor_get(flavor_name)
+                if flavor:
+                    logger.info(f"Deleting flavor {flavor_name}")
+                    openstack.compute_flavor_delete(flavor)
+                for port in openstack.baremetal_port_list(
+                    details=False, attributes=dict(node_uuid=node["UUID"])
+                ):
+                    openstack.baremetal_port_delete(port.id)
+                openstack.baremetal_node_delete(node["UUID"])
+            else:
+                logger.error(
+                    f"Cannot remove baremetal node because it is still provisioned or running: {node}"
+                )
+
+    # NOTE: Find nodes in netbox which are not present in Ironic and add them
+    for device in devices:
+        logger.info(f"Looking for {device.name} in ironic")
+
+        node_interfaces = list(netbox.get_interfaces_by_device(device.name))
+
+        node_attributes = get_ironic_parameters()
+        if (
+            "driver" in node_attributes
+            and node_attributes["driver"] in driver_params.keys()
+        ):
+            if "driver_info" in node_attributes:
+                address_key = driver_params[node_attributes["driver"]]["address"]
+                if address_key in node_attributes["driver_info"]:
+                    if "oob_address" in device.custom_fields:
+                        node_mgmt_address = device.custom_fields["oob_address"]
+                    elif "address" in device.oob_ip:
+                        node_mgmt_address = device.oob_ip["address"]
+                    else:
+                        node_mgmt_addresses = [
+                            interface["address"]
+                            for interface in node_interfaces
+                            if interface.mgmt_only
+                            and "address" in interface
+                            and interface["address"]
+                        ]
+                        if len(node_mgmt_addresses) > 0:
+                            node_mgmt_address = node_mgmt_addresses[0]
+                        else:
+                            node_mgmt_address = None
+                    if node_mgmt_address:
+                        node_attributes["driver_info"][address_key] = (
+                            jinja2.Environment(loader=jinja2.BaseLoader())
+                            .from_string(node_attributes["driver_info"][address_key])
+                            .render(
+                                remote_board_address=str(
+                                    ipaddress.ip_interface(node_mgmt_address).ip
+                                )
+                            )
+                        )
+                    else:
+                        logger.error(f"Could not find out-of-band address for {device}")
+                        node_attributes["driver_info"].pop(address_key, None)
+                if (
+                    "port" in driver_params[node_attributes["driver"]]
+                    and "oob_port" in device.custom_fields
+                    and device.custom_fields["oob_port"]
+                ):
+                    port_key = driver_params[node_attributes["driver"]]["port"]
+                    node_attributes["driver_info"].update(
+                        {port_key: device.custom_fields["oob_port"]}
+                    )
+        node_attributes.update({"resource_class": device.name})
+        ports_attributes = [
+            dict(address=interface.mac_address)
+            for interface in node_interfaces
+            if interface.enabled and not interface.mgmt_only and interface.mac_address
+        ]
+        flavor_attributes = {
+            "ram": 1,
+            "disk": 0,
+            "vcpus": 1,
+            "is_public": False,
+            "extra_specs": {
+                "resources:CUSTOM_"
+                + device.name.upper().replace("-", "_").replace(".", "_"): "1",
+                "resources:VCPU": "0",
+                "resources:MEMORY_MB": "0",
+                "resources:DISK_GB": "0",
+            },
+        }
+
+        lock = Redlock(
+            key=f"lock_osism_tasks_conductor_sync_netbox_with_ironic-{device.name}",
+            masters={utils.redis},
+            auto_release_time=60,
+        )
+        if lock.acquire(timeout=20):
+            try:
+                logger.info(f"Processing device {device.name}")
+                node = openstack.baremetal_node_show(device.name, ignore_missing=True)
+                if not node:
+                    logger.info(f"Creating baremetal node for {device.name}")
+                    node = openstack.baremetal_node_create(device.name, node_attributes)
+                else:
+                    # NOTE: The listener service only reacts to changes in the baremetal node. Explicitly sync provision and power state in case updates were missed by the listener.
+                    if (
+                        device.custom_fields["provision_state"]
+                        != node["provision_state"]
+                    ):
+                        netbox.set_provision_state(device.name, node["provision_state"])
+                    if device.custom_fields["power_state"] != node["power_state"]:
+                        netbox.set_power_state(device.name, node["power_state"])
+                    # NOTE: Check whether the baremetal node needs to be updated
+                    node_updates = {}
+                    deep_compare(node_attributes, node, node_updates)
+                    if "driver_info" in node_updates:
+                        # NOTE: The password is not returned by ironic, so we cannot make a comparision and it would always be updated. Therefore we pop it from the dictionary
+                        password_key = driver_params[node_attributes["driver"]][
+                            "password"
+                        ]
+                        if password_key in node_updates["driver_info"]:
+                            node_updates["driver_info"].pop(password_key, None)
+                        if not node_updates["driver_info"]:
+                            node_updates.pop("driver_info", None)
+                    if node_updates or force_update:
+                        logger.info(
+                            f"Updating baremetal node for {device.name} with {node_updates}"
+                        )
+                        # NOTE: Do the actual updates with all values in node_attributes. Otherwise nested dicts like e.g. driver_info will be overwritten as a whole and contain only changed values
+                        node = openstack.baremetal_node_update(
+                            node["uuid"], node_attributes
+                        )
+
+                node_ports = openstack.baremetal_port_list(
+                    details=False, attributes=dict(node_uuid=node["uuid"])
+                )
+                # NOTE: Baremetal ports are only required for (i)pxe boot
+                if node["boot_interface"] in ["pxe", "ipxe"]:
+                    for port_attributes in ports_attributes:
+                        port_attributes.update({"node_id": node["uuid"]})
+                        port = [
+                            port
+                            for port in node_ports
+                            if port_attributes["address"].upper()
+                            == port["address"].upper()
+                        ]
+                        if not port:
+                            logger.info(
+                                f"Creating baremetal port with MAC address {port_attributes['address']} for {device.name}"
+                            )
+                            openstack.baremetal_port_create(port_attributes)
+                        else:
+                            node_ports.remove(port[0])
+                    for node_port in node_ports:
+                        # NOTE: Delete remaining ports not found in netbox
+                        logger.info(
+                            f"Deleting baremetal port with MAC address {node_port['address']} for {device.name}"
+                        )
+                        openstack.baremetal_port_delete(node_port["id"])
+
+                node_validation = openstack.baremetal_node_validate(node["uuid"])
+                if node_validation["management"].result:
+                    logger.info(
+                        f"Validation of management interface successful for baremetal node for {device.name}"
+                    )
+                    if node["provision_state"] == "enroll":
+                        logger.info(
+                            f"Transitioning baremetal node to manageable state for {device.name}"
+                        )
+                        node = openstack.baremetal_node_set_provision_state(
+                            node["uuid"], "manage"
+                        )
+                        node = openstack.baremetal_node_wait_for_nodes_provision_state(
+                            node["uuid"], "manageable"
+                        )
+                        logger.info(f"Baremetal node for {device.name} is manageable")
+                    if node_validation["boot"].result:
+                        logger.info(
+                            f"Validation of boot interface successful for baremetal node for {device.name}"
+                        )
+                        if node["provision_state"] == "manageable":
+                            logger.info(
+                                f"Transitioning baremetal node to available state for {device.name}"
+                            )
+                            node = openstack.baremetal_node_set_provision_state(
+                                node["uuid"], "provide"
+                            )
+                            node = (
+                                openstack.baremetal_node_wait_for_nodes_provision_state(
+                                    node["uuid"], "available"
+                                )
+                            )
+                            logger.info(
+                                f"Baremetal node for {device.name} is available"
+                            )
+                    else:
+                        logger.info(
+                            f"Validation of boot interface failed for baremetal node for {device.name}\nReason: {node_validation['boot'].reason}"
+                        )
+                        if node["provision_state"] == "available":
+                            # NOTE: Demote node to manageable
+                            logger.info(
+                                f"Transitioning baremetal node to manageable state for {device.name}"
+                            )
+                            node = openstack.baremetal_node_set_provision_state(
+                                node["uuid"], "manage"
+                            )
+                            node = (
+                                openstack.baremetal_node_wait_for_nodes_provision_state(
+                                    node["uuid"], "manageable"
+                                )
+                            )
+                            logger.info(
+                                f"Baremetal node for {device.name} is manageable"
+                            )
+                else:
+                    logger.info(
+                        f"Validation of management interface failed for baremetal node for {device.name}\nReason: {node_validation['management'].reason}"
+                    )
+
+                flavor_name = "osism-" + device.name
+                flavor = openstack.compute_flavor_get(flavor_name)
+                if not flavor:
+                    logger.info(f"Creating flavor for {flavor_name}")
+                    flavor = openstack.compute_flavor_create(
+                        flavor_name, flavor_attributes
+                    )
+                else:
+                    flavor_updates = {}
+                    deep_compare(flavor_attributes, flavor, flavor_updates)
+                    flavor_updates_extra_specs = flavor_updates.pop("extra_specs", None)
+                    if flavor_updates:
+                        logger.info(
+                            f"Updating flavor for {device.name} with {flavor_updates}"
+                        )
+                        openstack.compute_flavor_delete(flavor)
+                        flavor = openstack.compute_flavor_create(
+                            flavor_name, flavor_attributes
+                        )
+                    elif flavor_updates_extra_specs:
+                        logger.info(
+                            f"Updating flavor extra_specs for {device.name} with {flavor_updates_extra_specs}"
+                        )
+                        openstack.compute_flavor_update_extra_specs(
+                            flavor, flavor_updates_extra_specs
+                        )
+                    flavor = openstack.compute_flavor_get(flavor_name)
+                    for extra_specs_key in flavor["extra_specs"].keys():
+                        if (
+                            extra_specs_key
+                            not in flavor_attributes["extra_specs"].keys()
+                        ):
+                            logger.info(
+                                f"Deleting flavor extra_specs property {extra_specs_key} for {device.name}"
+                            )
+                            flavor = (
+                                openstack.compute_flavor_delete_extra_specs_property(
+                                    flavor, extra_specs_key
+                                )
+                            )
+
+            except Exception as exc:
+                logger.info(
+                    f"Could not fully synchronize device {device.name} with ironic: {exc}"
+                )
+            finally:
+                lock.release()
+
+        else:
+            logger.error("Could not acquire lock for node {device.name}")
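The new sync_netbox_with_ironic task builds the attributes a node should have (from conductor.yml and NetBox) and then uses the nested deep_compare helper to reduce them to a minimal update dictionary before calling openstack.baremetal_node_update. A minimal, self-contained sketch of those comparison semantics; the helper is copied from the diff above, the sample data is hypothetical:

def deep_compare(a, b, updates):
    """
    Find items in a that do not exist in b or are different.
    Write required changes into updates
    """
    for key, value in a.items():
        if type(value) is not dict:
            if key not in b or b[key] != value:
                updates[key] = value
        else:
            updates[key] = {}
            deep_compare(a[key], b[key], updates[key])
            if not updates[key]:
                updates.pop(key)


# Desired attributes (left) versus the current Ironic node (right); only the
# differing keys survive in the update dictionary.
desired = {
    "driver": "redfish",
    "driver_info": {"redfish_address": "https://192.168.16.10", "redfish_verify_ca": False},
    "resource_class": "node-001",
}
current = {
    "driver": "redfish",
    "driver_info": {"redfish_address": "https://192.168.16.11", "redfish_verify_ca": False},
    "resource_class": "node-001",
}

updates = {}
deep_compare(desired, current, updates)
print(updates)  # {'driver_info': {'redfish_address': 'https://192.168.16.10'}}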
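For the BMC address, the driver_info value from conductor.yml is treated as a Jinja2 template and rendered per device with remote_board_address set to the out-of-band IP taken from NetBox (the oob_address custom field, the device's oob_ip, or a management-only interface, in that order). A sketch of that rendering step; the template value and the address are made up, since the actual conductor.yml is not part of this diff:

import ipaddress

import jinja2

# Hypothetical driver_info entry from /etc/conductor.yml
template = "https://{{ remote_board_address }}"
# Out-of-band address as stored in NetBox, typically with a prefix length
node_mgmt_address = "192.168.16.10/24"

rendered = (
    jinja2.Environment(loader=jinja2.BaseLoader())
    .from_string(template)
    .render(remote_board_address=str(ipaddress.ip_interface(node_mgmt_address).ip))
)
print(rendered)  # https://192.168.16.10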
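The per-device flavors follow the usual pattern for scheduling instances onto Ironic nodes: the standard VCPU/RAM/disk resources are zeroed out and a custom resource class derived from the device name carries the placement, matching the resource_class set on the baremetal node. A small sketch of how the extra_specs key is derived (the device name is hypothetical):

device_name = "node-001.example.com"  # hypothetical NetBox device name

custom_resource = "resources:CUSTOM_" + device_name.upper().replace("-", "_").replace(".", "_")
print(custom_resource)  # resources:CUSTOM_NODE_001_EXAMPLE_COM

extra_specs = {
    custom_resource: "1",
    "resources:VCPU": "0",
    "resources:MEMORY_MB": "0",
    "resources:DISK_GB": "0",
}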
osism/tasks/netbox.py CHANGED
@@ -2,36 +2,21 @@
 
 from celery import Celery
 from celery.signals import worker_process_init
-import json
+from loguru import logger
+from pottery import Redlock
 import pynetbox
-from redis import Redis
 
-from osism import settings
-from osism.actions import manage_device, manage_interface
-from osism.tasks import Config, openstack, run_command
+from osism import settings, utils
+from osism.tasks import Config, run_command
 
 app = Celery("netbox")
 app.config_from_object(Config)
 
-redis = None
-nb = None
-
 
 @worker_process_init.connect
 def celery_init_worker(**kwargs):
-    global nb
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
-
     if settings.NETBOX_URL and settings.NETBOX_TOKEN:
-        nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
+        utils.nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
 
         if settings.IGNORE_SSL_ERRORS:
             import requests
@@ -39,7 +24,7 @@ def celery_init_worker(**kwargs):
             requests.packages.urllib3.disable_warnings()
             session = requests.Session()
             session.verify = False
-            nb.http_session = session
+            utils.nb.http_session = session
 
 
 @app.on_after_configure.connect
@@ -47,99 +32,99 @@ def setup_periodic_tasks(sender, **kwargs):
     pass
 
 
-@app.task(bind=True, name="osism.tasks.netbox.periodic_synchronize_ironic")
-def periodic_synchronize_ironic(self):
-    """Synchronize the state of Ironic with Netbox"""
-    openstack.baremetal_node_list.apply_async((), link=synchronize_device_state.s())
-
-
 @app.task(bind=True, name="osism.tasks.netbox.run")
 def run(self, action, arguments):
     pass
 
 
-@app.task(bind=True, name="osism.tasks.netbox.update_network_interface_name")
-def update_network_interface_name(self, mac_address, network_interface_name):
-    manage_interface.update_network_interface_name(mac_address, network_interface_name)
-
-
-@app.task(bind=True, name="osism.tasks.netbox.synchronize_device_state")
-def synchronize_device_state(self, data):
-    """Synchronize the state of Ironic with Netbox"""
-
-    if type(data) == str:
-        data = json.loads(data)
-
-    if not data:
-        return
-
-    for device in data:
-        manage_device.set_provision_state(device["Name"], device["Provisioning State"])
-        manage_device.set_power_state(device["Name"], device["Power State"])
-
-
-@app.task(bind=True, name="osism.tasks.netbox.states")
-def states(self, data):
-    result = manage_device.get_states(data.keys())
-    return result
-
-
-@app.task(bind=True, name="osism.tasks.netbox.set_state")
-def set_state(self, device=None, state=None, state_type=None):
-    manage_device.set_state(device, state, state_type)
-
-
 @app.task(bind=True, name="osism.tasks.netbox.set_maintenance")
-def set_maintenance(self, device=None, state=None):
-    manage_device.set_maintenance(device, state)
+def set_maintenance(self, device_name, state=True):
+    """Set the maintenance state for a device in the Netbox."""
 
+    lock = Redlock(
+        key=f"lock_osism_tasks_netbox_set_maintenance_{device_name}",
+        masters={utils.redis},
+        auto_release_time=60,
+    )
+    if lock.acquire(timeout=20):
+        try:
+            logger.info(f"Set maintenance state of device {device_name} = {state}")
+
+            device = utils.nb.dcim.devices.get(name=device_name)
+            if device:
+                device.custom_fields.update({"maintenance": state})
+                device.save()
+            else:
+                logger.error(f"Could not set maintenance for {device_name}")
+        finally:
+            lock.release()
+    else:
+        logger.error("Could not acquire lock for node {device_name}")
+
+
+@app.task(bind=True, name="osism.tasks.netbox.set_provision_state")
+def set_provision_state(self, device_name, state):
+    """Set the provision state for a device in the Netbox."""
+
+    lock = Redlock(
+        key=f"lock_osism_tasks_netbox_set_provision_state_{device_name}",
+        masters={utils.redis},
+        auto_release_time=60,
+    )
+    if lock.acquire(timeout=20):
+        try:
+            logger.info(f"Set provision state of device {device_name} = {state}")
+
+            device = utils.nb.dcim.devices.get(name=device_name)
+            if device:
+                device.custom_fields.update({"provision_state": state})
+                device.save()
+            else:
+                logger.error(f"Could not set provision state for {device_name}")
+        finally:
+            lock.release()
+    else:
+        logger.error("Could not acquire lock for node {device_name}")
+
+
+@app.task(bind=True, name="osism.tasks.netbox.set_power_state")
+def set_power_state(self, device_name, state):
+    """Set the provision state for a device in the Netbox."""
+
+    lock = Redlock(
+        key=f"lock_osism_tasks_netbox_set_provision_state_{device_name}",
+        masters={utils.redis},
+        auto_release_time=60,
+    )
+    if lock.acquire(timeout=20):
+        try:
+            logger.info(f"Set power state of device {device_name} = {state}")
 
-@app.task(bind=True, name="osism.tasks.netbox.diff")
-@app.task(bind=True, name="osism.tasks.netbox.get_devices_not_yet_registered_in_ironic")
-def get_devices_not_yet_registered_in_ironic(
-    self, status="active", tags=["managed-by-ironic"], ironic_enabled=True
-):
-    global nb
+            device = utils.nb.dcim.devices.get(name=device_name)
+            if device:
+                device.custom_fields.update({"power_state": state})
+                device.save()
+            else:
+                logger.error(f"Could not set power state for {device_name}")
+        finally:
+            lock.release()
+    else:
+        logger.error("Could not acquire lock for node {device_name}")
 
-    devices = nb.dcim.devices.filter(
-        tag=tags, status=status, cf_ironic_enabled=[ironic_enabled]
-    )
 
-    result = []
-
-    for device in devices:
-        if (
-            "ironic_state" in device.custom_fields
-            and device.custom_fields["ironic_state"] != "registered"
-        ):
-            result.append(device.name)
-
-    return result
-
-
-@app.task(
-    bind=True,
-    name="osism.tasks.netbox.get_devices_that_should_have_an_allocation_in_ironic",
-)
-def get_devices_that_should_have_an_allocation_in_ironic(self):
-    global nb
-
-    devices = nb.dcim.devices.filter(
-        tag=["managed-by-ironic", "managed-by-osism"],
-        status="active",
-        cf_ironic_enabled=[True],
-        cf_ironic_state=["registered"],
-        cf_provision_state=["available"],
-        cf_introspection_state=["introspected"],
-        cf_device_type=["server"],
-    )
+@app.task(bind=True, name="osism.tasks.netbox.get_devices")
+def get_devices_by_tags(self, tags, state="active"):
+    return utils.nb.dcim.devices.filter(tag=tags, state=state)
 
-    result = []
 
-    for device in devices:
-        result.append(device.name)
+@app.task(bind=True, name="osism.tasks.netbox.get_devices")
+def get_device_by_name(self, name):
+    return utils.nb.dcim.devices.get(name=name)
 
-    return result
+
+@app.task(bind=True, name="osism.tasks.netbox.get_interfaces_by_device")
+def get_interfaces_by_device(self, device_name):
+    return utils.nb.dcim.interfaces.filter(device=device_name)
 
 
 @app.task(bind=True, name="osism.tasks.netbox.manage")
@@ -158,14 +143,12 @@ def manage(self, *arguments, publish=True, locking=False, auto_release_time=3600
         *arguments,
         publish=publish,
         locking=locking,
-        auto_release_time=auto_release_time
+        auto_release_time=auto_release_time,
    )
 
 
 @app.task(bind=True, name="osism.tasks.netbox.ping")
 def ping(self):
-    global nb
-
-    status = nb.status()
+    status = utils.nb.status()
 
     return status
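Both modules drop their per-worker Redis client and instead use pottery's Redlock on top of the shared utils.redis connection, so that only one worker touches a given device at a time. A minimal sketch of that locking pattern, assuming a locally reachable Redis as a stand-in for osism.utils.redis:

from pottery import Redlock
from redis import Redis

redis = Redis(host="localhost", port=6379, db=0)  # stand-in for osism.utils.redis

lock = Redlock(
    key="lock_osism_tasks_netbox_set_maintenance_node-001",
    masters={redis},
    auto_release_time=60,  # lock expires automatically if the holder never releases it
)
if lock.acquire(timeout=20):  # give up if the lock cannot be acquired in time
    try:
        # Critical section: exactly one worker updates the device at a time
        pass
    finally:
        lock.release()
else:
    print("Could not acquire lock for node node-001")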