osism 0.20250314.0__py3-none-any.whl → 0.20250331.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/api.py +5 -10
- osism/commands/manage.py +117 -55
- osism/commands/netbox.py +12 -27
- osism/commands/wait.py +5 -12
- osism/core/enums.py +1 -0
- osism/tasks/__init__.py +13 -41
- osism/tasks/ansible.py +0 -15
- osism/tasks/conductor.py +0 -12
- osism/tasks/netbox.py +6 -27
- osism/tasks/openstack.py +81 -51
- osism/tasks/reconciler.py +11 -42
- osism/utils/__init__.py +11 -0
- {osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/METADATA +11 -10
- {osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/RECORD +20 -20
- {osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/WHEEL +1 -1
- {osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/entry_points.txt +0 -1
- osism-0.20250331.0.dist-info/licenses/AUTHORS +1 -0
- osism-0.20250331.0.dist-info/pbr.json +1 -0
- osism-0.20250314.0.dist-info/AUTHORS +0 -1
- osism-0.20250314.0.dist-info/pbr.json +0 -1
- {osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info/licenses}/LICENSE +0 -0
- {osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/top_level.txt +0 -0
osism/api.py
CHANGED
@@ -11,7 +11,7 @@ import pynetbox
 from starlette.middleware.cors import CORSMiddleware
 
 from osism.tasks import reconciler
-from osism import settings
+from osism import settings, utils
 from osism.services.listener import BaremetalEvents
 
 
@@ -75,16 +75,13 @@ app.add_middleware(CORSMiddleware)
 dictConfig(LogConfig().dict())
 logger = logging.getLogger("api")
 
-nb = None
 baremetal_events = BaremetalEvents()
 
 
 @app.on_event("startup")
 async def startup_event():
-    global nb
-
     if settings.NETBOX_URL and settings.NETBOX_TOKEN:
-        nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
+        utils.nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
 
         if settings.IGNORE_SSL_ERRORS:
             import requests
@@ -92,7 +89,7 @@ async def startup_event():
             requests.packages.urllib3.disable_warnings()
             session = requests.Session()
             session.verify = False
-            nb.http_session = session
+            utils.nb.http_session = session
 
 
 @app.get("/")
@@ -125,9 +122,7 @@ async def webhook(
     content_length: int = Header(...),
     x_hook_signature: str = Header(None),
 ):
-
-
-    if nb:
+    if utils.nb:
         data = webhook_input.data
         url = data["url"]
         name = data["name"]
@@ -146,7 +141,7 @@ async def webhook(
         device_type = "interface"
 
         device_id = data["device"]["id"]
-        device = nb.dcim.devices.get(id=device_id)
+        device = utils.nb.dcim.devices.get(id=device_id)
         tags = [str(x) for x in device.tags]
         custom_fields = device.custom_fields
 
osism/commands/manage.py
CHANGED
@@ -1,8 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import os
 from re import findall
-import subprocess
 from urllib.parse import urljoin
 
 from cliff.command import Command
@@ -12,6 +10,7 @@ from loguru import logger
 import requests
 
 from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
+from osism.tasks import openstack, handle_task
 
 SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.29", "1.30", "1.31"]
 
@@ -20,6 +19,12 @@ class ImageClusterapi(Command):
     def get_parser(self, prog_name):
         parser = super(ImageClusterapi, self).get_parser(prog_name)
 
+        parser.add_argument(
+            "--no-wait",
+            default=False,
+            help="Do not wait until image management has been completed",
+            action="store_true",
+        )
         parser.add_argument(
             "--base-url",
             type=str,
@@ -56,13 +61,14 @@ class ImageClusterapi(Command):
         cloud = parsed_args.cloud
         filter = parsed_args.filter
         tag = parsed_args.tag
+        wait = not parsed_args.no_wait
 
         if filter:
             supported_cluterapi_k8s_images = [filter]
         else:
             supported_cluterapi_k8s_images = SUPPORTED_CLUSTERAPI_K8S_IMAGES
 
-
+        result = []
         for kubernetes_release in supported_cluterapi_k8s_images:
             url = urljoin(base_url, f"last-{kubernetes_release}")
 
@@ -84,18 +90,18 @@ class ImageClusterapi(Command):
             logger.info(f"checksum: {splitted_checksum[0]}")
 
             template = Template(TEMPLATE_IMAGE_CLUSTERAPI)
-            result
-
-
-
-
+            result.extend(
+                [
+                    template.render(
+                        image_url=url,
+                        image_checksum=f"sha256:{splitted_checksum[0]}",
+                        image_version=r[0].strip(),
+                        image_builddate=splitted[0],
+                    )
+                ]
             )
-            with open(f"/tmp/clusterapi/k8s-{kubernetes_release}.yml", "w+") as fp:
-                fp.write(result)
 
             args = [
-                "openstack-image-manager",
-                "--images=/tmp/clusterapi",
                 "--cloud",
                 cloud,
                 "--filter",
@@ -105,15 +111,32 @@ class ImageClusterapi(Command):
                 args.extend(["--tag", tag])
             if parsed_args.dry_run:
                 args.append("--dry-run")
-
+
+            task_signature = openstack.image_manager.si(*args, configs=result)
+            task = task_signature.apply_async()
+            if wait:
+                logger.info(
+                    f"It takes a moment until task {task.task_id} (image-manager) has been started and output is visible here."
+                )
+
+            return handle_task(task, wait, format="script", timeout=3600)
 
 
 class ImageOctavia(Command):
     def get_parser(self, prog_name):
         parser = super(ImageOctavia, self).get_parser(prog_name)
+        parser.add_argument(
+            "--no-wait",
+            default=False,
+            help="Do not wait until image management has been completed",
+            action="store_true",
+        )
 
         parser.add_argument(
-            "--cloud",
+            "--cloud",
+            type=str,
+            help="Cloud name in clouds.yaml (will be overruled by OS_AUTH_URL envvar)",
+            default="octavia",
         )
         parser.add_argument(
             "--base-url",
@@ -124,6 +147,7 @@ class ImageOctavia(Command):
         return parser
 
     def take_action(self, parsed_args):
+        wait = not parsed_args.no_wait
         cloud = parsed_args.cloud
         base_url = parsed_args.base_url
 
@@ -147,21 +171,31 @@ class ImageOctavia(Command):
         logger.info(f"checksum: {splitted_checksum[0]}")
 
         template = Template(TEMPLATE_IMAGE_OCTAVIA)
-        result =
-
-
-
-
+        result = []
+        result.extend(
+            [
+                template.render(
+                    image_url=url,
+                    image_checksum=f"sha256:{splitted_checksum[0]}",
+                    image_version=splitted[0],
+                    image_builddate=splitted[0],
+                )
+            ]
         )
+        arguments = [
+            "--cloud",
+            cloud,
+            "--deactivate",
+        ]
 
-
-
-
+        task_signature = openstack.image_manager.si(*arguments, configs=result)
+        task = task_signature.apply_async()
+        if wait:
+            logger.info(
+                f"It takes a moment until task {task.task_id} (image-manager) has been started and output is visible here."
+            )
 
-
-            "/usr/local/bin/openstack-image-manager --images=/tmp/octavia --cloud octavia --deactivate",
-            shell=True,
-        )
+        return handle_task(task, wait, format="script", timeout=3600)
 
 
 class Images(Command):
@@ -173,6 +207,12 @@ class Images(Command):
     # to typer. Then openstack-image-manager can simply be included directly at this
     # point.
 
+        parser.add_argument(
+            "--no-wait",
+            default=False,
+            help="Do not wait until image management has been completed",
+            action="store_true",
+        )
         parser.add_argument(
             "--dry-run",
             default=False,
@@ -185,6 +225,12 @@ class Images(Command):
             help="Hide images that should be deleted",
             action="store_true",
         )
+        parser.add_argument(
+            "--delete",
+            default=False,
+            help="Delete images that should be deleted",
+            action="store_true",
+        )
         parser.add_argument(
             "--latest",
             default=False,
@@ -210,35 +256,39 @@ class Images(Command):
         return parser
 
     def take_action(self, parsed_args):
-
-        dry_run = parsed_args.dry_run
-        filter = parsed_args.filter
-        hide = parsed_args.hide
-        latest = parsed_args.latest
-        images = parsed_args.images
+        wait = not parsed_args.no_wait
 
         arguments = []
-        if cloud:
-            arguments.append(
-
-
-
+        if parsed_args.cloud:
+            arguments.append("--cloud")
+            arguments.append(parsed_args.cloud)
+        if parsed_args.filter:
+            arguments.append("--filter")
+            arguments.append(parsed_args.filter)
+        if parsed_args.delete:
+            arguments.append("--delete")
+            arguments.append("--yes-i-really-know-what-i-do")
+        if parsed_args.dry_run:
             arguments.append("--dry-run")
-        if latest:
+        if parsed_args.latest:
             arguments.append("--latest")
-        if hide:
+        if parsed_args.hide:
             arguments.append("--hide")
 
-
-
+        arguments.append("--images")
+        if parsed_args.images:
+            arguments.append(parsed_args.images)
         else:
-            arguments.append("
+            arguments.append("/etc/images")
 
-
-
-
-
-
+        task_signature = openstack.image_manager.si(*arguments)
+        task = task_signature.apply_async()
+        if wait:
+            logger.info(
+                f"It takes a moment until task {task.task_id} (image-manager) has been started and output is visible here."
+            )
+
+        return handle_task(task, wait, format="script", timeout=3600)
 
 
 class Flavors(Command):
@@ -250,6 +300,12 @@ class Flavors(Command):
     # to typer. Then openstack-flavor-manager can simply be included directly at this
     # point.
 
+        parser.add_argument(
+            "--no-wait",
+            default=False,
+            help="Do not wait until flavor management has been completed",
+            action="store_true",
+        )
         parser.add_argument(
             "--cloud", type=str, help="Cloud name in clouds.yaml", default="admin"
         )
@@ -276,23 +332,29 @@ class Flavors(Command):
         return parser
 
     def take_action(self, parsed_args):
+        wait = not parsed_args.no_wait
         cloud = parsed_args.cloud
         name = parsed_args.name
         recommended = parsed_args.recommended
         url = parsed_args.url
 
-        arguments = [
+        arguments = ["--name", name]
         if cloud:
-            arguments.append(
+            arguments.append("--cloud")
+            arguments.append(cloud)
 
         if recommended:
             arguments.append("--recommended")
 
         if url:
-            arguments.append(
+            arguments.append("--url")
+            arguments.append(url)
+
+        task_signature = openstack.flavor_manager.si(*arguments)
+        task = task_signature.apply_async()
+        if wait:
+            logger.info(
+                f"It takes a moment until task {task.task_id} (flavor-manager) has been started and output is visible here."
+            )
 
-
-        subprocess.call(
-            f"/usr/local/bin/openstack-flavor-manager {joined_arguments}",
-            shell=True,
-        )
+        return handle_task(task, wait, format="script", timeout=3600)
osism/commands/netbox.py
CHANGED
@@ -1,37 +1,11 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import argparse
-
 from cliff.command import Command
 from loguru import logger
-from redis import Redis
 
-from osism import settings
 from osism.tasks import conductor, netbox, reconciler, openstack, handle_task
 
 
-redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
-redis.ping()
-
-
-class Run(Command):
-    def get_parser(self, prog_name):
-        parser = super(Run, self).get_parser(prog_name)
-        parser.add_argument(
-            "arguments", nargs=argparse.REMAINDER, help="Other arguments for Ansible"
-        )
-        parser.add_argument(
-            "--no-wait",
-            default=False,
-            help="Do not wait until the role has been applied",
-            action="store_true",
-        )
-        return parser
-
-    def take_action(self, parsed_args):
-        pass
-
-
 class Ironic(Command):
     def get_parser(self, prog_name):
         parser = super(Ironic, self).get_parser(prog_name)
@@ -89,6 +63,12 @@ class Manage(Command):
             help="Do not wait for the netbox API to be ready",
             action="store_true",
         )
+        parser.add_argument(
+            "--parallel",
+            type=str,
+            default=None,
+            help="Process up to n files in parallel",
+        )
         parser.add_argument(
             "--limit",
             type=str,
@@ -124,8 +104,13 @@ class Manage(Command):
         else:
             arguments.append("--wait")
 
+        if parsed_args.parallel:
+            arguments.append("--parallel")
+            arguments.append(parsed_args.parallel)
+
         if parsed_args.limit:
-            arguments.append("--limit
+            arguments.append("--limit")
+            arguments.append(parsed_args.limit)
 
         if parsed_args.skipdtl:
             arguments.append("--skipdtl")
osism/commands/wait.py
CHANGED
@@ -6,8 +6,7 @@ from celery import Celery
 from celery.result import AsyncResult
 from cliff.command import Command
 from loguru import logger
-from
-from osism import settings
+from osism import utils
 from osism.tasks import Config
 
 
@@ -119,18 +118,12 @@
                 print(f"{task_id} = STARTED")
 
         if live:
-            redis
-                host=settings.REDIS_HOST,
-                port=settings.REDIS_PORT,
-                db=settings.REDIS_DB,
-                socket_keepalive=True,
-            )
-            redis.ping()
+            utils.redis.ping()
 
             last_id = 0
             while_True = True
             while while_True:
-                data = redis.xread(
+                data = utils.redis.xread(
                     {str(task_id): last_id}, count=1, block=1000
                 )
                 if data:
@@ -143,7 +136,7 @@
                         logger.debug(
                             f"Processing message {last_id} of type {message_type}"
                         )
-                        redis.xdel(str(task_id), last_id)
+                        utils.redis.xdel(str(task_id), last_id)
 
                         if message_type == "stdout":
                             print(message_content, end="")
@@ -153,7 +146,7 @@
                             message_type == "action"
                             and message_content == "quit"
                         ):
-                            redis.close()
+                            utils.redis.close()
                             if len(task_ids) == 1:
                                 return rc
                             else:
osism/core/enums.py
CHANGED
@@ -107,6 +107,7 @@ VALIDATE_PLAYBOOKS = {
     "ntp": {"environment": "generic", "runtime": "osism-ansible"},
     "system-encoding": {"environment": "generic", "runtime": "osism-ansible"},
     "ulimits": {"environment": "generic", "runtime": "osism-ansible"},
+    "stress": {"environment": "generic", "runtime": "osism-ansible"},
 }
 
 MAP_ROLE2ROLE = {
osism/tasks/__init__.py
CHANGED
@@ -5,14 +5,10 @@ import re
 import subprocess
 import time
 
-from celery.signals import worker_process_init
 from loguru import logger
-from redis import Redis
 from pottery import Redlock
 
-from osism import
-
-redis = None
+from osism import utils
 
 
 class Config:
@@ -36,19 +32,6 @@
     }
 
 
-@worker_process_init.connect
-def celery_init_worker(**kwargs):
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
-
-
 def run_ansible_in_environment(
     request_id,
     worker,
@@ -88,7 +71,7 @@ def run_ansible_in_environment(
 
     # NOTE: This is a first step to make Ansible Vault usable via OSISM workers.
     # It's not ready in that form yet.
-    ansible_vault_password = redis.get("ansible_vault_password")
+    ansible_vault_password = utils.redis.get("ansible_vault_password")
     if ansible_vault_password:
         env["VAULT"] = "/ansible-vault.py"
 
@@ -96,7 +79,7 @@ def run_ansible_in_environment(
     if locking:
         lock = Redlock(
             key=f"lock-ansible-{environment}-{role}",
-            masters={redis},
+            masters={utils.redis},
             auto_release_time=auto_release_time,
         )
 
@@ -178,14 +161,14 @@ def run_ansible_in_environment(
     while p.poll() is None:
         line = p.stdout.readline().decode("utf-8")
         if publish:
-            redis.xadd(request_id, {"type": "stdout", "content": line})
+            utils.redis.xadd(request_id, {"type": "stdout", "content": line})
         result += line
 
     rc = p.wait(timeout=60)
 
     if publish:
-        redis.xadd(request_id, {"type": "rc", "content": rc})
-        redis.xadd(request_id, {"type": "action", "content": "quit"})
+        utils.redis.xadd(request_id, {"type": "rc", "content": rc})
+        utils.redis.xadd(request_id, {"type": "action", "content": "quit"})
 
     if locking:
         lock.release()
@@ -209,7 +192,7 @@ def run_command(
     if locking:
         lock = Redlock(
             key=f"lock-{command}",
-            masters={redis},
+            masters={utils.redis},
             auto_release_time=auto_release_time,
         )
 
@@ -222,14 +205,14 @@ def run_command(
     while p.poll() is None:
         line = p.stdout.readline().decode("utf-8")
         if publish:
-            redis.xadd(request_id, {"type": "stdout", "content": line})
+            utils.redis.xadd(request_id, {"type": "stdout", "content": line})
         result += line
 
     rc = p.wait(timeout=60)
 
     if publish:
-        redis.xadd(request_id, {"type": "rc", "content": rc})
-        redis.xadd(request_id, {"type": "action", "content": "quit"})
+        utils.redis.xadd(request_id, {"type": "rc", "content": rc})
+        utils.redis.xadd(request_id, {"type": "action", "content": "quit"})
 
     if locking:
         lock.release()
@@ -238,23 +221,12 @@ def run_command(
 
 
 def handle_task(t, wait=True, format="log", timeout=3600):
-    global redis
-
-    if not redis:
-        redis = Redis(
-            host=settings.REDIS_HOST,
-            port=settings.REDIS_PORT,
-            db=settings.REDIS_DB,
-            socket_keepalive=True,
-        )
-        redis.ping()
-
     rc = 0
     if wait:
         stoptime = time.time() + timeout
         last_id = 0
         while time.time() < stoptime:
-            data = redis.xread(
+            data = utils.redis.xread(
                 {str(t.task_id): last_id}, count=1, block=(timeout * 1000)
             )
             if data:
@@ -266,7 +238,7 @@ def handle_task(t, wait=True, format="log", timeout=3600):
                 message_content = message[b"content"].decode()
 
                 logger.debug(f"Processing message {last_id} of type {message_type}")
-                redis.xdel(str(t.task_id), last_id)
+                utils.redis.xdel(str(t.task_id), last_id)
 
                 if message_type == "stdout":
                     print(message_content, end="", flush=True)
@@ -279,7 +251,7 @@ def handle_task(t, wait=True, format="log", timeout=3600):
                 elif message_type == "rc":
                     rc = int(message_content)
                 elif message_type == "action" and message_content == "quit":
-                    redis.close()
+                    utils.redis.close()
                     return rc
         else:
             logger.info(
osism/tasks/ansible.py
CHANGED
@@ -1,25 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import functools
-from threading import RLock
-
 from celery import Celery
-import kombu.utils
 
 from osism import settings
 from osism.tasks import Config, run_ansible_in_environment
 
-# https://github.com/celery/kombu/issues/1804
-if not getattr(kombu.utils.cached_property, "lock", None):
-    setattr(
-        kombu.utils.cached_property,
-        "lock",
-        functools.cached_property(lambda _: RLock()),
-    )
-    # Must call __set_name__ here since this cached property is not defined in the context of a class
-    # Refer to https://docs.python.org/3/reference/datamodel.html#object.__set_name__
-    kombu.utils.cached_property.lock.__set_name__(kombu.utils.cached_property, "lock")
-
 app = Celery("ansible")
 app.config_from_object(Config)
 
osism/tasks/conductor.py
CHANGED
@@ -5,10 +5,8 @@ from celery.signals import worker_process_init
 import keystoneauth1
 from loguru import logger
 import openstack
-from redis import Redis
 import yaml
 
-from osism import settings
 from osism.tasks import Config
 
 app = Celery("conductor")
@@ -16,21 +14,11 @@ app.config_from_object(Config)
 
 
 configuration = {}
-redis = None
 
 
 @worker_process_init.connect
 def celery_init_worker(**kwargs):
     global configuration
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
 
     # Parameters come from the environment, OS_*
     try:
osism/tasks/netbox.py
CHANGED
@@ -4,34 +4,19 @@ from celery import Celery
 from celery.signals import worker_process_init
 import json
 import pynetbox
-from redis import Redis
 
-from osism import settings
+from osism import settings, utils
 from osism.actions import manage_device, manage_interface
 from osism.tasks import Config, openstack, run_command
 
 app = Celery("netbox")
 app.config_from_object(Config)
 
-redis = None
-nb = None
-
 
 @worker_process_init.connect
 def celery_init_worker(**kwargs):
-    global nb
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
-
     if settings.NETBOX_URL and settings.NETBOX_TOKEN:
-        nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
+        utils.nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
 
         if settings.IGNORE_SSL_ERRORS:
             import requests
@@ -39,7 +24,7 @@ def celery_init_worker(**kwargs):
             requests.packages.urllib3.disable_warnings()
             session = requests.Session()
             session.verify = False
-            nb.http_session = session
+            utils.nb.http_session = session
 
 
 @app.on_after_configure.connect
@@ -99,9 +84,7 @@ def set_maintenance(self, device=None, state=None):
 def get_devices_not_yet_registered_in_ironic(
     self, status="active", tags=["managed-by-ironic"], ironic_enabled=True
 ):
-
-
-    devices = nb.dcim.devices.filter(
+    devices = utils.nb.dcim.devices.filter(
         tag=tags, status=status, cf_ironic_enabled=[ironic_enabled]
     )
 
@@ -122,9 +105,7 @@
     name="osism.tasks.netbox.get_devices_that_should_have_an_allocation_in_ironic",
 )
 def get_devices_that_should_have_an_allocation_in_ironic(self):
-
-
-    devices = nb.dcim.devices.filter(
+    devices = utils.nb.dcim.devices.filter(
         tag=["managed-by-ironic", "managed-by-osism"],
         status="active",
         cf_ironic_enabled=[True],
@@ -164,8 +145,6 @@ def manage(self, *arguments, publish=True, locking=False, auto_release_time=3600
 
 @app.task(bind=True, name="osism.tasks.netbox.ping")
 def ping(self):
-
-
-    status = nb.status()
+    status = utils.nb.status()
 
     return status
osism/tasks/openstack.py
CHANGED
@@ -1,60 +1,20 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import functools
 import copy
 import ipaddress
-from threading import RLock
 
 from celery import Celery
-from celery.signals import worker_process_init
 import jinja2
-import
-import kombu.utils
-import openstack
+from openstack.exceptions import ConflictException, ResourceNotFound, ResourceFailure
 from pottery import Redlock
-
+import tempfile
 
-from osism import settings
-from osism.tasks import Config, conductor, netbox
 from osism import utils
-
-# https://github.com/celery/kombu/issues/1804
-if not getattr(kombu.utils.cached_property, "lock", None):
-    setattr(
-        kombu.utils.cached_property,
-        "lock",
-        functools.cached_property(lambda _: RLock()),
-    )
-    # Must call __set_name__ here since this cached property is not defined in the context of a class
-    # Refer to https://docs.python.org/3/reference/datamodel.html#object.__set_name__
-    kombu.utils.cached_property.lock.__set_name__(kombu.utils.cached_property, "lock")
+from osism.tasks import Config, conductor, netbox, run_command
 
 app = Celery("openstack")
 app.config_from_object(Config)
 
-redis = None
-conn = None
-
-
-@worker_process_init.connect
-def celery_init_worker(**kwargs):
-    global conn
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
-
-    # Parameters come from the environment, OS_*
-    try:
-        conn = openstack.connect()
-    except keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions:
-        pass
-
 
 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
@@ -63,24 +23,28 @@ def setup_periodic_tasks(sender, **kwargs):
 
 @app.task(bind=True, name="osism.tasks.openstack.image_get")
 def image_get(self, image_name):
+    conn = utils.get_openstack_connection()
     result = conn.image.find_image(image_name)
     return result.id
 
 
 @app.task(bind=True, name="osism.tasks.openstack.network_get")
 def network_get(self, network_name):
+    conn = utils.get_openstack_connection()
     result = conn.network.find_network(network_name)
     return result.id
 
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_show")
 def baremetal_node_show(self, node_id_or_name):
+    conn = utils.get_openstack_connection()
     result = conn.baremetal.find_node(node_id_or_name)
     return result
 
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_list")
 def baremetal_node_list(self):
+    conn = utils.get_openstack_connection()
     nodes = conn.baremetal.nodes()
     result = []
 
@@ -115,7 +79,7 @@ def baremetal_introspection_status(self, node_id_or_name):
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_get_network_interface_name")
 def baremetal_get_network_interface_name(self, node_name, mac_address):
-
+    conn = utils.get_openstack_connection()
 
     introspection = conn.baremetal_introspection.get_introspection(node_name)
 
@@ -137,18 +101,18 @@
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_set_node_provision_state")
 def baremetal_set_node_provision_state(self, node, state):
-
+    conn = utils.get_openstack_connection()
     conn.baremetal.set_node_provision_state(node, state)
 
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_create_allocations")
 def baremetal_create_allocations(self, nodes):
-
+    conn = utils.get_openstack_connection()
 
     for node in nodes:
         try:
             allocation_a = conn.baremetal.get_allocation(allocation=node)
-        except
+        except ResourceNotFound:
             allocation_a = None
 
         if not allocation_a:
@@ -167,7 +131,7 @@ def baremetal_create_allocations(self, nodes):
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_create_nodes")
 def baremetal_create_nodes(self, nodes, ironic_parameters):
-
+    conn = utils.get_openstack_connection()
 
     for node in nodes:
         # TODO: Filter on mgmt_only
@@ -221,10 +185,10 @@
             }
             device_a.save()
 
-        except
+        except ResourceFailure:
             # TODO: Do something useful here
             pass
-        except
+        except ConflictException:
            # The node already exists and has a wronge state in the Netbox
             device_a = utils.nb.dcim.devices.get(name=node)
             device_a.custom_fields = {
@@ -237,7 +201,7 @@
 def baremetal_check_allocations(self):
     lock = Redlock(
         key="lock_osism_tasks_openstack_baremetal_check_allocations",
-        masters={redis},
+        masters={utils.redis},
         auto_release_time=60,
     )
 
@@ -250,6 +214,8 @@
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_create_internal_flavor")
 def baremetal_create_internal_flavor(self, node):
+    conn = utils.get_openstack_connection()
+
     flavor_a = conn.compute.create_flavor(
         name=f"osism-{node}", ram=1, vcpus=1, disk=1, is_public=False
     )
@@ -265,5 +231,69 @@
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_delete_internal_flavor")
 def baremetal_delete_internal_flavor(self, node):
+    conn = utils.get_openstack_connection()
+
     flavor = conn.compute.get_flavor(f"osism-{node}")
     conn.compute.delete_flavor(flavor)
+
+
+@app.task(bind=True, name="osism.tasks.openstack.image_manager")
+def image_manager(
+    self, *arguments, configs=None, publish=True, locking=False, auto_release_time=3600
+):
+    command = "/usr/local/bin/openstack-image-manager"
+    if configs:
+        with tempfile.TemporaryDirectory() as temp_dir:
+            for config in configs:
+                with tempfile.NamedTemporaryFile(
+                    mode="w+", suffix=".yml", dir=temp_dir, delete=False
+                ) as temp_file:
+                    temp_file.write(config)
+
+            sanitized_args = [
+                arg for arg in arguments if not arg.startswith("--images=")
+            ]
+
+            try:
+                images_index = sanitized_args.index("--images")
+                sanitized_args.pop(images_index)
+                sanitized_args.pop(images_index)
+            except ValueError:
+                pass
+            sanitized_args.extend(["--images", temp_dir])
+            rc = run_command(
+                self.request.id,
+                command,
+                {},
+                *sanitized_args,
+                publish=publish,
+                locking=locking,
+                auto_release_time=auto_release_time,
+            )
+            return rc
+    else:
+        return run_command(
+            self.request.id,
+            command,
+            {},
+            *arguments,
+            publish=publish,
+            locking=locking,
+            auto_release_time=auto_release_time,
+        )
+
+
+@app.task(bind=True, name="osism.tasks.openstack.flavor_manager")
+def flavor_manager(
+    self, *arguments, publish=True, locking=False, auto_release_time=3600
+):
+    command = "/usr/local/bin/openstack-flavor-manager"
+    return run_command(
+        self.request.id,
+        command,
+        {},
+        *arguments,
+        publish=publish,
+        locking=locking,
+        auto_release_time=auto_release_time,
+    )
osism/tasks/reconciler.py
CHANGED
@@ -1,50 +1,17 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import functools
 import io
 import subprocess
-from threading import RLock
 
 from celery import Celery
-from celery.signals import worker_process_init
-import kombu.utils
 from loguru import logger
 from pottery import Redlock
-from
-
-from osism import settings
+from osism import settings, utils
 from osism.tasks import Config
 
-
-# https://github.com/celery/kombu/issues/1804
-if not getattr(kombu.utils.cached_property, "lock", None):
-    setattr(
-        kombu.utils.cached_property,
-        "lock",
-        functools.cached_property(lambda _: RLock()),
-    )
-    # Must call __set_name__ here since this cached property is not defined in the context of a class
-    # Refer to https://docs.python.org/3/reference/datamodel.html#object.__set_name__
-    kombu.utils.cached_property.lock.__set_name__(kombu.utils.cached_property, "lock")
-
 app = Celery("reconciler")
 app.config_from_object(Config)
 
-redis = None
-
-
-@worker_process_init.connect
-def celery_init_worker(**kwargs):
-    global redis
-
-    redis = Redis(
-        host=settings.REDIS_HOST,
-        port=settings.REDIS_PORT,
-        db=settings.REDIS_DB,
-        socket_keepalive=True,
-    )
-    redis.ping()
-
 
 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
@@ -56,7 +23,9 @@ def setup_periodic_tasks(sender, **kwargs):
 @app.task(bind=True, name="osism.tasks.reconciler.run")
 def run(self, publish=True):
     lock = Redlock(
-        key="lock_osism_tasks_reconciler_run",
+        key="lock_osism_tasks_reconciler_run",
+        masters={utils.redis},
+        auto_release_time=60,
     )
 
     if lock.acquire(timeout=20):
@@ -67,13 +36,13 @@ def run(self, publish=True):
 
         for line in io.TextIOWrapper(p.stdout, encoding="utf-8"):
             if publish:
-                redis.xadd(self.request.id, {"type": "stdout", "content": line})
+                utils.redis.xadd(self.request.id, {"type": "stdout", "content": line})
 
         rc = p.wait(timeout=60)
 
         if publish:
-            redis.xadd(self.request.id, {"type": "rc", "content": rc})
-            redis.xadd(self.request.id, {"type": "action", "content": "quit"})
+            utils.redis.xadd(self.request.id, {"type": "rc", "content": rc})
+            utils.redis.xadd(self.request.id, {"type": "action", "content": "quit"})
 
         lock.release()
 
@@ -82,7 +51,7 @@ def run(self, publish=True):
 def run_on_change(self):
     lock = Redlock(
         key="lock_osism_tasks_reconciler_run_on_change",
-        masters={redis},
+        masters={utils.redis},
         auto_release_time=60,
     )
 
@@ -100,7 +69,7 @@
 def sync_inventory_with_netbox(self):
     lock = Redlock(
         key="lock_osism_tasks_reconciler_sync_inventory_with_netbox",
-        masters={redis},
+        masters={utils.redis},
         auto_release_time=60,
     )
 
@@ -114,13 +83,13 @@
 
     for line in io.TextIOWrapper(p.stdout, encoding="utf-8"):
         # NOTE: use task_id or request_id in future
-        redis.publish(
+        utils.redis.publish(
             "netbox-sync-inventory-with-netbox", {"type": "stdout", "content": line}
         )
 
     lock.release()
 
     # NOTE: use task_id or request_id in future
-    redis.publish(
+    utils.redis.publish(
         "netbox-sync-inventory-with-netbox", {"type": "action", "content": "quit"}
     )
osism/utils/__init__.py
CHANGED
@@ -1,5 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
+import keystoneauth1
+import openstack
 import pynetbox
 from redis import Redis
 import urllib3
@@ -29,6 +31,15 @@ else:
     nb = None
 
 
+def get_openstack_connection():
+    try:
+        conn = openstack.connect()
+    except keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions:
+        pass
+
+    return conn
+
+
 # https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition
 def first(iterable, condition=lambda x: True):
     """
{osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: osism
-Version: 0.
+Version: 0.20250331.0
 Summary: OSISM manager interface
 Home-page: https://github.com/osism/python-osism
 Author: OSISM GmbH
@@ -29,28 +29,28 @@ Requires-Dist: PyYAML==6.0.2
 Requires-Dist: ara==1.7.2
 Requires-Dist: celery[redis]==5.4.0
 Requires-Dist: cliff==4.9.1
-Requires-Dist: deepdiff==8.
+Requires-Dist: deepdiff==8.4.2
 Requires-Dist: docker==7.1.0
 Requires-Dist: dtrack-auditor==1.5.0
-Requires-Dist: fastapi==0.115.
+Requires-Dist: fastapi==0.115.12
 Requires-Dist: flower==2.0.1
 Requires-Dist: hiredis==3.1.0
 Requires-Dist: jc==1.25.4
 Requires-Dist: keystoneauth1==5.10.0
-Requires-Dist: kombu==5.5.
+Requires-Dist: kombu==5.5.2
 Requires-Dist: kubernetes==32.0.1
 Requires-Dist: loguru==0.7.3
 Requires-Dist: netmiko==4.5.0
 Requires-Dist: nornir-ansible==2023.12.28
 Requires-Dist: nornir==3.5.0
 Requires-Dist: openstacksdk==4.4.0
-Requires-Dist: pottery==3.0.
+Requires-Dist: pottery==3.0.1
 Requires-Dist: prompt-toolkit==3.0.50
 Requires-Dist: pydantic==1.10.21
 Requires-Dist: pynetbox==7.4.1
-Requires-Dist: pytest-testinfra==10.
+Requires-Dist: pytest-testinfra==10.2.2
 Requires-Dist: python-dateutil==2.9.0.post0
-Requires-Dist: setuptools==
+Requires-Dist: setuptools==78.1.0
 Requires-Dist: sqlmodel==0.0.24
 Requires-Dist: sushy==5.5.0
 Requires-Dist: tabulate==0.9.0
@@ -58,8 +58,8 @@ Requires-Dist: transitions==0.9.2
 Requires-Dist: uvicorn[standard]==0.34.0
 Requires-Dist: watchdog==6.0.0
 Provides-Extra: ansible
-Requires-Dist: ansible-runner==2.4.
-Requires-Dist: ansible-core==2.18.
+Requires-Dist: ansible-runner==2.4.1; extra == "ansible"
+Requires-Dist: ansible-core==2.18.4; extra == "ansible"
 Provides-Extra: openstack-image-manager
 Requires-Dist: openstack-image-manager==0.20250314.0; extra == "openstack-image-manager"
 Dynamic: author
@@ -67,6 +67,7 @@ Dynamic: author-email
 Dynamic: classifier
 Dynamic: description
 Dynamic: home-page
+Dynamic: license-file
 Dynamic: requires-dist
 Dynamic: requires-python
 Dynamic: summary
{osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 osism/__init__.py,sha256=1UiNTBus0V0f2AbZQzAtVtu6zkfCCrw0OTq--NwFAqY,341
 osism/__main__.py,sha256=ILe4gu61xEISiBsxanqTQIdSkV-YhpZXTRlguCYyssk,141
-osism/api.py,sha256=
+osism/api.py,sha256=Lvkdd92tvv9RtoMs9RtvqsN3DiSKPdSll24J3wRzbBY,4793
 osism/main.py,sha256=Dt2-9sLXcS-Ny4DAz7hrha-KRc7zd7BFUTRdfs_X8z4,893
 osism/settings.py,sha256=m__DltxKQo5D-vDKKwY8RNBVs5bverYdJmtyVyln_6o,1049
 osism/actions/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
@@ -15,8 +15,8 @@ osism/commands/console.py,sha256=8BPz1hio5Wi6kONVAWFuSqkDRrMcLEYeFIY8dbtN6e4,321
 osism/commands/container.py,sha256=Fku2GaCM3Idq_FxExUtNqjrEM0XYjpVvXmueSVO8S_c,1601
 osism/commands/get.py,sha256=ryytjtXWmlMV0NucP5tGkMZu0nIlC4xVtjRk4iMZ06c,8967
 osism/commands/log.py,sha256=2IpYuosC7FZwwLvM8HmKSU1NRNIelVVYzqjjVMCrOJk,4072
-osism/commands/manage.py,sha256=
-osism/commands/netbox.py,sha256=
+osism/commands/manage.py,sha256=SDJyH3zwdaOjVWURIIjm8WMo6zSor1Y_TiTYgeMt4pI,11932
+osism/commands/netbox.py,sha256=FYBHcOR_cO-n7rcf4V_-DbwUCgMLFmrrPKCjd0zQOp4,4548
 osism/commands/noset.py,sha256=7zDFuFMyNpo7DUOKcNiYV8nodtdMOYFp5LDPcuJhlZ8,1481
 osism/commands/reconciler.py,sha256=Ja_b86gX6-_Pr3DmrUUvskmEnnJpHQ-XJNQLycMJeyc,2818
 osism/commands/server.py,sha256=zFXRdYoj4ZNDJNPSaGddMPEWxt8G2GyMomPOcCOaN3c,4137
@@ -28,30 +28,30 @@ osism/commands/task.py,sha256=mwJJ7a71Lw3o_FX7j3rR0-NbPdPwMDOjbOAiiXE4uGc,543
 osism/commands/validate.py,sha256=hIQB0zk4xIBZJORtBp_tWrXTRKKhB2qi6j-mznDxKR4,4191
 osism/commands/vault.py,sha256=Ip0IMR7zaBkPbLJenXr4ZwxM6FnozZ9wn9rwHmFHo8s,1818
 osism/commands/volume.py,sha256=SqD9pYgtcYnMu6sB2pG8lfrLHRq6GzOb_-RkWOOVZPo,3156
-osism/commands/wait.py,sha256=
+osism/commands/wait.py,sha256=mKFDqEXcaLlKw1T3MuBEZpNh7CeL3lpUXgubD2_f8es,6580
 osism/commands/worker.py,sha256=iraCOEhCp7WgfjfZ0-12XQYQPUjpi9rSJK5Z9JfNJk4,1651
 osism/core/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
-osism/core/enums.py,sha256=
+osism/core/enums.py,sha256=UDV3WoOp9kfGTPCQ94tr-2v6c07pNP2kYrxxv6pwxDI,9638
 osism/core/playbooks.py,sha256=M3T3ajV-8Lt-orsRO3jAoukhaoYFr4EZ2dzYXQjt1kg,728
 osism/data/__init__.py,sha256=izXdh0J3vPLQI7kBhJI7ibJQzPqU_nlONP0L4Cf_k6A,1504
 osism/plugins/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/services/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/services/listener.py,sha256=JjCdwPG5U9b_xYDpGFQeiLPP4y00GM3Me6NW1tt6Jws,11275
-osism/tasks/__init__.py,sha256=
-osism/tasks/ansible.py,sha256=
+osism/tasks/__init__.py,sha256=lrSkcZtbzhWsLS4hWadKfpP_tCd1pX1IhvrBU3EhKmM,8605
+osism/tasks/ansible.py,sha256=RcLxLrjzL5_X6OjNHm3H0lZlmKKlYKIANB0M4_d4chE,1109
 osism/tasks/ceph.py,sha256=eIQkah3Kj4INtOkF9kTjHbXJ3_J2lg48EWJKfHc-UYw,615
-osism/tasks/conductor.py,sha256=
+osism/tasks/conductor.py,sha256=P52Avy8OgNQ4koZp3QZLXJiN9uIiBcqrmDpc3UXsPzs,3639
 osism/tasks/kolla.py,sha256=wJQpWn_01iWLkr7l7T7RNrQGfRgsgmYi4WQlTmNGvew,618
 osism/tasks/kubernetes.py,sha256=VzXq_VrYU_CLm4cOruqnE3Kq2ydfO9glZ3p0bp3OYoc,625
-osism/tasks/netbox.py,sha256=
-osism/tasks/openstack.py,sha256=
-osism/tasks/reconciler.py,sha256=
-osism/utils/__init__.py,sha256=
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
+osism/tasks/netbox.py,sha256=JTgMLp5WAGoupU5Os6xWnKHXACxfXVS33wM1rvbz6Y0,4432
+osism/tasks/openstack.py,sha256=nhHiEcmI_AjM-oYnqjlJ0-c9qYZRQeruOTJsLbScxKI,10258
+osism/tasks/reconciler.py,sha256=RGUcax2gDuyVLw1nGRQn5izXclnPBo9MRl0ndLDiiYQ,2707
+osism/utils/__init__.py,sha256=DP2D7xyXnfWuH-c26elIwdwrMSY-oSkVsLFKsQfna9w,1477
+osism-0.20250331.0.dist-info/licenses/AUTHORS,sha256=EKFIR9F27AvoEXp1cA6FkGbjEOFt4Rcbipr5RJc7jSs,64
+osism-0.20250331.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+osism-0.20250331.0.dist-info/METADATA,sha256=0iUarhckKfE774zkRlFp8Pn21wK2bW30nztf0udL2t8,2972
+osism-0.20250331.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+osism-0.20250331.0.dist-info/entry_points.txt,sha256=DlfrvU14rI55WuTrwNRoce9FY3ric4HeZKZx_Z3NzCw,3015
+osism-0.20250331.0.dist-info/pbr.json,sha256=dAx-E5EYMfbySf6Y9YoFJwWMs-TE1eIlvi2JwTL12Nw,47
+osism-0.20250331.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
+osism-0.20250331.0.dist-info/RECORD,,
{osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/entry_points.txt
CHANGED
@@ -40,7 +40,6 @@ manage netbox = osism.commands.netbox:Manage
 manage server list = osism.commands.server:ServerList
 manage server migrate = osism.commands.server:ServerMigrate
 manage volume list = osism.commands.volume:VolumeList
-netbox = osism.commands.netbox:Run
 netbox ping = osism.commands.netbox:Ping
 netbox sync = osism.commands.netbox:Sync
 netbox sync ironic = osism.commands.netbox:Ironic
osism-0.20250331.0.dist-info/licenses/AUTHORS
ADDED
@@ -0,0 +1 @@
+renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

osism-0.20250331.0.dist-info/pbr.json
ADDED
@@ -0,0 +1 @@
+{"git_version": "8324e98", "is_release": false}

osism-0.20250314.0.dist-info/AUTHORS
REMOVED
@@ -1 +0,0 @@
-janhorstmann <horstmann@osism.tech>

osism-0.20250314.0.dist-info/pbr.json
REMOVED
@@ -1 +0,0 @@
-{"git_version": "73a2ae0", "is_release": false}
{osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info/licenses}/LICENSE
File without changes
{osism-0.20250314.0.dist-info → osism-0.20250331.0.dist-info}/top_level.txt
File without changes