osism 0.20250505.0-py3-none-any.whl → 0.20250525.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/api.py +6 -6
- osism/commands/apply.py +1 -1
- osism/commands/compute.py +246 -13
- osism/commands/configuration.py +4 -2
- osism/commands/manage.py +1 -1
- osism/commands/netbox.py +77 -9
- osism/commands/reconciler.py +4 -2
- osism/commands/validate.py +3 -3
- osism/settings.py +3 -3
- osism/tasks/ansible.py +9 -3
- osism/tasks/conductor.py +144 -100
- osism/tasks/reconciler.py +8 -32
- osism/utils/__init__.py +17 -0
- {osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/METADATA +12 -14
- {osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/RECORD +21 -21
- {osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/WHEEL +1 -1
- {osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/entry_points.txt +3 -3
- osism-0.20250525.0.dist-info/licenses/AUTHORS +1 -0
- osism-0.20250525.0.dist-info/pbr.json +1 -0
- osism-0.20250505.0.dist-info/licenses/AUTHORS +0 -1
- osism-0.20250505.0.dist-info/pbr.json +0 -1
- {osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/top_level.txt +0 -0
osism/api.py
CHANGED
@@ -41,28 +41,28 @@ class WebhookNetboxData(BaseModel):
 class LogConfig(BaseModel):
     """Logging configuration to be set for the server"""

-    LOGGER_NAME: str = "
+    LOGGER_NAME: str = "osism"
     LOG_FORMAT: str = "%(levelprefix)s | %(asctime)s | %(message)s"
     LOG_LEVEL: str = "DEBUG"

     # Logging config
-    version = 1
-    disable_existing_loggers = False
-    formatters = {
+    version: int = 1
+    disable_existing_loggers: bool = False
+    formatters: dict = {
         "default": {
             "()": "uvicorn.logging.DefaultFormatter",
             "fmt": LOG_FORMAT,
             "datefmt": "%Y-%m-%d %H:%M:%S",
         },
     }
-    handlers = {
+    handlers: dict = {
         "default": {
             "formatter": "default",
             "class": "logging.StreamHandler",
             "stream": "ext://sys.stderr",
         },
     }
-    loggers = {
+    loggers: dict = {
         "api": {"handlers": ["default"], "level": LOG_LEVEL},
     }

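The annotations are the substance of this change: pydantic only turns annotated class attributes into model fields, so the old unannotated "version = 1" style left those values out of the serialized logging config (and pydantic v2 rejects such attributes outright). A minimal sketch of the difference, assuming pydantic v2 (model_dump(); on v1 this would be .dict()):

from pydantic import BaseModel


class LogConfig(BaseModel):
    LOGGER_NAME: str = "osism"
    version: int = 1  # annotated: a real model field, included in serialization
    # version = 1     # unannotated: not a field on pydantic v1, a hard error on v2


print(LogConfig().model_dump())  # {'LOGGER_NAME': 'osism', 'version': 1}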
osism/commands/apply.py
CHANGED
@@ -116,7 +116,7 @@ class Run(Command):
         if format == "log":
             for c in t.children:
                 logger.info(
-                    f"Task {c.task_id} is running in background. No more output. Check ARA for logs."
+                    f"Task {c.task_id} (loadbalancer) is running in background. No more output. Check ARA for logs."
                 )

         # As explained above, it is neceesary to wait for all tasks.
osism/commands/compute.py
CHANGED
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0

 import time
+import datetime

 from cliff.command import Command
 from jc import parse

@@ -322,7 +323,13 @@ class ComputeMigrate(Command):
         parser.add_argument(
             "--no-wait",
             default=False,
-            help="Do not wait for completion of migration",
+            help="Do not wait for completion of migration (Resize of cold migrated instances will not be confirmed!)",
+            action="store_true",
+        )
+        parser.add_argument(
+            "--no-cold-migration",
+            default=False,
+            help="Do not cold migrate instances",
             action="store_true",
         )
         parser.add_argument(

@@ -368,6 +375,7 @@ class ComputeMigrate(Command):
         target = parsed_args.target
         force = parsed_args.force
         no_wait = parsed_args.no_wait
+        no_cold_migration = parsed_args.no_cold_migration
         yes = parsed_args.yes
         domain = parsed_args.domain
         project = parsed_args.project

@@ -393,9 +401,13 @@ class ComputeMigrate(Command):
             logger.info(f"No migratable instances found on node {host}")

         for server in result:
-            if server[2]
+            if server[2] in ["ACTIVE", "PAUSED"]:
+                migration_type = "live"
+            elif server[2] in ["SHUTOFF"] and not no_cold_migration:
+                migration_type = "cold"
+            else:
                 logger.info(
-                    f"{server[0]} ({server[1]}) in status {server[2]} cannot be
+                    f"{server[0]} ({server[1]}) in status {server[2]} cannot be migrated"
                 )
                 continue

@@ -403,27 +415,248 @@ class ComputeMigrate(Command):
                 answer = "yes"
             else:
                 answer = prompt(
-                    f"
+                    f"{migration_type.capitalize()} migrate server {server[0]} ({server[1]}) [yes/no]: "
                 )

             if answer in ["yes", "y"]:
-                logger.info(
-
-                    server[0], host=target, block_migration="auto", force=force
+                logger.info(
+                    f"{migration_type.capitalize()} migrating server {server[0]}"
                 )
+                if migration_type == "live":
+                    conn.compute.live_migrate_server(
+                        server[0], host=target, block_migration="auto", force=force
+                    )
+                elif migration_type == "cold":
+                    conn.compute.migrate_server(server[0], host=target)

                 if not no_wait:
-
-                    while inner_wait:
+                    while True:
                         time.sleep(2)
                         s = conn.compute.get_server(server[0])
-                        if
+                        if (
+                            migration_type == "live"
+                            and s.status in ["MIGRATING"]
+                            or migration_type == "cold"
+                            and s.status in ["RESIZE"]
+                        ):
                             logger.info(
-                                f"
+                                f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) is still in progress"
                             )
-
+                        elif migration_type == "cold" and s.status in ["VERIFY_RESIZE"]:
+                            try:
+                                conn.compute.confirm_server_resize(s)
+                                logger.info(
+                                    f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) confirmed"
+                                )
+                            except Exception as exc:
+                                logger.error(
+                                    f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) could not be confirmed"
+                                )
+                                raise exc
+                            # NOTE: There seems to be no simple way to check whether the resize
+                            # has been confirmed. The state is still "VERIFY_RESIZE" afterwards.
+                            # Therefore we drop out without waiting for the "SHUTOFF" state
+                            break
                         else:
-
+                            logger.info(
+                                f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) completed with status {s.status}"
+                            )
+                            break
+
+
+class ComputeMigrationList(Command):
+    def get_parser(self, prog_name):
+        parser = super(ComputeMigrationList, self).get_parser(prog_name)
+        parser.add_argument(
+            "--host",
+            default=None,
+            type=str,
+            help="Only list migrations with the given host as source or destination",
+        )
+        parser.add_argument(
+            "--server",
+            default=None,
+            type=str,
+            help="Only list migrations for the given instance (name or ID)",
+        )
+        parser.add_argument(
+            "--user",
+            default=None,
+            type=str,
+            help="Only list migrations for the given user (name or ID)",
+        )
+        parser.add_argument(
+            "--user-domain",
+            default=None,
+            type=str,
+            help="Domain the user belongs to (name or ID)",
+        )
+        parser.add_argument(
+            "--project",
+            default=None,
+            type=str,
+            help="Only list migrations for the given project (name or ID)",
+        )
+        parser.add_argument(
+            "--project-domain",
+            default=None,
+            type=str,
+            help="Domain the project belongs to (name or ID)",
+        )
+        parser.add_argument(
+            "--status",
+            default=None,
+            type=str,
+            help="Only list migrations with the given status",
+        )
+        parser.add_argument(
+            "--type",
+            default=None,
+            choices=["migration", "live-migration", "evacuation", "resize"],
+            type=str,
+            help="Only list migrations with the given type",
+        )
+        parser.add_argument(
+            "--changes-since",
+            default=None,
+            type=datetime.datetime.fromisoformat,
+            help="Only list migrations last chganged since the given date in ISO 8601 format (CCYY-MM-DDThh:mm:ss±hh:mm)",
+        )
+        parser.add_argument(
+            "--changes-before",
+            default=None,
+            type=datetime.datetime.fromisoformat,
+            help="Only list migrations last chganged before the given date in ISO 8601 format (CCYY-MM-DDThh:mm:ss±hh:mm)",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        host = parsed_args.host
+        server = parsed_args.server
+        user = parsed_args.user
+        user_domain = parsed_args.user_domain
+        project = parsed_args.project
+        project_domain = parsed_args.project_domain
+        status = parsed_args.status
+        migration_type = parsed_args.type
+        changes_since = parsed_args.changes_since
+        changes_before = parsed_args.changes_before
+
+        if changes_before and changes_since:
+            if not changes_since <= changes_before:
+                logger.error(
+                    "changes-since needs to be less or equal to changes-before"
+                )
+                return
+
+        conn = get_cloud_connection()
+
+        user_id = None
+        if user:
+            user_query = {}
+
+            if user_domain:
+                u_d = conn.identity.find_domain(user_domain, ignore_missing=True)
+                if u_d and "id" in u_d:
+                    user_query = dict(domain_id=u_d.id)
+                else:
+                    logger.error(f"No domain found for {user_domain}")
+                    return
+
+            u = conn.identity.find_user(user, ignore_missing=True, **user_query)
+            if u and "id" in u:
+                user_id = u.id
+            else:
+                logger.error(f"No user found for {user}")
+                return
+
+        project_id = None
+        if project:
+            project_query = {}
+
+            if project_domain:
+                p_d = conn.identity.find_domain(project_domain, ignore_missing=True)
+                if p_d and "id" in p_d:
+                    project_query = dict(domain_id=p_d.id)
+                else:
+                    logger.error(f"No domain found for {project_domain}")
+                    return
+
+            p = conn.identity.find_project(
+                project, ignore_missing=True, **project_query
+            )
+            if p and "id" in p:
+                project_id = p.id
+            else:
+                logger.error(f"No project found for {project}")
+                return
+
+        instance_uuid = None
+        if server:
+            try:
+                s = conn.compute.find_server(
+                    server, details=False, ignore_missing=False, all_projects=True
+                )
+                if s and "id" in s:
+                    instance_uuid = s.id
+                else:
+                    raise openstack.exceptions.NotFoundException
+            except openstack.exceptions.DuplicateResource:
+                logger.error(f"Multiple servers where found for {server}")
+                return
+            except openstack.exceptions.NotFoundException:
+                logger.error(f"No server found for {server}")
+                return
+
+        query = {}
+        if host:
+            query.update(dict(host=host))
+        if instance_uuid:
+            query.update(dict(instance_uuid=instance_uuid))
+        if status:
+            query.update(dict(status=status))
+        if migration_type:
+            query.update(dict(migration_type=migration_type))
+        if user_id:
+            query.update(dict(user_id=user_id))
+        if project_id:
+            query.update(dict(project_id=project_id))
+        if changes_since:
+            query.update(dict(changes_since=changes_since))
+        if changes_before:
+            query.update(dict(changes_before=changes_before))
+
+        migrations = conn.compute.migrations(**query)
+        result = [
+            [
+                m.source_compute,
+                m.dest_compute,
+                m.status,
+                m.migration_type,
+                m["instance_uuid"],
+                m.user_id,
+                m.created_at,
+                m.updated_at,
+            ]
+            for m in migrations
+        ]
+
+        print(
+            tabulate(
+                result,
+                headers=[
+                    "Source",
+                    "Destintion",
+                    "Status",
+                    "Type",
+                    "Server UUID",
+                    "User",
+                    "Created At",
+                    "Updated At",
+                ],
+                tablefmt="psql",
+            )
+        )


 class ComputeStart(Command):
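The branch at the top of the migration loop is the core of this change: the Nova server status selects the migration strategy. A standalone restatement of just that decision (the helper name is hypothetical, not part of the package):

from typing import Optional


def choose_migration_type(status: str, no_cold_migration: bool = False) -> Optional[str]:
    # Mirrors the branch added in ComputeMigrate.take_action
    if status in ["ACTIVE", "PAUSED"]:
        return "live"  # running or paused instances are live migrated
    if status == "SHUTOFF" and not no_cold_migration:
        return "cold"  # stopped instances are cold migrated unless disabled
    return None  # every other status cannot be migrated


assert choose_migration_type("ACTIVE") == "live"
assert choose_migration_type("SHUTOFF", no_cold_migration=True) is None

Cold migration goes through Nova's resize machinery, which is why the wait loop afterwards has to confirm the VERIFY_RESIZE state before the instance settles.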
osism/commands/configuration.py
CHANGED
@@ -23,9 +23,11 @@ class Sync(Command):
             "manager", "configuration", arguments, auto_release_time=60
         )

-        logger.info(f"Task {t.task_id} was prepared for execution.")
         logger.info(
-            f"
+            f"Task {t.task_id} (sync configuration) was prepared for execution."
+        )
+        logger.info(
+            f"It takes a moment until task {t.task_id} (sync configuration) has been started and output is visible here."
         )

         rc = handle_task(t, True, format, 60)
osism/commands/manage.py
CHANGED
@@ -12,7 +12,7 @@ import requests
 from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
 from osism.tasks import openstack, handle_task

-SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.
+SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.31", "1.32", "1.33"]


 class ImageClusterapi(Command):
osism/commands/netbox.py
CHANGED
@@ -1,9 +1,13 @@
 # SPDX-License-Identifier: Apache-2.0

+import os
+import subprocess
+
 from cliff.command import Command
 from loguru import logger
+import yaml

-from osism.tasks import conductor, netbox,
+from osism.tasks import conductor, netbox, handle_task


 class Ironic(Command):

@@ -24,11 +28,11 @@ class Ironic(Command):
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait

-        task = conductor.
-            force_update=parsed_args.force_update
-        )
+        task = conductor.sync_ironic.delay(force_update=parsed_args.force_update)
         if wait:
-            logger.info(
+            logger.info(
+                f"Task {task.task_id} (sync ironic) is running. Wait. No more output."
+            )
             task.wait(timeout=None, interval=0.5)

@@ -45,9 +49,11 @@ class Sync(Command):
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait

-        task =
+        task = conductor.sync_netbox.delay()
         if wait:
-            logger.info(
+            logger.info(
+                f"Task {task.task_id} (sync netbox) is running. Wait. No more output."
+            )
             task.wait(timeout=None, interval=0.5)

@@ -140,9 +146,9 @@ class Manage(Command):
         return handle_task(task, wait, format="script", timeout=3600)


-class
+class Versions(Command):
     def get_parser(self, prog_name):
-        parser = super(
+        parser = super(Versions, self).get_parser(prog_name)
         return parser

     def take_action(self, parsed_args):

@@ -150,3 +156,65 @@ class Ping(Command):
         task.wait(timeout=None, interval=0.5)
         result = task.get()
         print(result)
+
+
+class Console(Command):
+    def get_parser(self, prog_name):
+        parser = super(Console, self).get_parser(prog_name)
+        parser.add_argument(
+            "type",
+            nargs=1,
+            choices=["info", "search", "filter", "shell"],
+            help="Type of the console (default: %(default)s)",
+        )
+        parser.add_argument(
+            "arguments", nargs="*", type=str, default="", help="Additional arguments"
+        )
+
+        return parser
+
+    def take_action(self, parsed_args):
+        type_console = parsed_args.type[0]
+        arguments = " ".join(
+            [f"'{item}'" if " " in item else item for item in parsed_args.arguments]
+        )
+
+        home_dir = os.path.expanduser("~")
+        nbcli_dir = os.path.join(home_dir, ".nbcli")
+        if not os.path.exists(nbcli_dir):
+            os.mkdir(nbcli_dir)
+
+        nbcli_file = os.path.join(nbcli_dir, "user_config.yml")
+        if not os.path.exists(nbcli_file):
+            try:
+                with open("/run/secrets/NETBOX_TOKEN", "r") as fp:
+                    token = fp.read().strip()
+            except FileNotFoundError:
+                token = None
+
+            url = os.environ.get("NETBOX_API", None)
+
+            if not token or not url:
+                logger.error("Netbox integration not configured.")
+                return
+
+            subprocess.call(
+                ["/usr/local/bin/nbcli", "init"],
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
+            os.remove(nbcli_file)
+
+            nbcli_config = {
+                "pynetbox": {
+                    "url": url,
+                    "token": token,
+                },
+                "requests": {"verify": False},
+                "nbcli": {"filter_limit": 50},
+                "user": {},
+            }
+            with open(nbcli_file, "w") as fp:
+                yaml.dump(nbcli_config, fp, default_flow_style=False)
+
+        subprocess.call(f"/usr/local/bin/nbcli {type_console} {arguments}", shell=True)
osism/commands/reconciler.py
CHANGED
@@ -47,7 +47,7 @@ class Sync(Command):
         t = reconciler.run.delay(publish=wait)
         if wait:
             logger.info(
-                f"Task {t.task_id} is running in background. Output coming soon."
+                f"Task {t.task_id} (sync inventory) is running in background. Output coming soon."
             )
             rc = 0
             stoptime = time.time() + task_timeout

@@ -77,4 +77,6 @@ class Sync(Command):
             redis.close()
             return rc
         else:
-            logger.info(
+            logger.info(
+                f"Task {t.task_id} (sync inventory) is running in background. No more output."
+            )
osism/commands/validate.py
CHANGED
@@ -52,7 +52,7 @@ class Run(Command):
         )
         return parser

-    def _handle_task(self, t, wait, format, timeout):
+    def _handle_task(self, t, wait, format, timeout, playbook):
         rc = 0
         if wait:
             stoptime = time.time() + timeout

@@ -85,7 +85,7 @@ class Run(Command):
         else:
             if format == "log":
                 logger.info(
-                    f"Task {t.task_id} is running in background. No more output. Check ARA for logs."
+                    f"Task {t.task_id} (validate {playbook}) is running in background. No more output. Check ARA for logs."
                 )
             elif format == "script":
                 print(f"{t.task_id}")

@@ -120,6 +120,6 @@ class Run(Command):
         environment = VALIDATE_PLAYBOOKS[validator]["environment"]
         t = ansible.run.delay(environment, playbook, arguments)

-        rc = self._handle_task(t, wait, format, timeout)
+        rc = self._handle_task(t, wait, format, timeout, playbook)

         return rc
osism/settings.py
CHANGED
@@ -23,7 +23,7 @@ REDIS_PORT: int = int(os.getenv("REDIS_PORT", "6379"))
 REDIS_DB: int = int(os.getenv("REDIS_DB", "0"))


-NETBOX_URL = os.getenv("NETBOX_API")
+NETBOX_URL = os.getenv("NETBOX_API", os.getenv("NETBOX_URL"))
 NETBOX_TOKEN = os.getenv("NETBOX_TOKEN", read_secret("NETBOX_TOKEN"))
 IGNORE_SSL_ERRORS = os.getenv("IGNORE_SSL_ERRORS", "True") == "True"

@@ -35,8 +35,8 @@ INVENTORY_RECONCILER_SCHEDULE = float(

 OSISM_API_URL = os.getenv("OSISM_API_URL", None)

-OSISM_CONDUCTOR_NETBOX_FILTER_LIST = os.getenv(
-    "OSISM_CONDUCTOR_NETBOX_FILTER_LIST",
+NETBOX_FILTER_CONDUCTOR = os.getenv(
+    "NETBOX_FILTER_CONDUCTOR",
     "[{'state': 'active', 'tag': ['managed-by-ironic']}]",
 )

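With the new default chain, NETBOX_API wins when both variables are set, and NETBOX_URL only fills in when it is not. A quick self-contained check of that precedence:

import os

os.environ["NETBOX_URL"] = "https://netbox.internal"
os.environ.pop("NETBOX_API", None)

# Same expression as in osism/settings.py:
assert os.getenv("NETBOX_API", os.getenv("NETBOX_URL")) == "https://netbox.internal"

os.environ["NETBOX_API"] = "https://netbox.example.com"
assert os.getenv("NETBOX_API", os.getenv("NETBOX_URL")) == "https://netbox.example.com"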
osism/tasks/ansible.py
CHANGED
@@ -1,8 +1,9 @@
 # SPDX-License-Identifier: Apache-2.0

 from celery import Celery
+from pottery import Redlock

-from osism import settings
+from osism import settings, utils
 from osism.tasks import Config, run_ansible_in_environment

 app = Celery("ansible")

@@ -11,9 +12,14 @@ app.config_from_object(Config)

 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
-
-
+    lock = Redlock(
+        key="lock_osism_tasks_ansible_setup_periodic_tasks",
+        masters={utils.redis},
     )
+    if settings.GATHER_FACTS_SCHEDULE > 0 and lock.acquire(timeout=10):
+        sender.add_periodic_task(
+            settings.GATHER_FACTS_SCHEDULE, gather_facts.s(), expires=10
+        )


 @app.task(bind=True, name="osism.tasks.ansible.gather_facts")
osism/tasks/conductor.py
CHANGED
@@ -1,5 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0

+from ansible import constants as ansible_constants
+from ansible.parsing.vault import VaultLib, VaultSecret
 from celery import Celery
 from celery.signals import worker_process_init
 import copy

@@ -18,83 +20,9 @@ app.config_from_object(Config)


 configuration = {}
-nb_device_query_list = None


-
-def celery_init_worker(**kwargs):
-    global configuration
-
-    with open("/etc/conductor.yml") as fp:
-        configuration = yaml.load(fp, Loader=yaml.SafeLoader)
-
-    if not configuration:
-        logger.warning(
-            "The conductor configuration is empty. That's probably wrong"
-        )
-        configuration = {}
-        return
-
-    # Resolve all IDs in the conductor.yml
-    if Config.enable_ironic.lower() in ["true", "yes"]:
-        if "ironic_parameters" not in configuration:
-            logger.error(
-                "ironic_parameters not found in the conductor configuration"
-            )
-            return
-
-        if "driver_info" in configuration["ironic_parameters"]:
-            if "deploy_kernel" in configuration["ironic_parameters"]["driver_info"]:
-                result = openstack.image_get(
-                    configuration["ironic_parameters"]["driver_info"][
-                        "deploy_kernel"
-                    ]
-                )
-                configuration["ironic_parameters"]["driver_info"][
-                    "deploy_kernel"
-                ] = result.id
-
-            if (
-                "deploy_ramdisk"
-                in configuration["ironic_parameters"]["driver_info"]
-            ):
-                result = openstack.image_get(
-                    configuration["ironic_parameters"]["driver_info"][
-                        "deploy_ramdisk"
-                    ]
-                )
-                configuration["ironic_parameters"]["driver_info"][
-                    "deploy_ramdisk"
-                ] = result.id
-
-            if (
-                "cleaning_network"
-                in configuration["ironic_parameters"]["driver_info"]
-            ):
-                result = openstack.network_get(
-                    configuration["ironic_parameters"]["driver_info"][
-                        "cleaning_network"
-                    ]
-                )
-                configuration["ironic_parameters"]["driver_info"][
-                    "cleaning_network"
-                ] = result.id
-
-            if (
-                "provisioning_network"
-                in configuration["ironic_parameters"]["driver_info"]
-            ):
-                result = openstack.network_get(
-                    configuration["ironic_parameters"]["driver_info"][
-                        "provisioning_network"
-                    ]
-                )
-                configuration["ironic_parameters"]["driver_info"][
-                    "provisioning_network"
-                ] = result.id
-
-    global nb_device_query_list
-
+def get_nb_device_query_list():
     try:
         supported_nb_device_filters = [
             "site",

@@ -105,9 +33,7 @@ def celery_init_worker(**kwargs):
             "tag",
             "state",
         ]
-        nb_device_query_list = yaml.safe_load(
-            settings.OSISM_CONDUCTOR_NETBOX_FILTER_LIST
-        )
+        nb_device_query_list = yaml.safe_load(settings.NETBOX_FILTER_CONDUCTOR)
         if type(nb_device_query_list) is not list:
             raise TypeError
         for nb_device_query in nb_device_query_list:

@@ -129,13 +55,81 @@ def celery_init_worker(**kwargs):
                     raise ValueError(f"Invalid name {value_name} for {key}")
     except (yaml.YAMLError, TypeError):
         logger.error(
-            f"Setting
+            f"Setting NETBOX_FILTER_CONDUCTOR needs to be an array of mappings containing supported netbox device filters: {supported_nb_device_filters}"
         )
         nb_device_query_list = []
     except ValueError as exc:
-        logger.error(f"Unknown value in
+        logger.error(f"Unknown value in NETBOX_FILTER_CONDUCTOR: {exc}")
         nb_device_query_list = []
+
+    return nb_device_query_list
+
+
+def get_configuration():
+    with open("/etc/conductor.yml") as fp:
+        configuration = yaml.load(fp, Loader=yaml.SafeLoader)
+
+    if not configuration:
+        logger.warning(
+            "The conductor configuration is empty. That's probably wrong"
+        )
+        return {}
+
+    if Config.enable_ironic.lower() not in ["true", "yes"]:
+        return configuration
+
+    if "ironic_parameters" not in configuration:
+        logger.error("ironic_parameters not found in the conductor configuration")
+        return configuration
+
+    if "driver_info" in configuration["ironic_parameters"]:
+        if "deploy_kernel" in configuration["ironic_parameters"]["driver_info"]:
+            result = openstack.image_get(
+                configuration["ironic_parameters"]["driver_info"]["deploy_kernel"]
+            )
+            configuration["ironic_parameters"]["driver_info"][
+                "deploy_kernel"
+            ] = result.id
+
+        if "deploy_ramdisk" in configuration["ironic_parameters"]["driver_info"]:
+            result = openstack.image_get(
+                configuration["ironic_parameters"]["driver_info"]["deploy_ramdisk"]
+            )
+            configuration["ironic_parameters"]["driver_info"][
+                "deploy_ramdisk"
+            ] = result.id
+
+        if "cleaning_network" in configuration["ironic_parameters"]["driver_info"]:
+            result = openstack.network_get(
+                configuration["ironic_parameters"]["driver_info"][
+                    "cleaning_network"
+                ]
+            )
+            configuration["ironic_parameters"]["driver_info"][
+                "cleaning_network"
+            ] = result.id
+
+        if (
+            "provisioning_network"
+            in configuration["ironic_parameters"]["driver_info"]
+        ):
+            result = openstack.network_get(
+                configuration["ironic_parameters"]["driver_info"][
+                    "provisioning_network"
+                ]
+            )
+            configuration["ironic_parameters"]["driver_info"][
+                "provisioning_network"
+            ] = result.id
+
+    return configuration
+
+
+@worker_process_init.connect
+def celery_init_worker(**kwargs):
+    global configuration
+    configuration = get_configuration()
+

 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):

@@ -151,8 +145,13 @@ def get_ironic_parameters(self):
         return {}


-@app.task(bind=True, name="osism.tasks.conductor.
-def
+@app.task(bind=True, name="osism.tasks.conductor.sync_netbox")
+def sync_netbox(self, force_update=False):
+    logger.info("Not implemented")
+
+
+@app.task(bind=True, name="osism.tasks.conductor.sync_ironic")
+def sync_ironic(self, force_update=False):
     def deep_compare(a, b, updates):
         """
         Find items in a that do not exist in b or are different.

@@ -168,6 +167,31 @@ def sync_netbox_with_ironic(self, force_update=False):
             if not updates[key]:
                 updates.pop(key)

+    def deep_merge(a, b):
+        for key, value in b.items():
+            if value == "DELETE":
+                # NOTE: Use special string to remove keys
+                a.pop(key, None)
+            elif (
+                key not in a.keys()
+                or not isinstance(a[key], dict)
+                or not isinstance(value, dict)
+            ):
+                a[key] = value
+            else:
+                deep_merge(a[key], value)
+
+    def deep_decrypt(a, vault):
+        for key, value in list(a.items()):
+            if not isinstance(value, dict):
+                if vault.is_encrypted(value):
+                    try:
+                        a[key] = vault.decrypt(value).decode()
+                    except Exception:
+                        a.pop(key, None)
+            else:
+                deep_decrypt(a[key], vault)
+
     driver_params = {
         "ipmi": {
             "address": "ipmi_address",

@@ -181,6 +205,7 @@ def sync_netbox_with_ironic(self, force_update=False):
     }

     devices = set()
+    nb_device_query_list = get_nb_device_query_list()
     for nb_device_query in nb_device_query_list:
         devices |= set(netbox.get_devices(**nb_device_query))

@@ -211,20 +236,51 @@ def sync_netbox_with_ironic(self, force_update=False):
     # NOTE: Find nodes in netbox which are not present in Ironic and add them
     for device in devices:
         logger.info(f"Looking for {device.name} in ironic")
+        logger.info(device)

         node_interfaces = list(netbox.get_interfaces_by_device(device.name))

         node_attributes = get_ironic_parameters()
+        if (
+            "ironic_parameters" in device.custom_fields
+            and device.custom_fields["ironic_parameters"]
+        ):
+            # NOTE: Update node attributes with overrides from netbox device
+            deep_merge(node_attributes, device.custom_fields["ironic_parameters"])
+        # NOTE: Decrypt ansible vaulted secrets
+        try:
+            vault_secret = utils.get_ansible_vault_password()
+            vault = VaultLib(
+                [
+                    (
+                        ansible_constants.DEFAULT_VAULT_ID_MATCH,
+                        VaultSecret(vault_secret.encode()),
+                    )
+                ]
+            )
+        except Exception:
+            logger.error("Unable to get vault secret. Dropping encrypted entries")
+            vault = VaultLib()
+        deep_decrypt(node_attributes, vault)
         if (
             "driver" in node_attributes
             and node_attributes["driver"] in driver_params.keys()
         ):
             if "driver_info" in node_attributes:
+                # NOTE: Pop all fields belonging to a different driver
+                unused_drivers = [
+                    driver
+                    for driver in driver_params.keys()
+                    if driver != node_attributes["driver"]
+                ]
+                for key in list(node_attributes["driver_info"].keys()):
+                    for driver in unused_drivers:
+                        if key.startswith(driver + "_"):
+                            node_attributes["driver_info"].pop(key, None)
+                # NOTE: Render driver address field
                 address_key = driver_params[node_attributes["driver"]]["address"]
                 if address_key in node_attributes["driver_info"]:
-                    if "
-                        node_mgmt_address = device.custom_fields["oob_address"]
-                    elif "address" in device.oob_ip:
+                    if device.oob_ip and "address" in device.oob_ip:
                         node_mgmt_address = device.oob_ip["address"]
                     else:
                         node_mgmt_addresses = [

@@ -248,18 +304,6 @@ def sync_netbox_with_ironic(self, force_update=False):
                                 )
                             )
                         )
-                    else:
-                        logger.error(f"Could not find out-of-band address for {device}")
-                        node_attributes["driver_info"].pop(address_key, None)
-                    if (
-                        "port" in driver_params[node_attributes["driver"]]
-                        and "oob_port" in device.custom_fields
-                        and device.custom_fields["oob_port"]
-                    ):
-                        port_key = driver_params[node_attributes["driver"]]["port"]
-                        node_attributes["driver_info"].update(
-                            {port_key: device.custom_fields["oob_port"]}
-                        )
             node_attributes.update({"resource_class": device.name})
             ports_attributes = [
                 dict(address=interface.mac_address)

@@ -268,11 +312,11 @@ def sync_netbox_with_ironic(self, force_update=False):
             ]

         lock = Redlock(
-            key=f"
+            key=f"lock_osism_tasks_conductor_sync_ironic-{device.name}",
             masters={utils.redis},
-            auto_release_time=
+            auto_release_time=600,
         )
-        if lock.acquire(timeout=
+        if lock.acquire(timeout=120):
             try:
                 logger.info(f"Processing device {device.name}")
                 node = openstack.baremetal_node_show(device.name, ignore_missing=True)
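Two nested helpers carry the new per-device override mechanism: deep_merge folds the ironic_parameters custom field from Netbox into the defaults, treating the literal string "DELETE" as a tombstone that removes a key, and deep_decrypt then replaces any ansible-vault ciphertexts in place. A self-contained sketch of both behaviors with a throwaway vault password (the real one comes from utils.get_ansible_vault_password(); requires ansible-core):

from ansible import constants as ansible_constants
from ansible.parsing.vault import VaultLib, VaultSecret


def deep_merge(a, b):
    # Same semantics as the helper in sync_ironic: "DELETE" removes a key.
    for key, value in b.items():
        if value == "DELETE":
            a.pop(key, None)
        elif key not in a or not isinstance(a[key], dict) or not isinstance(value, dict):
            a[key] = value
        else:
            deep_merge(a[key], value)


defaults = {"driver": "ipmi", "driver_info": {"ipmi_port": 623, "ipmi_priv_level": "OPERATOR"}}
overrides = {"driver_info": {"ipmi_priv_level": "DELETE", "ipmi_address": "10.0.0.1"}}
deep_merge(defaults, overrides)
assert defaults["driver_info"] == {"ipmi_port": 623, "ipmi_address": "10.0.0.1"}

# Vault round trip with a throwaway password:
vault = VaultLib([(ansible_constants.DEFAULT_VAULT_ID_MATCH, VaultSecret(b"example"))])
ciphertext = vault.encrypt(b"s3cret")
assert vault.is_encrypted(ciphertext)
assert vault.decrypt(ciphertext) == b"s3cret"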
osism/tasks/reconciler.py
CHANGED
@@ -6,6 +6,7 @@ import subprocess
 from celery import Celery
 from loguru import logger
 from pottery import Redlock
+
 from osism import settings, utils
 from osism.tasks import Config

@@ -15,9 +16,14 @@ app.config_from_object(Config)

 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
-
-
+    lock = Redlock(
+        key="lock_osism_tasks_reconciler_setup_periodic_tasks",
+        masters={utils.redis},
     )
+    if settings.INVENTORY_RECONCILER_SCHEDULE > 0 and lock.acquire(timeout=10):
+        sender.add_periodic_task(
+            settings.INVENTORY_RECONCILER_SCHEDULE, run_on_change.s(), expires=10
+        )


 @app.task(bind=True, name="osism.tasks.reconciler.run")

@@ -63,33 +69,3 @@ def run_on_change(self):
         p.wait()

         lock.release()
-
-
-@app.task(bind=True, name="osism.tasks.reconciler.sync_inventory_with_netbox")
-def sync_inventory_with_netbox(self):
-    lock = Redlock(
-        key="lock_osism_tasks_reconciler_sync_inventory_with_netbox",
-        masters={utils.redis},
-        auto_release_time=60,
-    )
-
-    if lock.acquire(timeout=20):
-        p = subprocess.Popen(
-            "/sync-inventory-with-netbox.sh",
-            shell=True,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-        )
-
-        for line in io.TextIOWrapper(p.stdout, encoding="utf-8"):
-            # NOTE: use task_id or request_id in future
-            utils.redis.publish(
-                "netbox-sync-inventory-with-netbox", {"type": "stdout", "content": line}
-            )
-
-        lock.release()
-
-        # NOTE: use task_id or request_id in future
-        utils.redis.publish(
-            "netbox-sync-inventory-with-netbox", {"type": "action", "content": "quit"}
-        )
osism/utils/__init__.py
CHANGED
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0

+from cryptography.fernet import Fernet
 import keystoneauth1
 from loguru import logger
 import openstack

@@ -95,6 +96,22 @@ def get_openstack_connection():
     return conn


+def get_ansible_vault_password():
+    keyfile = "/share/ansible_vault_password.key"
+
+    try:
+        with open(keyfile, "r") as fp:
+            key = fp.read()
+        f = Fernet(key)
+
+        encrypted_ansible_vault_password = redis.get("ansible_vault_password")
+        ansible_vault_password = f.decrypt(encrypted_ansible_vault_password)
+        return ansible_vault_password.decode("utf-8")
+    except Exception as exc:
+        logger.error("Unable to get ansible vault password")
+        raise exc
+
+
 # https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition
 def first(iterable, condition=lambda x: True):
     """
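get_ansible_vault_password composes two stores: a Fernet key on disk (/share/ansible_vault_password.key) and the ciphertext in Redis under the key "ansible_vault_password". The writing side is not part of this diff; a plausible round trip under that assumption:

from cryptography.fernet import Fernet

key = Fernet.generate_key()  # osism reads this key from the shared key file instead
f = Fernet(key)

# What a writer would store under the Redis key "ansible_vault_password":
token = f.encrypt(b"vault-password")

# What get_ansible_vault_password() does after fetching the token from Redis:
assert f.decrypt(token).decode("utf-8") == "vault-password"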
{osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: osism
-Version: 0.
+Version: 0.20250525.0
 Summary: OSISM manager interface
 Home-page: https://github.com/osism/python-osism
 Author: OSISM GmbH

@@ -28,40 +28,38 @@ Requires-Dist: Jinja2==3.1.6
 Requires-Dist: PyYAML==6.0.2
 Requires-Dist: ara==1.7.2
 Requires-Dist: celery[redis]==5.5.2
-Requires-Dist: cliff==4.
-Requires-Dist: deepdiff==8.
+Requires-Dist: cliff==4.10.0
+Requires-Dist: deepdiff==8.5.0
 Requires-Dist: docker==7.1.0
 Requires-Dist: dtrack-auditor==1.5.0
 Requires-Dist: fastapi==0.115.12
 Requires-Dist: flower==2.0.1
-Requires-Dist: hiredis==3.1
-Requires-Dist: jc==1.25.
-Requires-Dist: keystoneauth1==5.
+Requires-Dist: hiredis==3.2.1
+Requires-Dist: jc==1.25.5
+Requires-Dist: keystoneauth1==5.11.0
 Requires-Dist: kombu==5.5.3
 Requires-Dist: kubernetes==32.0.1
 Requires-Dist: loguru==0.7.3
+Requires-Dist: nbcli==0.10.0.dev2
 Requires-Dist: netmiko==4.5.0
-Requires-Dist: nornir-ansible==2023.12.28
-Requires-Dist: nornir==3.5.0
 Requires-Dist: openstacksdk==4.5.0
 Requires-Dist: pottery==3.0.1
 Requires-Dist: prompt-toolkit==3.0.51
-Requires-Dist:
-Requires-Dist: pynetbox==7.4.1
+Requires-Dist: pynetbox==7.5.0
 Requires-Dist: pytest-testinfra==10.2.2
 Requires-Dist: python-dateutil==2.9.0.post0
-Requires-Dist: setuptools==80.
+Requires-Dist: setuptools==80.8.0
 Requires-Dist: sqlmodel==0.0.24
-Requires-Dist: sushy==5.
+Requires-Dist: sushy==5.6.0
 Requires-Dist: tabulate==0.9.0
 Requires-Dist: transitions==0.9.2
 Requires-Dist: uvicorn[standard]==0.34.2
 Requires-Dist: watchdog==6.0.0
 Provides-Extra: ansible
 Requires-Dist: ansible-runner==2.4.1; extra == "ansible"
-Requires-Dist: ansible-core==2.18.
+Requires-Dist: ansible-core==2.18.6; extra == "ansible"
 Provides-Extra: openstack-image-manager
-Requires-Dist: openstack-image-manager==0.
+Requires-Dist: openstack-image-manager==0.20250508.0; extra == "openstack-image-manager"
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
{osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/RECORD
CHANGED
@@ -1,29 +1,29 @@
 osism/__init__.py,sha256=1UiNTBus0V0f2AbZQzAtVtu6zkfCCrw0OTq--NwFAqY,341
 osism/__main__.py,sha256=ILe4gu61xEISiBsxanqTQIdSkV-YhpZXTRlguCYyssk,141
-osism/api.py,sha256=
+osism/api.py,sha256=t3HebSzk6fyY7bLJD9P95oEL1qWYXzpX6Yk1o_nVkMo,4356
 osism/main.py,sha256=Dt2-9sLXcS-Ny4DAz7hrha-KRc7zd7BFUTRdfs_X8z4,893
-osism/settings.py,sha256=
+osism/settings.py,sha256=mkvbxVQ64ZD7Ypk-bRePHn0gZ5j6Lcu2a578eLU0gQs,1309
 osism/actions/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/commands/__init__.py,sha256=Ag4wX_DCgXRdoLn6t069jqb3DdRylsX2nyYkiyCx4uk,456
-osism/commands/apply.py,sha256=
+osism/commands/apply.py,sha256=mH3-NctgevVzP_1IW92FQeiYMCPB49K5hXbxmTY2vnA,16795
 osism/commands/compose.py,sha256=iqzG7mS9E1VWaLNN6yQowjOqiHn3BMdj-yfXb3Dc4Ok,1200
-osism/commands/compute.py,sha256=
-osism/commands/configuration.py,sha256=
+osism/commands/compute.py,sha256=cgqXWJa5wAvn-7e3FWCgX6hie_aK0yrKRkcNzjLXwDY,25799
+osism/commands/configuration.py,sha256=sPe8b0dVKFRbr30xoeVdAnHbGwCwgUh0xa_Vzv5pSQQ,954
 osism/commands/console.py,sha256=8BPz1hio5Wi6kONVAWFuSqkDRrMcLEYeFIY8dbtN6e4,3218
 osism/commands/container.py,sha256=Fku2GaCM3Idq_FxExUtNqjrEM0XYjpVvXmueSVO8S_c,1601
 osism/commands/get.py,sha256=ryytjtXWmlMV0NucP5tGkMZu0nIlC4xVtjRk4iMZ06c,8967
 osism/commands/log.py,sha256=2IpYuosC7FZwwLvM8HmKSU1NRNIelVVYzqjjVMCrOJk,4072
-osism/commands/manage.py,sha256=
-osism/commands/netbox.py,sha256=
+osism/commands/manage.py,sha256=WxUZEhylZj2IhydAe3BAr3S5ED6opG243skfSq5q41s,11971
+osism/commands/netbox.py,sha256=70GjyPYSVH6dOYwEx1vuF5y9CtEE4vOiH_UNSYuqpjc,6646
 osism/commands/noset.py,sha256=7zDFuFMyNpo7DUOKcNiYV8nodtdMOYFp5LDPcuJhlZ8,1481
-osism/commands/reconciler.py,sha256=
+osism/commands/reconciler.py,sha256=xOyPzQj66xwjdQd2ysCTHX2yBvmMVMppUDZTas6voXc,2882
 osism/commands/server.py,sha256=avmoOv5rjOi-fN2A-27cPwOtiy2Q2j6UFtCh3QrfWAI,7512
 osism/commands/service.py,sha256=A1lgAlGeCJpbFFqF55DRWPcCirIgpU0dzjzVLZ0mz3k,2649
 osism/commands/set.py,sha256=xLBi2DzbVQo2jb3-cOIE9In5UB3vFxquQJkDN-EsfhM,1425
 osism/commands/status.py,sha256=X-Rcj-XuNPDBoxsGkf96NswwpmTognxz1V6E2NX2ZgY,1997
 osism/commands/sync.py,sha256=Vf9k7uVQTIu-8kK1u7Gjs3et3RRBEkmnNikot_PFJIE,484
 osism/commands/task.py,sha256=mwJJ7a71Lw3o_FX7j3rR0-NbPdPwMDOjbOAiiXE4uGc,543
-osism/commands/validate.py,sha256=
+osism/commands/validate.py,sha256=cA0CSvcbTr0K_6C5EofULrJSEp5xthpRC0TZgb_eazU,4233
 osism/commands/vault.py,sha256=Ip0IMR7zaBkPbLJenXr4ZwxM6FnozZ9wn9rwHmFHo8s,1818
 osism/commands/volume.py,sha256=l6oAk__dFM8KKdLTWOvuSiI7tLh9wAPZp8hwmYF-NX0,6595
 osism/commands/wait.py,sha256=mKFDqEXcaLlKw1T3MuBEZpNh7CeL3lpUXgubD2_f8es,6580

@@ -36,20 +36,20 @@ osism/plugins/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/services/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/services/listener.py,sha256=eEamlQsJqCuU9K2QFmk3yM9LAJZEanVcTLtGMsNCKjs,9783
 osism/tasks/__init__.py,sha256=ZEu_KYsapTYp0etr-rLqie_NT_LndHDDpx53xITru5Y,8691
-osism/tasks/ansible.py,sha256=
+osism/tasks/ansible.py,sha256=_2zrHwynwwEv9nDnX-LbNCzcwy9dTUGo_yyutt34HyQ,1346
 osism/tasks/ceph.py,sha256=eIQkah3Kj4INtOkF9kTjHbXJ3_J2lg48EWJKfHc-UYw,615
-osism/tasks/conductor.py,sha256=
+osism/tasks/conductor.py,sha256=nUaP9lQyfqKbtt--uDVbyiJMbmNx_vI7lYqYzSbLH4E,19599
 osism/tasks/kolla.py,sha256=wJQpWn_01iWLkr7l7T7RNrQGfRgsgmYi4WQlTmNGvew,618
 osism/tasks/kubernetes.py,sha256=VzXq_VrYU_CLm4cOruqnE3Kq2ydfO9glZ3p0bp3OYoc,625
 osism/tasks/netbox.py,sha256=QVOLiTH2Su237YAS0QfXbQ86E-OA1JzrFDfyi9JBmvk,5658
 osism/tasks/openstack.py,sha256=g15tCll5vP1pC6ysxRCTZxplsdGmXbxaCH3k1Qdv5Xg,6367
-osism/tasks/reconciler.py,sha256=
-osism/utils/__init__.py,sha256=
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
+osism/tasks/reconciler.py,sha256=tnZEZZpveBCK4vHZkHE6wDcHfJAlsPcSjIVxB5ItSFM,1981
+osism/utils/__init__.py,sha256=_Y4qchR5yyI_JKhBWd_jcsvDLYZjxO0c3iMA_VRQl58,4304
+osism-0.20250525.0.dist-info/licenses/AUTHORS,sha256=EKFIR9F27AvoEXp1cA6FkGbjEOFt4Rcbipr5RJc7jSs,64
+osism-0.20250525.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+osism-0.20250525.0.dist-info/METADATA,sha256=fLU5Yf-qJPuZHU_QTKcNWqzKUjISG7yOVM9qKonpO2A,2903
+osism-0.20250525.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+osism-0.20250525.0.dist-info/entry_points.txt,sha256=NR2buBhAPM7jc94gdtM_-kEwSIBz0l9x0nUg3fElybc,3051
+osism-0.20250525.0.dist-info/pbr.json,sha256=xuXy9_UHMyU8zzNeRX1aaXpOYUbOci-0c7GrNIbGlmE,47
+osism-0.20250525.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
+osism-0.20250525.0.dist-info/RECORD,,
{osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/entry_points.txt
CHANGED
@@ -21,6 +21,7 @@ get states = osism.commands.get:States
 get status = osism.commands.status:Run
 get tasks = osism.commands.get:Tasks
 get versions manager = osism.commands.get:VersionsManager
+get versions netbox = osism.commands.netbox:Versions
 log ansible = osism.commands.log:Ansible
 log container = osism.commands.log:Container
 log file = osism.commands.log:File

@@ -30,6 +31,7 @@ manage compute enable = osism.commands.compute:ComputeEnable
 manage compute evacuate = osism.commands.compute:ComputeEvacuate
 manage compute list = osism.commands.compute:ComputeList
 manage compute migrate = osism.commands.compute:ComputeMigrate
+manage compute migration list = osism.commands.compute:ComputeMigrationList
 manage compute start = osism.commands.compute:ComputeStart
 manage compute stop = osism.commands.compute:ComputeStop
 manage flavors = osism.commands.manage:Flavors

@@ -40,9 +42,7 @@ manage netbox = osism.commands.netbox:Manage
 manage server list = osism.commands.server:ServerList
 manage server migrate = osism.commands.server:ServerMigrate
 manage volume list = osism.commands.volume:VolumeList
-netbox
-netbox sync = osism.commands.netbox:Sync
-netbox sync ironic = osism.commands.netbox:Ironic
+netbox = osism.commands.netbox:Console
 noset bootstrap = osism.commands.noset:NoBootstrap
 noset maintenance = osism.commands.noset:NoMaintenance
 noset vault password = osism.commands.vault:UnsetPassword
osism-0.20250525.0.dist-info/licenses/AUTHORS
ADDED
@@ -0,0 +1 @@
+renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

osism-0.20250525.0.dist-info/pbr.json
ADDED
@@ -0,0 +1 @@
+{"git_version": "3c67308", "is_release": false}

osism-0.20250505.0.dist-info/licenses/AUTHORS
REMOVED
@@ -1 +0,0 @@
-janhorstmann <horstmann@osism.tech>

osism-0.20250505.0.dist-info/pbr.json
REMOVED
@@ -1 +0,0 @@
-{"git_version": "6139ca4", "is_release": false}

{osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/licenses/LICENSE
File without changes

{osism-0.20250505.0.dist-info → osism-0.20250525.0.dist-info}/top_level.txt
File without changes