osism 0.20250514.0-py3-none-any.whl → 0.20250530.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/api.py +5 -5
- osism/commands/baremetal.py +237 -0
- osism/commands/compute.py +246 -13
- osism/commands/netbox.py +23 -4
- osism/settings.py +2 -2
- osism/tasks/ansible.py +9 -3
- osism/tasks/conductor.py +33 -6
- osism/tasks/netbox.py +5 -0
- osism/tasks/reconciler.py +8 -2
- {osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/METADATA +8 -9
- {osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/RECORD +17 -16
- {osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/WHEEL +1 -1
- {osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/entry_points.txt +4 -0
- osism-0.20250530.0.dist-info/licenses/AUTHORS +1 -0
- osism-0.20250530.0.dist-info/pbr.json +1 -0
- osism-0.20250514.0.dist-info/licenses/AUTHORS +0 -1
- osism-0.20250514.0.dist-info/pbr.json +0 -1
- {osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/top_level.txt +0 -0
osism/api.py
CHANGED
```diff
@@ -46,23 +46,23 @@ class LogConfig(BaseModel):
     LOG_LEVEL: str = "DEBUG"
 
     # Logging config
-    version = 1
-    disable_existing_loggers = False
-    formatters = {
+    version: int = 1
+    disable_existing_loggers: bool = False
+    formatters: dict = {
         "default": {
             "()": "uvicorn.logging.DefaultFormatter",
             "fmt": LOG_FORMAT,
             "datefmt": "%Y-%m-%d %H:%M:%S",
         },
     }
-    handlers = {
+    handlers: dict = {
         "default": {
             "formatter": "default",
             "class": "logging.StreamHandler",
             "stream": "ext://sys.stderr",
         },
     }
-    loggers = {
+    loggers: dict = {
         "api": {"handlers": ["default"], "level": LOG_LEVEL},
     }
 
```
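The only change here is adding type annotations to the `LogConfig` fields, and it is more than cosmetic: pydantic v2 rejects non-annotated class attributes on a `BaseModel`, so `version = 1` raises a `PydanticUserError` when the class is created, while `version: int = 1` becomes a regular model field. A minimal sketch of the difference, assuming the motivation was pydantic v2's stricter field handling:

```python
from pydantic import BaseModel, PydanticUserError

try:
    class Broken(BaseModel):
        version = 1  # no annotation: rejected at class creation time
except PydanticUserError as exc:
    print(type(exc).__name__)  # PydanticUserError

class Fixed(BaseModel):
    version: int = 1  # annotated, so it is a real model field with a default

print(Fixed().version)  # 1
```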
osism/commands/baremetal.py
ADDED
@@ -0,0 +1,237 @@
```python
# SPDX-License-Identifier: Apache-2.0

from cliff.command import Command

from loguru import logger
import openstack
from tabulate import tabulate
import json

from osism.commands import get_cloud_connection


class BaremetalList(Command):
    def get_parser(self, prog_name):
        parser = super(BaremetalList, self).get_parser(prog_name)
        parser.add_argument(
            "--provision-state",
            default=None,
            choices=["enroll", "managable", "available", "active", "error"],
            type=str,
            help="Only list nodes with the given provision state",
        )
        parser.add_argument(
            "--maintenance",
            default=False,
            action="store_true",
            help="Only list baremetal nodes in maintenance mode",
        )
        return parser

    def take_action(self, parsed_args):
        provision_state = parsed_args.provision_state
        maintenance = parsed_args.maintenance

        conn = get_cloud_connection()

        query = {}
        if provision_state:
            query.update(dict(provision_state=provision_state))
        if maintenance:
            query.update(dict(maintenance=maintenance))

        baremetal = conn.baremetal.nodes(**query)

        result = [
            [
                b["name"],
                b["power_state"],
                b["provision_state"],
                b["maintenance"],
            ]
            for b in baremetal
        ]

        print(
            tabulate(
                result,
                headers=[
                    "Name",
                    "Power State",
                    "Provision State",
                    "Maintenance",
                ],
                tablefmt="psql",
            )
        )


class BaremetalDeploy(Command):
    def get_parser(self, prog_name):
        parser = super(BaremetalDeploy, self).get_parser(prog_name)

        parser_exc_group = parser.add_mutually_exclusive_group(required=True)
        parser_exc_group.add_argument(
            "--all",
            default=False,
            help="Deploy all baremetal nodes in provision state available",
            action="store_true",
        )
        parser_exc_group.add_argument(
            "--name",
            default=[],
            help="Deploy given baremetal node when in provision state available. May be specified multiple times",
            action="append",
        )
        parser.add_argument(
            "--rebuild",
            default=False,
            help="Rebuild given nodes in active state",
            action="store_true",
        )
        parser.add_argument(
            "--yes-i-really-really-mean-it",
            default=False,
            help="Specify this in connection with '--rebuild --all' to actually rebuild all nodes",
            action="store_true",
        )
        return parser

    def take_action(self, parsed_args):
        all_nodes = parsed_args.all
        names = parsed_args.name
        rebuild = parsed_args.rebuild
        yes_i_really_really_mean_it = parsed_args.yes_i_really_really_mean_it

        if all_nodes and rebuild and not yes_i_really_really_mean_it:
            logger.error(
                "Please confirm that you wish to rebuild all nodes by specifying '--yes-i-really-really-mean-it'"
            )
            return

        conn = get_cloud_connection()

        if all_nodes:
            deploy_nodes = list(conn.baremetal.nodes(details=True))
        else:
            deploy_nodes = [
                conn.baremetal.find_node(name, ignore_missing=True, details=True)
                for name in names
            ]

        for node_idx, node in enumerate(deploy_nodes):
            if not node:
                logger.warning(f"Could not find node {names[node_idx]}")
                continue

            if node.provision_state in ["available", "deploy failed"]:
                provision_state = "active"
            elif (
                node.provision_state == "error"
                or node.provision_state == "active"
                and rebuild
            ):
                provision_state = "rebuild"
            else:
                logger.warning(
                    f"Node {node.name} ({node.id}) not in supported provision state"
                )
                continue

            try:
                conn.baremetal.validate_node(
                    node.id, required=("boot", "deploy", "power")
                )
            except openstack.exceptions.ValidationException:
                logger.warning(f"Node {node.name} ({node.id}) could not be validated")
                continue
            try:
                config_drive = {"meta_data": {}}
                if (
                    "netplan_parameters" in node.extra
                    and node.extra["netplan_parameters"]
                ):
                    config_drive["meta_data"].update(
                        {
                            "netplan_parameters": json.loads(
                                node.extra["netplan_parameters"]
                            )
                        }
                    )
                if "frr_parameters" in node.extra and node.extra["frr_parameters"]:
                    config_drive["meta_data"].update(
                        {"frr_parameters": json.loads(node.extra["frr_parameters"])}
                    )
                conn.baremetal.set_node_provision_state(
                    node.id, provision_state, config_drive=config_drive
                )
            except Exception as exc:
                logger.warning(
                    f"Node {node.name} ({node.id}) could not be moved to active state: {exc}"
                )
                continue


class BaremetalUndeploy(Command):
    def get_parser(self, prog_name):
        parser = super(BaremetalUndeploy, self).get_parser(prog_name)

        parser_exc_group = parser.add_mutually_exclusive_group(required=True)
        parser_exc_group.add_argument(
            "--all",
            default=False,
            help="Undeploy all baremetal nodes",
            action="store_true",
        )
        parser_exc_group.add_argument(
            "--name",
            default=[],
            help="Undeploy given baremetal node. May be specified multiple times",
            action="append",
        )
        parser.add_argument(
            "--yes-i-really-really-mean-it",
            default=False,
            help="Specify this to actually undeploy all nodes",
            action="store_true",
        )
        return parser

    def take_action(self, parsed_args):
        all_nodes = parsed_args.all
        names = parsed_args.name
        yes_i_really_really_mean_it = parsed_args.yes_i_really_really_mean_it

        if all_nodes and not yes_i_really_really_mean_it:
            logger.error(
                "Please confirm that you wish to undeploy all nodes by specifying '--yes-i-really-really-mean-it'"
            )
            return

        conn = get_cloud_connection()

        if all_nodes:
            deploy_nodes = list(conn.baremetal.nodes())
        else:
            deploy_nodes = [
                conn.baremetal.find_node(name, ignore_missing=True, details=False)
                for name in names
            ]

        for node_idx, node in enumerate(deploy_nodes):
            if not node:
                logger.warning(f"Could not find node {names[node_idx]}")
                continue

            if node.provision_state in ["active", "deploy failed", "error"]:
                try:
                    conn.baremetal.set_node_provision_state(node.id, "undeploy")
                except Exception as exc:
                    logger.warning(
                        f"Node {node.name} ({node.id}) could not be moved to available state: {exc}"
                    )
                    continue
            else:
                logger.warning(
                    f"Node {node.name} ({node.id}) not in supported provision state"
                )
```
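`BaremetalDeploy` picks the target provision state from the node's current state. Note the operator precedence in the `elif`: `and` binds tighter than `or`, so nodes in `error` are sent to `rebuild` even when `--rebuild` was not given, while `active` nodes require the flag. A condensed restatement of that decision table:

```python
# Not the real command, just the state selection logic it implements.
def target_state(provision_state, rebuild):
    if provision_state in ["available", "deploy failed"]:
        return "active"
    elif provision_state == "error" or provision_state == "active" and rebuild:
        return "rebuild"
    return None  # unsupported state: the command logs a warning and skips

assert target_state("available", False) == "active"
assert target_state("error", False) == "rebuild"  # no --rebuild needed
assert target_state("active", False) is None      # --rebuild required here
assert target_state("active", True) == "rebuild"
```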
osism/commands/compute.py
CHANGED
```diff
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import time
+import datetime
 
 from cliff.command import Command
 from jc import parse
@@ -322,7 +323,13 @@ class ComputeMigrate(Command):
         parser.add_argument(
             "--no-wait",
             default=False,
-            help="Do not wait for completion of migration",
+            help="Do not wait for completion of migration (Resize of cold migrated instances will not be confirmed!)",
+            action="store_true",
+        )
+        parser.add_argument(
+            "--no-cold-migration",
+            default=False,
+            help="Do not cold migrate instances",
             action="store_true",
         )
         parser.add_argument(
@@ -368,6 +375,7 @@ class ComputeMigrate(Command):
         target = parsed_args.target
         force = parsed_args.force
         no_wait = parsed_args.no_wait
+        no_cold_migration = parsed_args.no_cold_migration
         yes = parsed_args.yes
         domain = parsed_args.domain
         project = parsed_args.project
@@ -393,9 +401,13 @@ class ComputeMigrate(Command):
            logger.info(f"No migratable instances found on node {host}")
 
        for server in result:
-            if server[2]
+            if server[2] in ["ACTIVE", "PAUSED"]:
+                migration_type = "live"
+            elif server[2] in ["SHUTOFF"] and not no_cold_migration:
+                migration_type = "cold"
+            else:
                logger.info(
-                    f"{server[0]} ({server[1]}) in status {server[2]} cannot be
+                    f"{server[0]} ({server[1]}) in status {server[2]} cannot be migrated"
                )
                continue
 
@@ -403,27 +415,248 @@ class ComputeMigrate(Command):
                answer = "yes"
            else:
                answer = prompt(
-                    f"
+                    f"{migration_type.capitalize()} migrate server {server[0]} ({server[1]}) [yes/no]: "
                )
 
            if answer in ["yes", "y"]:
-                logger.info(
-
-                    server[0], host=target, block_migration="auto", force=force
+                logger.info(
+                    f"{migration_type.capitalize()} migrating server {server[0]}"
                )
+                if migration_type == "live":
+                    conn.compute.live_migrate_server(
+                        server[0], host=target, block_migration="auto", force=force
+                    )
+                elif migration_type == "cold":
+                    conn.compute.migrate_server(server[0], host=target)
 
                if not no_wait:
-
-                    while inner_wait:
+                    while True:
                        time.sleep(2)
                        s = conn.compute.get_server(server[0])
-                        if
+                        if (
+                            migration_type == "live"
+                            and s.status in ["MIGRATING"]
+                            or migration_type == "cold"
+                            and s.status in ["RESIZE"]
+                        ):
                            logger.info(
-                                f"
+                                f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) is still in progress"
                            )
-
+                        elif migration_type == "cold" and s.status in ["VERIFY_RESIZE"]:
+                            try:
+                                conn.compute.confirm_server_resize(s)
+                                logger.info(
+                                    f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) confirmed"
+                                )
+                            except Exception as exc:
+                                logger.error(
+                                    f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) could not be confirmed"
+                                )
+                                raise exc
+                            # NOTE: There seems to be no simple way to check whether the resize
+                            # has been confirmed. The state is still "VERIFY_RESIZE" afterwards.
+                            # Therefore we drop out without waiting for the "SHUTOFF" state
+                            break
                        else:
-
+                            logger.info(
+                                f"{migration_type.capitalize()} migration of {server[0]} ({server[1]}) completed with status {s.status}"
+                            )
+                            break
+
+
+class ComputeMigrationList(Command):
+    def get_parser(self, prog_name):
+        parser = super(ComputeMigrationList, self).get_parser(prog_name)
+        parser.add_argument(
+            "--host",
+            default=None,
+            type=str,
+            help="Only list migrations with the given host as source or destination",
+        )
+        parser.add_argument(
+            "--server",
+            default=None,
+            type=str,
+            help="Only list migrations for the given instance (name or ID)",
+        )
+        parser.add_argument(
+            "--user",
+            default=None,
+            type=str,
+            help="Only list migrations for the given user (name or ID)",
+        )
+        parser.add_argument(
+            "--user-domain",
+            default=None,
+            type=str,
+            help="Domain the user belongs to (name or ID)",
+        )
+        parser.add_argument(
+            "--project",
+            default=None,
+            type=str,
+            help="Only list migrations for the given project (name or ID)",
+        )
+        parser.add_argument(
+            "--project-domain",
+            default=None,
+            type=str,
+            help="Domain the project belongs to (name or ID)",
+        )
+        parser.add_argument(
+            "--status",
+            default=None,
+            type=str,
+            help="Only list migrations with the given status",
+        )
+        parser.add_argument(
+            "--type",
+            default=None,
+            choices=["migration", "live-migration", "evacuation", "resize"],
+            type=str,
+            help="Only list migrations with the given type",
+        )
+        parser.add_argument(
+            "--changes-since",
+            default=None,
+            type=datetime.datetime.fromisoformat,
+            help="Only list migrations last chganged since the given date in ISO 8601 format (CCYY-MM-DDThh:mm:ss±hh:mm)",
+        )
+        parser.add_argument(
+            "--changes-before",
+            default=None,
+            type=datetime.datetime.fromisoformat,
+            help="Only list migrations last chganged before the given date in ISO 8601 format (CCYY-MM-DDThh:mm:ss±hh:mm)",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        host = parsed_args.host
+        server = parsed_args.server
+        user = parsed_args.user
+        user_domain = parsed_args.user_domain
+        project = parsed_args.project
+        project_domain = parsed_args.project_domain
+        status = parsed_args.status
+        migration_type = parsed_args.type
+        changes_since = parsed_args.changes_since
+        changes_before = parsed_args.changes_before
+
+        if changes_before and changes_since:
+            if not changes_since <= changes_before:
+                logger.error(
+                    "changes-since needs to be less or equal to changes-before"
+                )
+                return
+
+        conn = get_cloud_connection()
+
+        user_id = None
+        if user:
+            user_query = {}
+
+            if user_domain:
+                u_d = conn.identity.find_domain(user_domain, ignore_missing=True)
+                if u_d and "id" in u_d:
+                    user_query = dict(domain_id=u_d.id)
+                else:
+                    logger.error(f"No domain found for {user_domain}")
+                    return
+
+            u = conn.identity.find_user(user, ignore_missing=True, **user_query)
+            if u and "id" in u:
+                user_id = u.id
+            else:
+                logger.error(f"No user found for {user}")
+                return
+
+        project_id = None
+        if project:
+            project_query = {}
+
+            if project_domain:
+                p_d = conn.identity.find_domain(project_domain, ignore_missing=True)
+                if p_d and "id" in p_d:
+                    project_query = dict(domain_id=p_d.id)
+                else:
+                    logger.error(f"No domain found for {project_domain}")
+                    return
+
+            p = conn.identity.find_project(
+                project, ignore_missing=True, **project_query
+            )
+            if p and "id" in p:
+                project_id = p.id
+            else:
+                logger.error(f"No project found for {project}")
+                return
+
+        instance_uuid = None
+        if server:
+            try:
+                s = conn.compute.find_server(
+                    server, details=False, ignore_missing=False, all_projects=True
+                )
+                if s and "id" in s:
+                    instance_uuid = s.id
+                else:
+                    raise openstack.exceptions.NotFoundException
+            except openstack.exceptions.DuplicateResource:
+                logger.error(f"Multiple servers where found for {server}")
+                return
+            except openstack.exceptions.NotFoundException:
+                logger.error(f"No server found for {server}")
+                return
+
+        query = {}
+        if host:
+            query.update(dict(host=host))
+        if instance_uuid:
+            query.update(dict(instance_uuid=instance_uuid))
+        if status:
+            query.update(dict(status=status))
+        if migration_type:
+            query.update(dict(migration_type=migration_type))
+        if user_id:
+            query.update(dict(user_id=user_id))
+        if project_id:
+            query.update(dict(project_id=project_id))
+        if changes_since:
+            query.update(dict(changes_since=changes_since))
+        if changes_before:
+            query.update(dict(changes_before=changes_before))
+
+        migrations = conn.compute.migrations(**query)
+        result = [
+            [
+                m.source_compute,
+                m.dest_compute,
+                m.status,
+                m.migration_type,
+                m["instance_uuid"],
+                m.user_id,
+                m.created_at,
+                m.updated_at,
+            ]
+            for m in migrations
+        ]
+
+        print(
+            tabulate(
+                result,
+                headers=[
+                    "Source",
+                    "Destintion",
+                    "Status",
+                    "Type",
+                    "Server UUID",
+                    "User",
+                    "Created At",
+                    "Updated At",
+                ],
+                tablefmt="psql",
+            )
+        )
 
 
 class ComputeStart(Command):
```
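The new waiting loop is status driven: live migrations stay in `MIGRATING`, cold migrations pass through `RESIZE` and then park in `VERIFY_RESIZE`, where the command calls `confirm_server_resize()` and stops polling (as the in-code NOTE explains, the status does not change after confirmation). A condensed restatement of the status handling; the real loop sleeps two seconds between steps and logs instead of returning labels:

```python
def poll_step(migration_type, status):
    if (migration_type == "live" and status == "MIGRATING") or (
        migration_type == "cold" and status == "RESIZE"
    ):
        return "wait"     # still in progress, poll again
    if migration_type == "cold" and status == "VERIFY_RESIZE":
        return "confirm"  # confirm_server_resize(), then stop polling
    return "done"         # reached a final status

assert poll_step("cold", "RESIZE") == "wait"
assert poll_step("cold", "VERIFY_RESIZE") == "confirm"
assert poll_step("live", "ACTIVE") == "done"
```

The unparenthesized condition in the shipped code relies on `and` binding tighter than `or`, so it parses exactly as the parenthesized form above.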
osism/commands/netbox.py
CHANGED
```diff
@@ -28,9 +28,7 @@ class Ironic(Command):
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
 
-        task = conductor.
-            force_update=parsed_args.force_update
-        )
+        task = conductor.sync_ironic.delay(force_update=parsed_args.force_update)
         if wait:
             logger.info(
                 f"Task {task.task_id} (sync ironic) is running. Wait. No more output."
@@ -38,6 +36,27 @@ class Ironic(Command):
             task.wait(timeout=None, interval=0.5)
 
 
+class Sync(Command):
+    def get_parser(self, prog_name):
+        parser = super(Sync, self).get_parser(prog_name)
+        parser.add_argument(
+            "--no-wait",
+            help="Do not wait until the sync has been completed",
+            action="store_true",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        wait = not parsed_args.no_wait
+
+        task = conductor.sync_netbox.delay()
+        if wait:
+            logger.info(
+                f"Task {task.task_id} (sync netbox) is running. Wait. No more output."
+            )
+            task.wait(timeout=None, interval=0.5)
+
+
 class Manage(Command):
     def get_parser(self, prog_name):
         parser = super(Manage, self).get_parser(prog_name)
@@ -87,7 +106,7 @@ class Manage(Command):
 
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
-        arguments = []
+        arguments = ["run"]
 
         if parsed_args.no_netbox_wait:
             arguments.append("--no-wait")
```
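The new `Sync` command follows the same dispatch pattern as `Ironic`: fire a Celery task and optionally block on its result. A sketch of that pattern, assuming the module's existing `from osism.tasks import conductor` import and a running broker and worker:

```python
# Sketch only: nothing executes without a reachable Celery broker/worker.
from osism.tasks import conductor

task = conductor.sync_netbox.delay()   # returns an AsyncResult immediately
print(task.task_id)                    # the id shown in the log message
task.wait(timeout=None, interval=0.5)  # block until done, polling every 0.5 s
```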
osism/settings.py
CHANGED
```diff
@@ -35,8 +35,8 @@ INVENTORY_RECONCILER_SCHEDULE = float(
 
 OSISM_API_URL = os.getenv("OSISM_API_URL", None)
 
-
-"
+NETBOX_FILTER_CONDUCTOR = os.getenv(
+    "NETBOX_FILTER_CONDUCTOR",
     "[{'state': 'active', 'tag': ['managed-by-ironic']}]",
 )
 
```
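`NETBOX_FILTER_CONDUCTOR` is a YAML string: the conductor parses it with `yaml.safe_load()` and requires the result to be a list of filter mappings (see `get_nb_device_query_list()` below). A sketch of overriding it with two queries; the `site` key is a made-up illustration, since only the tail of the supported-filter list (`tag`, `state`) is visible in this diff:

```python
import os
import yaml

# Hypothetical override with two filter queries (the 'site' value is invented):
os.environ["NETBOX_FILTER_CONDUCTOR"] = (
    "[{'state': 'active', 'tag': ['managed-by-ironic']},"
    " {'state': 'active', 'site': 'rack-1'}]"
)

filters = yaml.safe_load(os.environ["NETBOX_FILTER_CONDUCTOR"])
assert isinstance(filters, list)  # the conductor raises TypeError otherwise
print(filters[1])                 # {'state': 'active', 'site': 'rack-1'}
```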
osism/tasks/ansible.py
CHANGED
```diff
@@ -1,8 +1,9 @@
 # SPDX-License-Identifier: Apache-2.0
 
 from celery import Celery
+from pottery import Redlock
 
-from osism import settings
+from osism import settings, utils
 from osism.tasks import Config, run_ansible_in_environment
 
 app = Celery("ansible")
@@ -11,9 +12,14 @@ app.config_from_object(Config)
 
 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
-
-
+    lock = Redlock(
+        key="lock_osism_tasks_ansible_setup_periodic_tasks",
+        masters={utils.redis},
     )
+    if settings.GATHER_FACTS_SCHEDULE > 0 and lock.acquire(timeout=10):
+        sender.add_periodic_task(
+            settings.GATHER_FACTS_SCHEDULE, gather_facts.s(), expires=10
+        )
 
 
 @app.task(bind=True, name="osism.tasks.ansible.gather_facts")
```
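With multiple worker replicas, each one runs `setup_periodic_tasks`, which would register the same schedule several times. The Redlock guard lets only the first replica register it; the others fail to acquire within 10 seconds and skip. A minimal standalone sketch of the pattern, assuming a shared Redis reachable at the given URL (the same pattern is applied in osism/tasks/reconciler.py below):

```python
from pottery import Redlock
from redis import Redis

redis = Redis.from_url("redis://localhost:6379/0")  # assumed shared instance

lock = Redlock(
    key="lock_example_setup_periodic_tasks",
    masters={redis},
)
if lock.acquire(timeout=10):
    # Only the first process to get here runs the one-time setup. The lock
    # is deliberately not released; it simply expires after its auto-release
    # time, by which point every replica has already passed this point.
    print("this replica registered the periodic task")
```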
osism/tasks/conductor.py
CHANGED
```diff
@@ -10,6 +10,7 @@ import jinja2
 from loguru import logger
 from pottery import Redlock
 import yaml
+import json
 
 from osism import settings
 from osism import utils
@@ -33,7 +34,7 @@ def get_nb_device_query_list():
         "tag",
         "state",
     ]
-        nb_device_query_list = yaml.safe_load(settings.
+        nb_device_query_list = yaml.safe_load(settings.NETBOX_FILTER_CONDUCTOR)
         if type(nb_device_query_list) is not list:
             raise TypeError
         for nb_device_query in nb_device_query_list:
@@ -55,11 +56,11 @@ def get_nb_device_query_list():
                     raise ValueError(f"Invalid name {value_name} for {key}")
     except (yaml.YAMLError, TypeError):
         logger.error(
-            f"Setting
+            f"Setting NETBOX_FILTER_CONDUCTOR needs to be an array of mappings containing supported netbox device filters: {supported_nb_device_filters}"
         )
         nb_device_query_list = []
     except ValueError as exc:
-        logger.error(f"Unknown value in
+        logger.error(f"Unknown value in NETBOX_FILTER_CONDUCTOR: {exc}")
         nb_device_query_list = []
 
     return nb_device_query_list
@@ -145,8 +146,13 @@ def get_ironic_parameters(self):
         return {}
 
 
-@app.task(bind=True, name="osism.tasks.conductor.
-def
+@app.task(bind=True, name="osism.tasks.conductor.sync_netbox")
+def sync_netbox(self, force_update=False):
+    logger.info("Not implemented")
+
+
+@app.task(bind=True, name="osism.tasks.conductor.sync_ironic")
+def sync_ironic(self, force_update=False):
     def deep_compare(a, b, updates):
         """
         Find items in a that do not exist in b or are different.
@@ -300,6 +306,27 @@ def sync_netbox_with_ironic(self, force_update=False):
             )
         )
         node_attributes.update({"resource_class": device.name})
+        # NOTE: Write metadata used for provisioning into 'extra' field, so that it is available during node deploy without querying the netbox again
+        if "extra" not in node_attributes:
+            node_attributes["extra"] = {}
+        if (
+            "netplan_parameters" in device.custom_fields
+            and device.custom_fields["netplan_parameters"]
+        ):
+            node_attributes["extra"].update(
+                {
+                    "netplan_parameters": json.dumps(
+                        device.custom_fields["netplan_parameters"]
+                    )
+                }
+            )
+        if (
+            "frr_parameters" in device.custom_fields
+            and device.custom_fields["frr_parameters"]
+        ):
+            node_attributes["extra"].update(
+                {"frr_parameters": json.dumps(device.custom_fields["frr_parameters"])}
+            )
         ports_attributes = [
             dict(address=interface.mac_address)
             for interface in node_interfaces
@@ -307,7 +334,7 @@ def sync_netbox_with_ironic(self, force_update=False):
         ]
 
         lock = Redlock(
-            key=f"
+            key=f"lock_osism_tasks_conductor_sync_ironic-{device.name}",
             masters={utils.redis},
             auto_release_time=600,
         )
```
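The `extra` block mirrors what `BaremetalDeploy` reads back: NetBox custom fields are structured data, but the Ironic node's `extra` field is populated here with JSON strings (`json.dumps`), which the deploy command turns back into structures (`json.loads`) for the config drive. A small round-trip sketch with invented values:

```python
import json

# Hypothetical NetBox custom field (a dict, not a string):
custom_fields = {"frr_parameters": {"asn": 65000, "loopback": "10.0.0.1"}}

# sync_ironic stores it on the Ironic node as a JSON string:
extra = {"frr_parameters": json.dumps(custom_fields["frr_parameters"])}

# BaremetalDeploy later rebuilds the structure for config_drive["meta_data"]:
config_drive = {"meta_data": {"frr_parameters": json.loads(extra["frr_parameters"])}}
assert config_drive["meta_data"]["frr_parameters"] == custom_fields["frr_parameters"]
```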
osism/tasks/netbox.py
CHANGED
```diff
@@ -151,6 +151,11 @@ def get_interfaces_by_device(self, device_name):
     return utils.nb.dcim.interfaces.filter(device=device_name)
 
 
+@app.task(bind=True, name="osism.tasks.netbox.get_addresses_by_device_and_interface")
+def get_addresses_by_device_and_interface(self, device_name, interface_name):
+    return utils.nb.dcim.addresses.filter(device=device_name, interface=interface_name)
+
+
 @app.task(bind=True, name="osism.tasks.netbox.manage")
 def manage(self, *arguments, publish=True, locking=False, auto_release_time=3600):
     netbox_manager_env = {
```
osism/tasks/reconciler.py
CHANGED
```diff
@@ -6,6 +6,7 @@ import subprocess
 from celery import Celery
 from loguru import logger
 from pottery import Redlock
+
 from osism import settings, utils
 from osism.tasks import Config
 
@@ -15,9 +16,14 @@ app.config_from_object(Config)
 
 @app.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
-
-
+    lock = Redlock(
+        key="lock_osism_tasks_reconciler_setup_periodic_tasks",
+        masters={utils.redis},
     )
+    if settings.INVENTORY_RECONCILER_SCHEDULE > 0 and lock.acquire(timeout=10):
+        sender.add_periodic_task(
+            settings.INVENTORY_RECONCILER_SCHEDULE, run_on_change.s(), expires=10
+        )
 
 
 @app.task(bind=True, name="osism.tasks.reconciler.run")
```
{osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: osism
-Version: 0.20250514.0
+Version: 0.20250530.0
 Summary: OSISM manager interface
 Home-page: https://github.com/osism/python-osism
 Author: OSISM GmbH
@@ -28,15 +28,15 @@ Requires-Dist: Jinja2==3.1.6
 Requires-Dist: PyYAML==6.0.2
 Requires-Dist: ara==1.7.2
 Requires-Dist: celery[redis]==5.5.2
-Requires-Dist: cliff==4.
+Requires-Dist: cliff==4.10.0
 Requires-Dist: deepdiff==8.5.0
 Requires-Dist: docker==7.1.0
 Requires-Dist: dtrack-auditor==1.5.0
 Requires-Dist: fastapi==0.115.12
 Requires-Dist: flower==2.0.1
-Requires-Dist: hiredis==3.
+Requires-Dist: hiredis==3.2.1
 Requires-Dist: jc==1.25.5
-Requires-Dist: keystoneauth1==5.
+Requires-Dist: keystoneauth1==5.11.0
 Requires-Dist: kombu==5.5.3
 Requires-Dist: kubernetes==32.0.1
 Requires-Dist: loguru==0.7.3
@@ -45,20 +45,19 @@ Requires-Dist: netmiko==4.5.0
 Requires-Dist: openstacksdk==4.5.0
 Requires-Dist: pottery==3.0.1
 Requires-Dist: prompt-toolkit==3.0.51
-Requires-Dist:
-Requires-Dist: pynetbox==7.4.1
+Requires-Dist: pynetbox==7.5.0
 Requires-Dist: pytest-testinfra==10.2.2
 Requires-Dist: python-dateutil==2.9.0.post0
-Requires-Dist: setuptools==80.
+Requires-Dist: setuptools==80.9.0
 Requires-Dist: sqlmodel==0.0.24
-Requires-Dist: sushy==5.
+Requires-Dist: sushy==5.6.0
 Requires-Dist: tabulate==0.9.0
 Requires-Dist: transitions==0.9.2
 Requires-Dist: uvicorn[standard]==0.34.2
 Requires-Dist: watchdog==6.0.0
 Provides-Extra: ansible
 Requires-Dist: ansible-runner==2.4.1; extra == "ansible"
-Requires-Dist: ansible-core==2.18.
+Requires-Dist: ansible-core==2.18.6; extra == "ansible"
 Provides-Extra: openstack-image-manager
 Requires-Dist: openstack-image-manager==0.20250508.0; extra == "openstack-image-manager"
 Dynamic: author
```
{osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/RECORD
CHANGED
```diff
@@ -1,20 +1,21 @@
 osism/__init__.py,sha256=1UiNTBus0V0f2AbZQzAtVtu6zkfCCrw0OTq--NwFAqY,341
 osism/__main__.py,sha256=ILe4gu61xEISiBsxanqTQIdSkV-YhpZXTRlguCYyssk,141
-osism/api.py,sha256=
+osism/api.py,sha256=t3HebSzk6fyY7bLJD9P95oEL1qWYXzpX6Yk1o_nVkMo,4356
 osism/main.py,sha256=Dt2-9sLXcS-Ny4DAz7hrha-KRc7zd7BFUTRdfs_X8z4,893
-osism/settings.py,sha256=
+osism/settings.py,sha256=mkvbxVQ64ZD7Ypk-bRePHn0gZ5j6Lcu2a578eLU0gQs,1309
 osism/actions/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/commands/__init__.py,sha256=Ag4wX_DCgXRdoLn6t069jqb3DdRylsX2nyYkiyCx4uk,456
 osism/commands/apply.py,sha256=mH3-NctgevVzP_1IW92FQeiYMCPB49K5hXbxmTY2vnA,16795
+osism/commands/baremetal.py,sha256=EDLorDkt3iUblIZb-Fn6fonnMewVEOyrLBTlfxuQ2tc,8043
 osism/commands/compose.py,sha256=iqzG7mS9E1VWaLNN6yQowjOqiHn3BMdj-yfXb3Dc4Ok,1200
-osism/commands/compute.py,sha256=
+osism/commands/compute.py,sha256=cgqXWJa5wAvn-7e3FWCgX6hie_aK0yrKRkcNzjLXwDY,25799
 osism/commands/configuration.py,sha256=sPe8b0dVKFRbr30xoeVdAnHbGwCwgUh0xa_Vzv5pSQQ,954
 osism/commands/console.py,sha256=8BPz1hio5Wi6kONVAWFuSqkDRrMcLEYeFIY8dbtN6e4,3218
 osism/commands/container.py,sha256=Fku2GaCM3Idq_FxExUtNqjrEM0XYjpVvXmueSVO8S_c,1601
 osism/commands/get.py,sha256=ryytjtXWmlMV0NucP5tGkMZu0nIlC4xVtjRk4iMZ06c,8967
 osism/commands/log.py,sha256=2IpYuosC7FZwwLvM8HmKSU1NRNIelVVYzqjjVMCrOJk,4072
 osism/commands/manage.py,sha256=WxUZEhylZj2IhydAe3BAr3S5ED6opG243skfSq5q41s,11971
-osism/commands/netbox.py,sha256=
+osism/commands/netbox.py,sha256=PagHlAwEyzKzhrqi4nikkD8sjnVqAzzbDt8PVkl2j9k,6651
 osism/commands/noset.py,sha256=7zDFuFMyNpo7DUOKcNiYV8nodtdMOYFp5LDPcuJhlZ8,1481
 osism/commands/reconciler.py,sha256=xOyPzQj66xwjdQd2ysCTHX2yBvmMVMppUDZTas6voXc,2882
 osism/commands/server.py,sha256=avmoOv5rjOi-fN2A-27cPwOtiy2Q2j6UFtCh3QrfWAI,7512
@@ -36,20 +37,20 @@ osism/plugins/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/services/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
 osism/services/listener.py,sha256=eEamlQsJqCuU9K2QFmk3yM9LAJZEanVcTLtGMsNCKjs,9783
 osism/tasks/__init__.py,sha256=ZEu_KYsapTYp0etr-rLqie_NT_LndHDDpx53xITru5Y,8691
-osism/tasks/ansible.py,sha256=
+osism/tasks/ansible.py,sha256=_2zrHwynwwEv9nDnX-LbNCzcwy9dTUGo_yyutt34HyQ,1346
 osism/tasks/ceph.py,sha256=eIQkah3Kj4INtOkF9kTjHbXJ3_J2lg48EWJKfHc-UYw,615
-osism/tasks/conductor.py,sha256=
+osism/tasks/conductor.py,sha256=okLlmuSsJpmu6zQOR_dXX0LMQI1xFBlHRm76qB6txRk,20502
 osism/tasks/kolla.py,sha256=wJQpWn_01iWLkr7l7T7RNrQGfRgsgmYi4WQlTmNGvew,618
 osism/tasks/kubernetes.py,sha256=VzXq_VrYU_CLm4cOruqnE3Kq2ydfO9glZ3p0bp3OYoc,625
-osism/tasks/netbox.py,sha256=
+osism/tasks/netbox.py,sha256=Dq2hg2yiv_dHV-zygJgy9T1ZhTSE32_a34fhfURUfTA,5912
 osism/tasks/openstack.py,sha256=g15tCll5vP1pC6ysxRCTZxplsdGmXbxaCH3k1Qdv5Xg,6367
-osism/tasks/reconciler.py,sha256=
+osism/tasks/reconciler.py,sha256=tnZEZZpveBCK4vHZkHE6wDcHfJAlsPcSjIVxB5ItSFM,1981
 osism/utils/__init__.py,sha256=_Y4qchR5yyI_JKhBWd_jcsvDLYZjxO0c3iMA_VRQl58,4304
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
+osism-0.20250530.0.dist-info/licenses/AUTHORS,sha256=EKFIR9F27AvoEXp1cA6FkGbjEOFt4Rcbipr5RJc7jSs,64
+osism-0.20250530.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+osism-0.20250530.0.dist-info/METADATA,sha256=p5ZH0PS7XC8iz2s-FFeGy10nd93NDinuBAS-YXCmPu4,2903
+osism-0.20250530.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+osism-0.20250530.0.dist-info/entry_points.txt,sha256=cTxzUWJff6JKe1Jv-iMMKayIiC26S1_Fph3k5faiQvw,3252
+osism-0.20250530.0.dist-info/pbr.json,sha256=3Gdc4g_dd2jHbkHZk28onO59wzLpSeDVrRWZwOrw3Wk,47
+osism-0.20250530.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
+osism-0.20250530.0.dist-info/RECORD,,
```
{osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/entry_points.txt
CHANGED
```diff
@@ -26,11 +26,15 @@ log ansible = osism.commands.log:Ansible
 log container = osism.commands.log:Container
 log file = osism.commands.log:File
 log opensearch = osism.commands.log:Opensearch
+manage baremetal deploy = osism.commands.baremetal:BaremetalDeploy
+manage baremetal list = osism.commands.baremetal:BaremetalList
+manage baremetal undeploy = osism.commands.baremetal:BaremetalUndeploy
 manage compute disable = osism.commands.compute:ComputeDisable
 manage compute enable = osism.commands.compute:ComputeEnable
 manage compute evacuate = osism.commands.compute:ComputeEvacuate
 manage compute list = osism.commands.compute:ComputeList
 manage compute migrate = osism.commands.compute:ComputeMigrate
+manage compute migration list = osism.commands.compute:ComputeMigrationList
 manage compute start = osism.commands.compute:ComputeStart
 manage compute stop = osism.commands.compute:ComputeStop
 manage flavors = osism.commands.manage:Flavors
```
osism-0.20250530.0.dist-info/licenses/AUTHORS
ADDED
```diff
@@ -0,0 +1 @@
+renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
```
osism-0.20250530.0.dist-info/pbr.json
ADDED
```diff
@@ -0,0 +1 @@
+{"git_version": "424407f", "is_release": false}
```
osism-0.20250514.0.dist-info/licenses/AUTHORS
REMOVED
```diff
@@ -1 +0,0 @@
-Christian Berendt <berendt@osism.tech>
```
osism-0.20250514.0.dist-info/pbr.json
REMOVED
```diff
@@ -1 +0,0 @@
-{"git_version": "6bd0c0e", "is_release": false}
```
{osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/licenses/LICENSE
File without changes

{osism-0.20250514.0.dist-info → osism-0.20250530.0.dist-info}/top_level.txt
File without changes