osism 0.20250709.0-py3-none-any.whl → 0.20250804.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/commands/baremetal.py +202 -2
- osism/commands/compose.py +1 -1
- osism/commands/console.py +95 -7
- osism/commands/container.py +1 -1
- osism/commands/log.py +1 -1
- osism/commands/netbox.py +33 -10
- osism/commands/sonic.py +197 -2
- osism/tasks/__init__.py +43 -0
- osism/tasks/conductor/__init__.py +2 -2
- osism/tasks/conductor/config.py +3 -3
- osism/tasks/conductor/ironic.py +27 -5
- osism/tasks/conductor/sonic/interface.py +10 -0
- osism/tasks/conductor/utils.py +9 -2
- osism/utils/__init__.py +34 -1
- osism/utils/ssh.py +250 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/METADATA +5 -5
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/RECORD +23 -22
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/entry_points.txt +6 -0
- osism-0.20250804.0.dist-info/pbr.json +1 -0
- osism-0.20250709.0.dist-info/pbr.json +0 -1
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/WHEEL +0 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/licenses/AUTHORS +0 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/top_level.txt +0 -0
osism/commands/baremetal.py
CHANGED
@@ -4,6 +4,8 @@ from cliff.command import Command

 import tempfile
 import os
+import subprocess
+import threading
 from loguru import logger
 import openstack
 from tabulate import tabulate
@@ -13,6 +15,9 @@ from openstack.baremetal import configdrive as configdrive_builder

 from osism.commands import get_cloud_connection
 from osism import utils
+from osism.tasks.conductor.netbox import get_nb_device_query_list_ironic
+from osism.tasks import netbox
+from osism.utils.ssh import cleanup_ssh_known_hosts_for_node


 class BaremetalList(Command):
@@ -57,6 +62,8 @@ class BaremetalList(Command):
             for b in baremetal
         ]

+        result.sort(key=lambda x: x[0])
+
         print(
             tabulate(
                 result,
@@ -206,7 +213,7 @@ class BaremetalDeploy(Command):

         playbook = []
         play = {
-            "name": "Run bootstrap
+            "name": "Run bootstrap",
             "hosts": "localhost",
             "connection": "local",
             "gather_facts": True,
@@ -216,6 +223,15 @@ class BaremetalDeploy(Command):
                 "osism.commons.hosts",
                 "osism.commons.operator",
             ],
+            "tasks": [
+                {
+                    "name": "Restart rsyslog service after hostname change",
+                    "ansible.builtin.systemd": {
+                        "name": "rsyslog",
+                        "state": "restarted",
+                    },
+                }
+            ],
         }
         play["vars"].update(
             {"hostname_name": node.name, "hosts_type": "template"}
@@ -321,9 +337,30 @@ class BaremetalUndeploy(Command):
             if not node:
                 continue

-            if node.provision_state in [
+            if node.provision_state in [
+                "active",
+                "wait call-back",
+                "deploy failed",
+                "error",
+            ]:
                 try:
                     node = conn.baremetal.set_node_provision_state(node.id, "undeploy")
+                    logger.info(
+                        f"Successfully initiated undeploy for node {node.name} ({node.id})"
+                    )
+
+                    # Clean up SSH known_hosts entries for the undeployed node
+                    logger.info(f"Cleaning up SSH known_hosts entries for {node.name}")
+                    result = cleanup_ssh_known_hosts_for_node(node.name)
+                    if result:
+                        logger.info(
+                            f"SSH known_hosts cleanup completed successfully for {node.name}"
+                        )
+                    else:
+                        logger.warning(
+                            f"SSH known_hosts cleanup completed with warnings for {node.name}"
+                        )
+
                 except Exception as exc:
                     logger.warning(
                         f"Node {node.name} ({node.id}) could not be moved to available state: {exc}"
@@ -333,3 +370,166 @@ class BaremetalUndeploy(Command):
             logger.warning(
                 f"Node {node.name} ({node.id}) not in supported provision state"
             )
+
+
+class BaremetalPing(Command):
+    def get_parser(self, prog_name):
+        parser = super(BaremetalPing, self).get_parser(prog_name)
+        parser.add_argument(
+            "name",
+            nargs="?",
+            type=str,
+            help="Ping specific baremetal node by name",
+        )
+        return parser
+
+    def _ping_host(self, host, results, host_name):
+        """Ping a host 3 times and store results."""
+        try:
+            result = subprocess.run(
+                ["ping", "-c", "3", "-W", "5", host],
+                capture_output=True,
+                text=True,
+                timeout=20,
+            )
+
+            if result.returncode == 0:
+                output_lines = result.stdout.strip().split("\n")
+                stats_line = [line for line in output_lines if "packet loss" in line]
+                if stats_line:
+                    loss_info = stats_line[0]
+                    if "0% packet loss" in loss_info:
+                        status = "SUCCESS"
+                    else:
+                        status = f"PARTIAL ({loss_info.split(',')[2].strip()})"
+                else:
+                    status = "SUCCESS"
+
+                time_lines = [
+                    line
+                    for line in output_lines
+                    if "round-trip" in line or "rtt" in line
+                ]
+                if time_lines:
+                    time_info = (
+                        time_lines[0].split("=")[-1].strip()
+                        if "=" in time_lines[0]
+                        else "N/A"
+                    )
+                else:
+                    time_info = "N/A"
+            else:
+                status = "FAILED"
+                time_info = "N/A"
+
+        except (
+            subprocess.TimeoutExpired,
+            subprocess.CalledProcessError,
+            Exception,
+        ) as e:
+            status = "ERROR"
+            time_info = str(e)[:50]
+
+        results[host_name] = {"host": host, "status": status, "time_info": time_info}
+
+    def take_action(self, parsed_args):
+        name = parsed_args.name
+
+        if not utils.nb:
+            logger.error("NetBox connection not available")
+            return
+
+        conn = get_cloud_connection()
+
+        try:
+            if name:
+                devices = [utils.nb.dcim.devices.get(name=name)]
+                if not devices[0]:
+                    logger.error(f"Device {name} not found in NetBox")
+                    return
+            else:
+                # Use the NETBOX_FILTER_CONDUCTOR_IRONIC setting to get devices
+                devices = set()
+                nb_device_query_list = get_nb_device_query_list_ironic()
+                for nb_device_query in nb_device_query_list:
+                    devices |= set(netbox.get_devices(**nb_device_query))
+                devices = list(devices)
+
+                # Additionally filter by power state and provision state
+                filtered_devices = []
+                for device in devices:
+                    if (
+                        hasattr(device, "custom_fields")
+                        and device.custom_fields
+                        and device.custom_fields.get("power_state") == "power on"
+                        and device.custom_fields.get("provision_state") == "active"
+                    ):
+                        filtered_devices.append(device)
+                devices = filtered_devices
+
+            if not devices:
+                logger.info(
+                    "No devices found matching criteria (managed-by-ironic, power on, active)"
+                )
+                return
+
+            ping_candidates = []
+            for device in devices:
+                if device.primary_ip4:
+                    ip_address = str(device.primary_ip4.address).split("/")[0]
+                    ping_candidates.append({"name": device.name, "ip": ip_address})
+                else:
+                    logger.warning(f"Device {device.name} has no primary IPv4 address")
+
+            if not ping_candidates:
+                logger.info("No devices found with primary IPv4 addresses")
+                return
+
+            logger.info(f"Pinging {len(ping_candidates)} nodes (3 pings each)...")
+
+            results = {}
+            threads = []
+
+            for candidate in ping_candidates:
+                thread = threading.Thread(
+                    target=self._ping_host,
+                    args=(candidate["ip"], results, candidate["name"]),
+                )
+                threads.append(thread)
+                thread.start()
+
+            for thread in threads:
+                thread.join()
+
+            table_data = []
+            success_count = 0
+            failed_count = 0
+
+            for device_name in sorted(results.keys()):
+                result = results[device_name]
+                table_data.append(
+                    [device_name, result["host"], result["status"], result["time_info"]]
+                )
+
+                if result["status"] == "SUCCESS":
+                    success_count += 1
+                elif result["status"].startswith("PARTIAL"):
+                    failed_count += 1
+                else:
+                    failed_count += 1
+
+            print(
+                tabulate(
+                    table_data,
+                    headers=["Name", "IP Address", "Status", "Time Info"],
+                    tablefmt="psql",
+                )
+            )
+
+            print(
+                f"\nSummary: {success_count} successful, {failed_count} failed/partial out of {len(ping_candidates)} total"
+            )
+
+        except Exception as e:
+            logger.error(f"Error during ping operation: {e}")
+            return
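The new BaremetalPing command fans out one thread per node, has each thread run three ping probes with a hard timeout, collects the per-node status in a shared dict, and prints a tabulate summary. A minimal standalone sketch of that pattern follows; the node names and addresses are hypothetical.

import subprocess
import threading

from tabulate import tabulate


def ping_host(host, results, name):
    # Three probes, 5 s per-probe wait, 20 s overall cap - mirrors _ping_host above
    try:
        proc = subprocess.run(
            ["ping", "-c", "3", "-W", "5", host],
            capture_output=True,
            text=True,
            timeout=20,
        )
        results[name] = "SUCCESS" if proc.returncode == 0 else "FAILED"
    except Exception as exc:
        results[name] = f"ERROR: {str(exc)[:50]}"


# Hypothetical candidates; the real command builds this list from NetBox devices
candidates = [
    {"name": "node-0", "ip": "192.0.2.10"},
    {"name": "node-1", "ip": "192.0.2.11"},
]
results = {}
threads = [
    threading.Thread(target=ping_host, args=(c["ip"], results, c["name"]))
    for c in candidates
]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(tabulate(sorted(results.items()), headers=["Name", "Status"], tablefmt="psql"))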
osism/commands/compose.py
CHANGED
@@ -26,7 +26,7 @@ class Run(Command):
         ssh_command = (
             f"docker compose --project-directory=/opt/{environment} {arguments}"
         )
-        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR"
+        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/share/known_hosts"

         # FIXME: use paramiko or something else more Pythonic + make operator user + key configurable
         subprocess.call(
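The single functional change here, repeated in console.py, container.py, and log.py below, is pointing the hardened SSH options at a persistent known_hosts file. A minimal sketch of how that option string composes into the remote call; the host, environment, and arguments are hypothetical, and the /usr/bin/ssh invocation is assumed to follow the pattern visible in console.py.

import subprocess

environment = "manager"  # hypothetical compose project directory under /opt
arguments = "ps"         # hypothetical docker compose arguments
host = "testbed-node-0"  # hypothetical target host

ssh_options = (
    "-o StrictHostKeyChecking=no -o LogLevel=ERROR "
    "-o UserKnownHostsFile=/share/known_hosts"
)
ssh_command = f"docker compose --project-directory=/opt/{environment} {arguments}"
subprocess.call(
    f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{host} {ssh_command}",
    shell=True,
)

Keeping host keys in /share/known_hosts presumably pairs with the new cleanup_ssh_known_hosts_for_node helper in osism/utils/ssh.py, which baremetal.py now calls after an undeploy.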
osism/commands/console.py
CHANGED
@@ -1,10 +1,94 @@
 # SPDX-License-Identifier: Apache-2.0

+import socket
 import subprocess
+from typing import Optional

 from cliff.command import Command
+from loguru import logger
 from prompt_toolkit import prompt

+from osism import utils
+
+
+def resolve_hostname_to_ip(hostname: str) -> Optional[str]:
+    """
+    Attempt to resolve hostname to IPv4 address using DNS.
+
+    Args:
+        hostname: The hostname to resolve
+
+    Returns:
+        IPv4 address string if successful, None if resolution fails
+    """
+    try:
+        ip_address = socket.gethostbyname(hostname)
+        logger.debug(f"Resolved hostname {hostname} to {ip_address}")
+        return ip_address
+    except socket.gaierror as e:
+        logger.debug(f"DNS resolution failed for {hostname}: {e}")
+        return None
+
+
+def get_primary_ipv4_from_netbox(hostname: str) -> Optional[str]:
+    """
+    Retrieve primary IPv4 address for hostname from Netbox.
+
+    Args:
+        hostname: The hostname to look up in Netbox
+
+    Returns:
+        Primary IPv4 address string if found, None otherwise
+    """
+    if not utils.nb:
+        logger.debug("Netbox integration not available")
+        return None
+
+    try:
+        device = utils.nb.dcim.devices.get(name=hostname)
+        if device and device.primary_ip4:
+            ip_address = str(device.primary_ip4.address).split("/")[0]
+            logger.info(f"Found primary IPv4 for {hostname} in Netbox: {ip_address}")
+            return ip_address
+        else:
+            logger.debug(f"No device or primary IPv4 found for {hostname} in Netbox")
+            return None
+    except Exception as e:
+        logger.warning(f"Error querying Netbox for {hostname}: {e}")
+        return None
+
+
+def resolve_host_with_fallback(hostname: str) -> str:
+    """
+    Resolve hostname with Netbox fallback.
+
+    First attempts DNS resolution. If that fails and Netbox integration is enabled,
+    attempts to retrieve the primary IPv4 address from Netbox.
+
+    Args:
+        hostname: The hostname to resolve
+
+    Returns:
+        Resolved IP address or original hostname if all resolution attempts fail
+    """
+    # First try DNS resolution
+    ip_address = resolve_hostname_to_ip(hostname)
+    if ip_address:
+        return ip_address
+
+    # Fallback to Netbox if DNS resolution failed
+    logger.info(f"DNS resolution failed for {hostname}, trying Netbox fallback")
+    netbox_ip = get_primary_ipv4_from_netbox(hostname)
+    if netbox_ip:
+        logger.info(f"Using IPv4 address {netbox_ip} from Netbox for {hostname}")
+        return netbox_ip
+
+    # If both methods fail, return original hostname and let SSH handle the error
+    logger.warning(
+        f"Could not resolve {hostname} via DNS or Netbox, using original hostname"
+    )
+    return hostname
+

 class Run(Command):
     def get_parser(self, prog_name):
@@ -45,7 +129,7 @@ class Run(Command):
             type_console = "clush"
             host = host[1:]

-        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR"
+        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/share/known_hosts"

         if type_console == "ansible":
            subprocess.call(f"/run-ansible-console.sh {host}", shell=True)
@@ -55,9 +139,11 @@ class Run(Command):
                 shell=True,
             )
         elif type_console == "ssh":
+            # Resolve hostname with Netbox fallback
+            resolved_host = resolve_host_with_fallback(host)
             # FIXME: use paramiko or something else more Pythonic + make operator user + key configurable
             subprocess.call(
-                f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{
+                f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{resolved_host}",
                 shell=True,
             )
         elif type_console == "container_prompt":
@@ -67,9 +153,11 @@ class Run(Command):
                     break

             ssh_command = f"docker {command}"
+            # Resolve hostname with Netbox fallback
+            resolved_host = resolve_host_with_fallback(host[:-1])
             # FIXME: use paramiko or something else more Pythonic + make operator user + key configurable
             subprocess.call(
-                f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{
+                f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{resolved_host} {ssh_command}",
                 shell=True,
             )
         elif type_console == "container":
@@ -78,12 +166,12 @@ class Run(Command):
             target_command = "bash"

             ssh_command = f"docker exec -it {target_containername} {target_command}"
-            ssh_options = (
-                "-o RequestTTY=force -o StrictHostKeyChecking=no -o LogLevel=ERROR"
-            )
+            ssh_options = "-o RequestTTY=force -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/share/known_hosts"

+            # Resolve hostname with Netbox fallback
+            resolved_target_host = resolve_host_with_fallback(target_host)
             # FIXME: use paramiko or something else more Pythonic + make operator user + key configurable
             subprocess.call(
-                f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{
+                f"/usr/bin/ssh -i /ansible/secrets/id_rsa.operator {ssh_options} dragon@{resolved_target_host} {ssh_command}",
                 shell=True,
             )
osism/commands/container.py
CHANGED
@@ -23,7 +23,7 @@ class Run(Command):
         host = parsed_args.host[0]
         command = " ".join(parsed_args.command)

-        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR"
+        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/share/known_hosts"

         if not command:
             while True:
osism/commands/log.py
CHANGED
@@ -52,7 +52,7 @@ class Container(Command):
         parameters = " ".join(parsed_args.parameter)

         ssh_command = f"docker logs {parameters} {container_name}"
-        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR"
+        ssh_options = "-o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/share/known_hosts"

         # FIXME: use paramiko or something else more Pythonic + make operator user + key configurable
         subprocess.call(
osism/commands/netbox.py
CHANGED
@@ -14,6 +14,11 @@ from osism import utils
 class Ironic(Command):
     def get_parser(self, prog_name):
         parser = super(Ironic, self).get_parser(prog_name)
+        parser.add_argument(
+            "node",
+            nargs="?",
+            help="Optional node name to sync only a specific node",
+        )
         parser.add_argument(
             "--no-wait",
             help="Do not wait until the sync has been completed",
@@ -35,22 +40,40 @@ class Ironic(Command):
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
         task_timeout = parsed_args.task_timeout
+        node_name = parsed_args.node

-        task = conductor.sync_ironic.delay(
+        task = conductor.sync_ironic.delay(
+            node_name=node_name, force_update=parsed_args.force_update
+        )
         if wait:
-
-
-
+            if node_name:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic for node {node_name}) is running in background. Output comming soon."
+                )
+            else:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic) is running in background. Output comming soon."
+                )
             try:
                 return utils.fetch_task_output(task.id, timeout=task_timeout)
             except TimeoutError:
-
-
-
+                if node_name:
+                    logger.error(
+                        f"Timeout while waiting for further output of task {task.task_id} (sync ironic for node {node_name})"
+                    )
+                else:
+                    logger.error(
+                        f"Timeout while waiting for further output of task {task.task_id} (sync ironic)"
+                    )
         else:
-
-
-
+            if node_name:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic for node {node_name}) is running in background. No more output."
+                )
+            else:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic) is running in background. No more output."
+                )


 class Sync(Command):
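With the new optional node argument, Ironic.take_action dispatches the Celery task with an explicit node_name. A sketch of the equivalent programmatic call, assuming the module layout shown in the file list; the node name, force_update value, and timeout are hypothetical.

from osism import utils
from osism.tasks import conductor

# Scope the sync to a single node instead of the whole NetBox query result
task = conductor.sync_ironic.delay(node_name="testbed-node-0", force_update=False)
try:
    utils.fetch_task_output(task.id, timeout=300)
except TimeoutError:
    # Matches the command's behaviour: log a timeout and leave the task running in the background
    pass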