osism 0.20250709.0__py3-none-any.whl → 0.20250804.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/commands/baremetal.py +202 -2
- osism/commands/compose.py +1 -1
- osism/commands/console.py +95 -7
- osism/commands/container.py +1 -1
- osism/commands/log.py +1 -1
- osism/commands/netbox.py +33 -10
- osism/commands/sonic.py +197 -2
- osism/tasks/__init__.py +43 -0
- osism/tasks/conductor/__init__.py +2 -2
- osism/tasks/conductor/config.py +3 -3
- osism/tasks/conductor/ironic.py +27 -5
- osism/tasks/conductor/sonic/interface.py +10 -0
- osism/tasks/conductor/utils.py +9 -2
- osism/utils/__init__.py +34 -1
- osism/utils/ssh.py +250 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/METADATA +5 -5
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/RECORD +23 -22
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/entry_points.txt +6 -0
- osism-0.20250804.0.dist-info/pbr.json +1 -0
- osism-0.20250709.0.dist-info/pbr.json +0 -1
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/WHEEL +0 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/licenses/AUTHORS +0 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250709.0.dist-info → osism-0.20250804.0.dist-info}/top_level.txt +0 -0
osism/commands/sonic.py
CHANGED
@@ -9,9 +9,16 @@ from cliff.command import Command
 from loguru import logger
 import paramiko
 from prompt_toolkit import prompt
+from tabulate import tabulate
 
 from osism import utils
 from osism.tasks import netbox
+from osism.tasks.conductor.netbox import (
+    get_nb_device_query_list_sonic,
+    get_device_oob_ip,
+)
+from osism.tasks.conductor.sonic.constants import DEFAULT_SONIC_ROLES, SUPPORTED_HWSKUS
+from osism.utils.ssh import cleanup_ssh_known_hosts_for_node
 
 # Suppress paramiko logging messages globally
 logging.getLogger("paramiko").setLevel(logging.ERROR)
@@ -41,7 +48,15 @@ class SonicCommandBase(Command):
         if not hasattr(device, "local_context_data") or not device.local_context_data:
             logger.error(f"Device {hostname} has no local_context_data in NetBox")
             return None
-
+
+        # Filter out keys that start with underscore
+        filtered_context = {
+            key: value
+            for key, value in device.local_context_data.items()
+            if not key.startswith("_")
+        }
+
+        return filtered_context
 
     def _save_config_context(self, config_context, hostname, today):
         """Save config context to local file"""
@@ -94,6 +109,13 @@ class SonicCommandBase(Command):
             return None
 
         ssh = paramiko.SSHClient()
+        # Load system host keys from centralized known_hosts file
+        try:
+            ssh.load_host_keys("/share/known_hosts")
+        except FileNotFoundError:
+            logger.debug(
+                "Centralized known_hosts file not found, creating empty host key store"
+            )
         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
 
         try:
@@ -796,6 +818,14 @@ class Reset(SonicCommandBase):
         logger.info("- Switch will need to be reinstalled after reset")
         logger.info("- Connection will be terminated by the reboot")
 
+        # Clean up SSH known_hosts entries for the reset node
+        logger.info(f"Cleaning up SSH known_hosts entries for {hostname}")
+        result = cleanup_ssh_known_hosts_for_node(hostname)
+        if result:
+            logger.info("- SSH known_hosts cleanup completed successfully")
+        else:
+            logger.warning("- SSH known_hosts cleanup completed with warnings")
+
         # Set provision_state to 'ztp' in NetBox
         logger.info("Setting provision_state to 'ztp' in NetBox")
         netbox.set_provision_state.delay(hostname, "ztp")
@@ -953,7 +983,7 @@ class Console(SonicCommandBase):
         logger.info(f"Connecting to {hostname} ({ssh_host}) via SSH console")
 
         # Execute SSH command using os.system to provide interactive terminal
-        ssh_command = f"ssh -i {ssh_key_path} -o StrictHostKeyChecking=no {ssh_username}@{ssh_host}"
+        ssh_command = f"ssh -i {ssh_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/share/known_hosts {ssh_username}@{ssh_host}"
 
         logger.info("Starting SSH session...")
         logger.info("To exit the console, type 'exit' or press Ctrl+D")
@@ -971,3 +1001,168 @@ class Console(SonicCommandBase):
         except Exception as e:
             logger.error(f"Error connecting to SONiC device {hostname}: {e}")
             return 1
+
+
+class List(Command):
+    """List SONiC switches with their details"""
+
+    def get_parser(self, prog_name):
+        parser = super(List, self).get_parser(prog_name)
+        parser.add_argument(
+            "device",
+            nargs="?",
+            type=str,
+            help="Optional device name to filter by (same as sonic sync parameter)",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        device_name = parsed_args.device
+
+        try:
+            devices = []
+
+            if device_name:
+                # When specific device is requested, fetch it directly
+                try:
+                    device = utils.nb.dcim.devices.get(name=device_name)
+                    if device:
+                        # Check if device role matches allowed roles
+                        if device.role and device.role.slug in DEFAULT_SONIC_ROLES:
+                            devices.append(device)
+                            logger.debug(
+                                f"Found device: {device.name} with role: {device.role.slug}"
+                            )
+                        else:
+                            logger.warning(
+                                f"Device {device_name} has role '{device.role.slug if device.role else 'None'}' "
+                                f"which is not in allowed SONiC roles: {', '.join(DEFAULT_SONIC_ROLES)}"
+                            )
+                            return 1
+                    else:
+                        logger.error(f"Device {device_name} not found in NetBox")
+                        return 1
+                except Exception as e:
+                    logger.error(f"Error fetching device {device_name}: {e}")
+                    return 1
+            else:
+                # Get device query list from NETBOX_FILTER_CONDUCTOR_SONIC
+                nb_device_query_list = get_nb_device_query_list_sonic()
+
+                for nb_device_query in nb_device_query_list:
+                    # Query devices with the NETBOX_FILTER_CONDUCTOR_SONIC criteria
+                    for device in utils.nb.dcim.devices.filter(**nb_device_query):
+                        # Check if device role matches allowed roles
+                        if device.role and device.role.slug in DEFAULT_SONIC_ROLES:
+                            devices.append(device)
+                            logger.debug(
+                                f"Found device: {device.name} with role: {device.role.slug}"
+                            )
+
+            logger.info(f"Found {len(devices)} devices matching criteria")
+
+            # Prepare table data
+            table_data = []
+            headers = [
+                "Name",
+                "OOB IP",
+                "Primary IP",
+                "HWSKU",
+                "Version",
+                "Provision State",
+            ]
+
+            for device in devices:
+                # Get device name
+                device_name = device.name
+
+                # Get OOB IP address
+                oob_ip = "N/A"
+                try:
+                    oob_result = get_device_oob_ip(device)
+                    if oob_result:
+                        oob_ip = oob_result[0]  # Get just the IP address
+                except Exception as e:
+                    logger.debug(f"Could not get OOB IP for {device_name}: {e}")
+
+                # Get primary IP address
+                primary_ip = "N/A"
+                try:
+                    if device.primary_ip4:
+                        # Extract IP address from CIDR notation
+                        primary_ip = str(device.primary_ip4).split("/")[0]
+                    elif device.primary_ip6:
+                        # Extract IP address from CIDR notation
+                        primary_ip = str(device.primary_ip6).split("/")[0]
+                except Exception as e:
+                    logger.debug(f"Could not get primary IP for {device_name}: {e}")
+
+                # Get HWSKU and Version from sonic_parameters custom field
+                hwsku = "N/A"
+                version = "N/A"
+                try:
+                    if (
+                        hasattr(device, "custom_fields")
+                        and "sonic_parameters" in device.custom_fields
+                        and device.custom_fields["sonic_parameters"]
+                        and isinstance(device.custom_fields["sonic_parameters"], dict)
+                    ):
+                        sonic_params = device.custom_fields["sonic_parameters"]
+                        if "hwsku" in sonic_params and sonic_params["hwsku"]:
+                            hwsku = sonic_params["hwsku"]
+                        if "version" in sonic_params and sonic_params["version"]:
+                            version = sonic_params["version"]
+                except Exception as e:
+                    logger.debug(
+                        f"Could not extract sonic_parameters for {device_name}: {e}"
+                    )
+
+                # Determine provision state
+                provision_state = "Unknown"
+                try:
+                    if hwsku == "N/A":
+                        provision_state = "No HWSKU"
+                    elif hwsku not in SUPPORTED_HWSKUS:
+                        provision_state = "Unsupported HWSKU"
+                    else:
+                        # Use device status to determine provision state
+                        if device.status:
+                            status_value = (
+                                device.status.value
+                                if hasattr(device.status, "value")
+                                else str(device.status)
+                            )
+                            if status_value == "active":
+                                provision_state = "Provisioned"
+                            elif status_value == "staged":
+                                provision_state = "Staged"
+                            elif status_value == "planned":
+                                provision_state = "Planned"
+                            else:
+                                provision_state = status_value.title()
+                        else:
+                            provision_state = "No Status"
+                except Exception as e:
+                    logger.debug(
+                        f"Could not determine provision state for {device_name}: {e}"
+                    )
+
+                table_data.append(
+                    [device_name, oob_ip, primary_ip, hwsku, version, provision_state]
+                )
+
+            # Sort by device name
+            table_data.sort(key=lambda x: x[0])
+
+            # Print the table
+            if table_data:
+                print(tabulate(table_data, headers=headers, tablefmt="psql"))
+                print(f"\nTotal: {len(table_data)} devices")
+            else:
+                print("No SONiC devices found matching the criteria")
+
+            return 0
+
+        except Exception as e:
+            logger.error(f"Error listing SONiC devices: {e}")
+            return 1
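The centralized `/share/known_hosts` handling added to `SonicCommandBase` and `Console` follows a standard paramiko pattern: preload a shared host-key file when it exists, fall back to an empty in-memory store otherwise, and auto-accept keys for hosts seen for the first time. A minimal standalone sketch of that pattern (the helper name, host, and key path are illustrative, not package API):

import logging

import paramiko

logging.getLogger("paramiko").setLevel(logging.ERROR)


def open_switch_session(host, username, key_path, known_hosts="/share/known_hosts"):
    """Open an SSH session that reuses a shared known_hosts file when present."""
    ssh = paramiko.SSHClient()
    try:
        # Reuse host keys already collected for other switches
        ssh.load_host_keys(known_hosts)
    except FileNotFoundError:
        # First run: start with an empty host key store
        pass
    # Keys of unknown hosts are accepted and added to the in-memory store
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=host, username=username, key_filename=key_path)
    return ssh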
osism/tasks/__init__.py
CHANGED
@@ -238,6 +238,49 @@ def handle_task(t, wait=True, format="log", timeout=3600):
                 f"osism wait --output --live --delay 2 {t.task_id}"
             )
             return 1
+        except KeyboardInterrupt:
+            logger.info(f"\nTask {t.task_id} interrupted by user (CTRL+C)")
+
+            # Prompt user for task revocation in interactive mode using prompt-toolkit
+            try:
+                from prompt_toolkit import prompt
+
+                # Use prompt-toolkit for better UX with yes/no options and default
+                response = (
+                    prompt(
+                        "Do you want to revoke the running task? [y/N]: ", default="n"
+                    )
+                    .strip()
+                    .lower()
+                )
+
+                if response in ["y", "yes"]:
+                    logger.info(f"Revoking task {t.task_id}...")
+                    if utils.revoke_task(t.task_id):
+                        logger.info(f"Task {t.task_id} has been revoked")
+                    else:
+                        logger.error(f"Failed to revoke task {t.task_id}")
+                else:
+                    logger.info(f"Task {t.task_id} continues running in background")
+                    logger.info(
+                        "Use this command to continue waiting for this task: "
+                        f"osism wait --output --live --delay 2 {t.task_id}"
+                    )
+            except KeyboardInterrupt:
+                # Handle second CTRL+C during prompt
+                logger.info(f"\nTask {t.task_id} continues running in background")
+                logger.info(
+                    "Use this command to continue waiting for this task: "
+                    f"osism wait --output --live --delay 2 {t.task_id}"
+                )
+            except EOFError:
+                # Handle EOF (e.g., when input is not available)
+                logger.info(f"Task {t.task_id} continues running in background")
+                logger.info(
+                    "Use this command to continue waiting for this task: "
+                    f"osism wait --output --live --delay 2 {t.task_id}"
+                )
+            return 1
 
     else:
         if format == "log":
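Stripped of the logging, the new CTRL+C handling in handle_task is a confirm-then-revoke step built on prompt_toolkit. A condensed sketch (the function and callback names are illustrative, not part of osism):

from prompt_toolkit import prompt


def confirm_revoke(task_id, revoke):
    """Ask whether a long-running task should be revoked after CTRL+C."""
    try:
        answer = prompt(
            "Do you want to revoke the running task? [y/N]: ", default="n"
        ).strip().lower()
    except (KeyboardInterrupt, EOFError):
        # A second CTRL+C or missing stdin means: leave the task running
        return False
    if answer in ("y", "yes"):
        # revoke is expected to return True on success
        return revoke(task_id)
    return False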
osism/tasks/conductor/__init__.py
CHANGED
@@ -44,8 +44,8 @@ def sync_netbox(self, force_update=False):
 
 
 @app.task(bind=True, name="osism.tasks.conductor.sync_ironic")
-def sync_ironic(self, force_update=False):
-    _sync_ironic(self.request.id, get_ironic_parameters, force_update)
+def sync_ironic(self, node_name=None, force_update=False):
+    _sync_ironic(self.request.id, get_ironic_parameters, node_name, force_update)
 
 
 @app.task(bind=True, name="osism.tasks.conductor.sync_sonic")
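With the new signature, a caller can resynchronise a single node by passing node_name; the command-side changes that do this are not reproduced in this section. A sketch of how the task could be enqueued by its registered name (the node name is illustrative):

from celery import Celery

from osism.tasks import Config

app = Celery("task")
app.config_from_object(Config)

# Resync a single node instead of the full inventory
result = app.send_task(
    "osism.tasks.conductor.sync_ironic",
    kwargs={"node_name": "node-001", "force_update": False},
)
print(result.id)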
osism/tasks/conductor/config.py
CHANGED
@@ -30,7 +30,7 @@ def get_configuration():
                 "image_source"
             ]
             if not validators.uuid(image_source) and not validators.url(
-                image_source
+                image_source, simple_host=True
             ):
                 result = openstack.image_get(image_source)
                 if result:
@@ -46,7 +46,7 @@ def get_configuration():
                 "deploy_kernel"
             ]
             if not validators.uuid(deploy_kernel) and not validators.url(
-                deploy_kernel
+                deploy_kernel, simple_host=True
             ):
                 result = openstack.image_get(deploy_kernel)
                 if result:
@@ -63,7 +63,7 @@ def get_configuration():
                 "deploy_ramdisk"
             ]
             if not validators.uuid(deploy_ramdisk) and not validators.url(
-                deploy_ramdisk
+                deploy_ramdisk, simple_host=True
             ):
                 result = openstack.image_get(deploy_ramdisk)
                 if result:
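The three image references (image_source, deploy_kernel, deploy_ramdisk) may be plain URLs whose host is a single-label name on the provisioning network, and simple_host=True relaxes the hostname check for that case. A small sketch, assuming a validators release that supports the simple_host flag (the URL is illustrative):

import validators

url = "http://deploy-server:8080/ironic-python-agent.kernel"  # single-label host

# Without simple_host, a host without a dot is normally rejected (falsy ValidationError)
strict = validators.url(url)

# With simple_host=True, single-label hostnames are accepted
relaxed = validators.url(url, simple_host=True)

print(bool(strict), bool(relaxed))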
osism/tasks/conductor/ironic.py
CHANGED
@@ -127,19 +127,41 @@ def _prepare_node_attributes(device, get_ironic_parameters):
     return node_attributes
 
 
-def sync_ironic(request_id, get_ironic_parameters, force_update=False):
-
-
-
-
+def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=False):
+    if node_name:
+        osism_utils.push_task_output(
+            request_id,
+            f"Starting NetBox device synchronisation with ironic for node {node_name}\n",
+        )
+    else:
+        osism_utils.push_task_output(
+            request_id,
+            "Starting NetBox device synchronisation with ironic\n",
+        )
     devices = set()
     nb_device_query_list = get_nb_device_query_list_ironic()
     for nb_device_query in nb_device_query_list:
         devices |= set(netbox.get_devices(**nb_device_query))
 
+    # Filter devices by node_name if specified
+    if node_name:
+        devices = {dev for dev in devices if dev.name == node_name}
+        if not devices:
+            osism_utils.push_task_output(
+                request_id,
+                f"Node {node_name} not found in NetBox\n",
+            )
+            osism_utils.finish_task_output(request_id, rc=1)
+            return
+
     # NOTE: Find nodes in Ironic which are no longer present in NetBox and remove them
     device_names = {dev.name for dev in devices}
     nodes = openstack.baremetal_node_list()
+
+    # Filter nodes by node_name if specified
+    if node_name:
+        nodes = [node for node in nodes if node["Name"] == node_name]
+
     for node in nodes:
         osism_utils.push_task_output(
             request_id, f"Looking for {node['Name']} in NetBox\n"
osism/tasks/conductor/sonic/interface.py
CHANGED
@@ -419,6 +419,16 @@ def _extract_port_number_from_alias(alias):
     if not alias:
         return None
 
+    # Try to extract number from Eth54(Port54) format first
+    paren_match = re.search(r"Eth(\d+)\(Port(\d+)\)", alias)
+    if paren_match:
+        port_number = int(paren_match.group(1))
+        logger.debug(
+            f"Extracted port number {port_number} from Eth(Port) alias '{alias}'"
+        )
+        return port_number
+
+    # Fallback to number at end of alias
     match = re.search(r"(\d+)$", alias)
     if match:
         port_number = int(match.group(1))
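Both regexes used by the new alias parsing can be exercised in isolation; the Eth<N>(Port<M>) form wins over the trailing-digit fallback. A self-contained sketch (the function name and sample aliases are illustrative):

import re


def extract_port_number(alias):
    """Prefer Eth<N>(Port<M>) aliases, otherwise fall back to trailing digits."""
    if not alias:
        return None
    paren_match = re.search(r"Eth(\d+)\(Port(\d+)\)", alias)
    if paren_match:
        return int(paren_match.group(1))
    match = re.search(r"(\d+)$", alias)
    return int(match.group(1)) if match else None


print(extract_port_number("Eth54(Port54)"))  # 54, taken from the Eth number
print(extract_port_number("tenGigE0/12"))    # 12, via the trailing-digit fallback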
osism/tasks/conductor/utils.py
CHANGED
@@ -75,8 +75,15 @@ def get_vault():
                 )
             ]
         )
-    except
-
+    except ValueError as exc:
+        # Handle specific vault password configuration errors
+        logger.error(f"Vault password configuration error: {exc}")
+        logger.error("Please check your vault password setup in Redis")
+        vault = VaultLib()
+    except Exception as exc:
+        # Handle other errors (file access, decryption, etc.)
+        logger.error(f"Unable to get vault secret: {exc}")
+        logger.error("Dropping encrypted entries")
         vault = VaultLib()
     return vault
 
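The widened error handling in get_vault distinguishes a misconfigured vault password (ValueError) from any other failure, and in both cases falls back to an empty VaultLib so encrypted entries are simply dropped. A minimal sketch of that fallback, assuming ansible-core is installed; get_password stands in for the Redis-backed lookup and the helper name is illustrative:

import logging

from ansible.parsing.vault import VaultLib, VaultSecret

log = logging.getLogger(__name__)


def build_vault(get_password):
    """Build a VaultLib, falling back to an empty one when no usable secret exists."""
    try:
        vault = VaultLib([("default", VaultSecret(get_password().encode("utf-8")))])
    except ValueError as exc:
        # Vault password missing or empty: decryption cannot work
        log.error(f"Vault password configuration error: {exc}")
        vault = VaultLib()
    except Exception as exc:
        # Any other failure (secret lookup, decryption, ...)
        log.error(f"Unable to get vault secret: {exc}")
        vault = VaultLib()
    return vault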
osism/utils/__init__.py
CHANGED
@@ -109,8 +109,18 @@ def get_ansible_vault_password():
         f = Fernet(key)
 
         encrypted_ansible_vault_password = redis.get("ansible_vault_password")
+        if encrypted_ansible_vault_password is None:
+            raise ValueError("Ansible vault password is not set in Redis")
+
         ansible_vault_password = f.decrypt(encrypted_ansible_vault_password)
-
+        password = ansible_vault_password.decode("utf-8")
+
+        if not password or password.strip() == "":
+            raise ValueError(
+                "Ansible vault password is empty or contains only whitespace"
+            )
+
+        return password
     except Exception as exc:
         logger.error("Unable to get ansible vault password")
         raise exc
@@ -185,6 +195,29 @@ def finish_task_output(task_id, rc=None):
     redis.xadd(task_id, {"type": "action", "content": "quit"})
 
 
+def revoke_task(task_id):
+    """
+    Revoke a running Celery task.
+
+    Args:
+        task_id (str): The ID of the task to revoke
+
+    Returns:
+        bool: True if revocation was successful, False otherwise
+    """
+    try:
+        from celery import Celery
+        from osism.tasks import Config
+
+        app = Celery("task")
+        app.config_from_object(Config)
+        app.control.revoke(task_id, terminate=True)
+        return True
+    except Exception as e:
+        logger.error(f"Failed to revoke task {task_id}: {e}")
+        return False
+
+
 def create_redlock(key, auto_release_time=3600):
     """
     Create a Redlock instance with output suppression during initialization.