redis 6.3.0__py3-none-any.whl → 7.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +1 -2
- redis/_parsers/base.py +193 -8
- redis/_parsers/helpers.py +64 -6
- redis/_parsers/hiredis.py +16 -10
- redis/_parsers/resp3.py +11 -5
- redis/asyncio/client.py +65 -8
- redis/asyncio/cluster.py +57 -14
- redis/asyncio/connection.py +62 -2
- redis/asyncio/http/__init__.py +0 -0
- redis/asyncio/http/http_client.py +265 -0
- redis/asyncio/multidb/__init__.py +0 -0
- redis/asyncio/multidb/client.py +530 -0
- redis/asyncio/multidb/command_executor.py +339 -0
- redis/asyncio/multidb/config.py +210 -0
- redis/asyncio/multidb/database.py +69 -0
- redis/asyncio/multidb/event.py +84 -0
- redis/asyncio/multidb/failover.py +125 -0
- redis/asyncio/multidb/failure_detector.py +38 -0
- redis/asyncio/multidb/healthcheck.py +285 -0
- redis/background.py +204 -0
- redis/cache.py +1 -0
- redis/client.py +99 -22
- redis/cluster.py +14 -3
- redis/commands/core.py +348 -313
- redis/commands/helpers.py +0 -20
- redis/commands/json/_util.py +4 -2
- redis/commands/json/commands.py +2 -2
- redis/commands/search/__init__.py +2 -2
- redis/commands/search/aggregation.py +28 -30
- redis/commands/search/commands.py +13 -13
- redis/commands/search/field.py +2 -2
- redis/commands/search/query.py +23 -23
- redis/commands/vectorset/__init__.py +1 -1
- redis/commands/vectorset/commands.py +50 -25
- redis/commands/vectorset/utils.py +40 -4
- redis/connection.py +1258 -90
- redis/data_structure.py +81 -0
- redis/event.py +88 -14
- redis/exceptions.py +8 -0
- redis/http/__init__.py +0 -0
- redis/http/http_client.py +425 -0
- redis/maint_notifications.py +810 -0
- redis/multidb/__init__.py +0 -0
- redis/multidb/circuit.py +144 -0
- redis/multidb/client.py +526 -0
- redis/multidb/command_executor.py +350 -0
- redis/multidb/config.py +207 -0
- redis/multidb/database.py +130 -0
- redis/multidb/event.py +89 -0
- redis/multidb/exception.py +17 -0
- redis/multidb/failover.py +125 -0
- redis/multidb/failure_detector.py +104 -0
- redis/multidb/healthcheck.py +282 -0
- redis/retry.py +14 -1
- redis/utils.py +34 -0
- {redis-6.3.0.dist-info → redis-7.0.0.dist-info}/METADATA +7 -4
- redis-7.0.0.dist-info/RECORD +105 -0
- redis-6.3.0.dist-info/RECORD +0 -78
- {redis-6.3.0.dist-info → redis-7.0.0.dist-info}/WHEEL +0 -0
- {redis-6.3.0.dist-info → redis-7.0.0.dist-info}/licenses/LICENSE +0 -0
redis/client.py
CHANGED
|
@@ -56,6 +56,9 @@ from redis.exceptions import (
|
|
|
56
56
|
WatchError,
|
|
57
57
|
)
|
|
58
58
|
from redis.lock import Lock
|
|
59
|
+
from redis.maint_notifications import (
|
|
60
|
+
MaintNotificationsConfig,
|
|
61
|
+
)
|
|
59
62
|
from redis.retry import Retry
|
|
60
63
|
from redis.utils import (
|
|
61
64
|
_set_info_logger,
|
|
@@ -220,6 +223,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
220
223
|
ssl_keyfile: Optional[str] = None,
|
|
221
224
|
ssl_certfile: Optional[str] = None,
|
|
222
225
|
ssl_cert_reqs: Union[str, "ssl.VerifyMode"] = "required",
|
|
226
|
+
ssl_include_verify_flags: Optional[List["ssl.VerifyFlags"]] = None,
|
|
227
|
+
ssl_exclude_verify_flags: Optional[List["ssl.VerifyFlags"]] = None,
|
|
223
228
|
ssl_ca_certs: Optional[str] = None,
|
|
224
229
|
ssl_ca_path: Optional[str] = None,
|
|
225
230
|
ssl_ca_data: Optional[str] = None,
|
|
@@ -244,6 +249,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
244
249
|
cache: Optional[CacheInterface] = None,
|
|
245
250
|
cache_config: Optional[CacheConfig] = None,
|
|
246
251
|
event_dispatcher: Optional[EventDispatcher] = None,
|
|
252
|
+
maint_notifications_config: Optional[MaintNotificationsConfig] = None,
|
|
247
253
|
) -> None:
|
|
248
254
|
"""
|
|
249
255
|
Initialize a new Redis client.
|
|
@@ -271,6 +277,17 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
271
277
|
single_connection_client:
|
|
272
278
|
if `True`, connection pool is not used. In that case `Redis`
|
|
273
279
|
instance use is not thread safe.
|
|
280
|
+
decode_responses:
|
|
281
|
+
if `True`, the response will be decoded to utf-8.
|
|
282
|
+
Argument is ignored when connection_pool is provided.
|
|
283
|
+
maint_notifications_config:
|
|
284
|
+
configuration the pool to support maintenance notifications - see
|
|
285
|
+
`redis.maint_notifications.MaintNotificationsConfig` for details.
|
|
286
|
+
Only supported with RESP3
|
|
287
|
+
If not provided and protocol is RESP3, the maintenance notifications
|
|
288
|
+
will be enabled by default (logic is included in the connection pool
|
|
289
|
+
initialization).
|
|
290
|
+
Argument is ignored when connection_pool is provided.
|
|
274
291
|
"""
|
|
275
292
|
if event_dispatcher is None:
|
|
276
293
|
self._event_dispatcher = EventDispatcher()
|
|
@@ -325,6 +342,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
325
342
|
"ssl_keyfile": ssl_keyfile,
|
|
326
343
|
"ssl_certfile": ssl_certfile,
|
|
327
344
|
"ssl_cert_reqs": ssl_cert_reqs,
|
|
345
|
+
"ssl_include_verify_flags": ssl_include_verify_flags,
|
|
346
|
+
"ssl_exclude_verify_flags": ssl_exclude_verify_flags,
|
|
328
347
|
"ssl_ca_certs": ssl_ca_certs,
|
|
329
348
|
"ssl_ca_data": ssl_ca_data,
|
|
330
349
|
"ssl_check_hostname": ssl_check_hostname,
|
|
@@ -345,6 +364,22 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
345
364
|
"cache_config": cache_config,
|
|
346
365
|
}
|
|
347
366
|
)
|
|
367
|
+
maint_notifications_enabled = (
|
|
368
|
+
maint_notifications_config and maint_notifications_config.enabled
|
|
369
|
+
)
|
|
370
|
+
if maint_notifications_enabled and protocol not in [
|
|
371
|
+
3,
|
|
372
|
+
"3",
|
|
373
|
+
]:
|
|
374
|
+
raise RedisError(
|
|
375
|
+
"Maintenance notifications handlers on connection are only supported with RESP version 3"
|
|
376
|
+
)
|
|
377
|
+
if maint_notifications_config:
|
|
378
|
+
kwargs.update(
|
|
379
|
+
{
|
|
380
|
+
"maint_notifications_config": maint_notifications_config,
|
|
381
|
+
}
|
|
382
|
+
)
|
|
348
383
|
connection_pool = ConnectionPool(**kwargs)
|
|
349
384
|
self._event_dispatcher.dispatch(
|
|
350
385
|
AfterPooledConnectionsInstantiationEvent(
|
|
@@ -368,9 +403,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
368
403
|
]:
|
|
369
404
|
raise RedisError("Client caching is only supported with RESP version 3")
|
|
370
405
|
|
|
371
|
-
|
|
372
|
-
# TODO: Remove this before next major version (7.0.0)
|
|
373
|
-
self.single_connection_lock = threading.Lock()
|
|
406
|
+
self.single_connection_lock = threading.RLock()
|
|
374
407
|
self.connection = None
|
|
375
408
|
self._single_connection_client = single_connection_client
|
|
376
409
|
if self._single_connection_client:
|
|
@@ -568,7 +601,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
568
601
|
|
|
569
602
|
def client(self):
|
|
570
603
|
return self.__class__(
|
|
571
|
-
connection_pool=self.connection_pool,
|
|
604
|
+
connection_pool=self.connection_pool,
|
|
605
|
+
single_connection_client=True,
|
|
572
606
|
)
|
|
573
607
|
|
|
574
608
|
def __enter__(self):
|
|
@@ -637,7 +671,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
|
|
|
637
671
|
),
|
|
638
672
|
lambda _: self._close_connection(conn),
|
|
639
673
|
)
|
|
674
|
+
|
|
640
675
|
finally:
|
|
676
|
+
if conn and conn.should_reconnect():
|
|
677
|
+
self._close_connection(conn)
|
|
678
|
+
conn.connect()
|
|
641
679
|
if self._single_connection_client:
|
|
642
680
|
self.single_connection_lock.release()
|
|
643
681
|
if not self.connection:
|
|
@@ -688,11 +726,7 @@ class Monitor:
|
|
|
688
726
|
self.connection = self.connection_pool.get_connection()
|
|
689
727
|
|
|
690
728
|
def __enter__(self):
|
|
691
|
-
self.connection.send_command("MONITOR")
|
|
692
|
-
# check that monitor returns 'OK', but don't return it to user
|
|
693
|
-
response = self.connection.read_response()
|
|
694
|
-
if not bool_ok(response):
|
|
695
|
-
raise RedisError(f"MONITOR failed: {response}")
|
|
729
|
+
self._start_monitor()
|
|
696
730
|
return self
|
|
697
731
|
|
|
698
732
|
def __exit__(self, *args):
|
|
@@ -702,8 +736,13 @@ class Monitor:
|
|
|
702
736
|
def next_command(self):
|
|
703
737
|
"""Parse the response from a monitor command"""
|
|
704
738
|
response = self.connection.read_response()
|
|
739
|
+
|
|
740
|
+
if response is None:
|
|
741
|
+
return None
|
|
742
|
+
|
|
705
743
|
if isinstance(response, bytes):
|
|
706
744
|
response = self.connection.encoder.decode(response, force=True)
|
|
745
|
+
|
|
707
746
|
command_time, command_data = response.split(" ", 1)
|
|
708
747
|
m = self.monitor_re.match(command_data)
|
|
709
748
|
db_id, client_info, command = m.groups()
|
|
@@ -739,6 +778,14 @@ class Monitor:
|
|
|
739
778
|
while True:
|
|
740
779
|
yield self.next_command()
|
|
741
780
|
|
|
781
|
+
def _start_monitor(self):
|
|
782
|
+
self.connection.send_command("MONITOR")
|
|
783
|
+
# check that monitor returns 'OK', but don't return it to user
|
|
784
|
+
response = self.connection.read_response()
|
|
785
|
+
|
|
786
|
+
if not bool_ok(response):
|
|
787
|
+
raise RedisError(f"MONITOR failed: {response}")
|
|
788
|
+
|
|
742
789
|
|
|
743
790
|
class PubSub:
|
|
744
791
|
"""
|
|
@@ -776,9 +823,7 @@ class PubSub:
|
|
|
776
823
|
else:
|
|
777
824
|
self._event_dispatcher = event_dispatcher
|
|
778
825
|
|
|
779
|
-
|
|
780
|
-
# TODO: Remove this before next major version (7.0.0)
|
|
781
|
-
self._lock = threading.Lock()
|
|
826
|
+
self._lock = threading.RLock()
|
|
782
827
|
if self.encoder is None:
|
|
783
828
|
self.encoder = self.connection_pool.get_encoder()
|
|
784
829
|
self.health_check_response_b = self.encoder.encode(self.HEALTH_CHECK_MESSAGE)
|
|
@@ -885,7 +930,7 @@ class PubSub:
|
|
|
885
930
|
"""
|
|
886
931
|
ttl = 10
|
|
887
932
|
conn = self.connection
|
|
888
|
-
while self.health_check_response_counter > 0 and ttl > 0:
|
|
933
|
+
while conn and self.health_check_response_counter > 0 and ttl > 0:
|
|
889
934
|
if self._execute(conn, conn.can_read, timeout=conn.socket_timeout):
|
|
890
935
|
response = self._execute(conn, conn.read_response)
|
|
891
936
|
if self.is_health_check_response(response):
|
|
@@ -915,11 +960,17 @@ class PubSub:
|
|
|
915
960
|
called by the # connection to resubscribe us to any channels and
|
|
916
961
|
patterns we were previously listening to
|
|
917
962
|
"""
|
|
918
|
-
|
|
963
|
+
|
|
964
|
+
if conn.should_reconnect():
|
|
965
|
+
self._reconnect(conn)
|
|
966
|
+
|
|
967
|
+
response = conn.retry.call_with_retry(
|
|
919
968
|
lambda: command(*args, **kwargs),
|
|
920
969
|
lambda _: self._reconnect(conn),
|
|
921
970
|
)
|
|
922
971
|
|
|
972
|
+
return response
|
|
973
|
+
|
|
923
974
|
def parse_response(self, block=True, timeout=0):
|
|
924
975
|
"""Parse the response from a publish/subscribe command"""
|
|
925
976
|
conn = self.connection
|
|
@@ -1129,6 +1180,7 @@ class PubSub:
|
|
|
1129
1180
|
return None
|
|
1130
1181
|
|
|
1131
1182
|
response = self.parse_response(block=(timeout is None), timeout=timeout)
|
|
1183
|
+
|
|
1132
1184
|
if response:
|
|
1133
1185
|
return self.handle_message(response, ignore_subscribe_messages)
|
|
1134
1186
|
return None
|
|
@@ -1137,7 +1189,10 @@ class PubSub:
|
|
|
1137
1189
|
|
|
1138
1190
|
def ping(self, message: Union[str, None] = None) -> bool:
|
|
1139
1191
|
"""
|
|
1140
|
-
Ping the Redis server
|
|
1192
|
+
Ping the Redis server to test connectivity.
|
|
1193
|
+
|
|
1194
|
+
Sends a PING command to the Redis server and returns True if the server
|
|
1195
|
+
responds with "PONG".
|
|
1141
1196
|
"""
|
|
1142
1197
|
args = ["PING", message] if message is not None else ["PING"]
|
|
1143
1198
|
return self.execute_command(*args)
|
|
@@ -1152,6 +1207,7 @@ class PubSub:
|
|
|
1152
1207
|
return None
|
|
1153
1208
|
if isinstance(response, bytes):
|
|
1154
1209
|
response = [b"pong", response] if response != b"PONG" else [b"pong", b""]
|
|
1210
|
+
|
|
1155
1211
|
message_type = str_if_bytes(response[0])
|
|
1156
1212
|
if message_type == "pmessage":
|
|
1157
1213
|
message = {
|
|
@@ -1221,6 +1277,8 @@ class PubSub:
|
|
|
1221
1277
|
sleep_time: float = 0.0,
|
|
1222
1278
|
daemon: bool = False,
|
|
1223
1279
|
exception_handler: Optional[Callable] = None,
|
|
1280
|
+
pubsub=None,
|
|
1281
|
+
sharded_pubsub: bool = False,
|
|
1224
1282
|
) -> "PubSubWorkerThread":
|
|
1225
1283
|
for channel, handler in self.channels.items():
|
|
1226
1284
|
if handler is None:
|
|
@@ -1234,8 +1292,13 @@ class PubSub:
|
|
|
1234
1292
|
f"Shard Channel: '{s_channel}' has no handler registered"
|
|
1235
1293
|
)
|
|
1236
1294
|
|
|
1295
|
+
pubsub = self if pubsub is None else pubsub
|
|
1237
1296
|
thread = PubSubWorkerThread(
|
|
1238
|
-
|
|
1297
|
+
pubsub,
|
|
1298
|
+
sleep_time,
|
|
1299
|
+
daemon=daemon,
|
|
1300
|
+
exception_handler=exception_handler,
|
|
1301
|
+
sharded_pubsub=sharded_pubsub,
|
|
1239
1302
|
)
|
|
1240
1303
|
thread.start()
|
|
1241
1304
|
return thread
|
|
@@ -1250,12 +1313,14 @@ class PubSubWorkerThread(threading.Thread):
|
|
|
1250
1313
|
exception_handler: Union[
|
|
1251
1314
|
Callable[[Exception, "PubSub", "PubSubWorkerThread"], None], None
|
|
1252
1315
|
] = None,
|
|
1316
|
+
sharded_pubsub: bool = False,
|
|
1253
1317
|
):
|
|
1254
1318
|
super().__init__()
|
|
1255
1319
|
self.daemon = daemon
|
|
1256
1320
|
self.pubsub = pubsub
|
|
1257
1321
|
self.sleep_time = sleep_time
|
|
1258
1322
|
self.exception_handler = exception_handler
|
|
1323
|
+
self.sharded_pubsub = sharded_pubsub
|
|
1259
1324
|
self._running = threading.Event()
|
|
1260
1325
|
|
|
1261
1326
|
def run(self) -> None:
|
|
@@ -1266,7 +1331,14 @@ class PubSubWorkerThread(threading.Thread):
|
|
|
1266
1331
|
sleep_time = self.sleep_time
|
|
1267
1332
|
while self._running.is_set():
|
|
1268
1333
|
try:
|
|
1269
|
-
|
|
1334
|
+
if not self.sharded_pubsub:
|
|
1335
|
+
pubsub.get_message(
|
|
1336
|
+
ignore_subscribe_messages=True, timeout=sleep_time
|
|
1337
|
+
)
|
|
1338
|
+
else:
|
|
1339
|
+
pubsub.get_sharded_message(
|
|
1340
|
+
ignore_subscribe_messages=True, timeout=sleep_time
|
|
1341
|
+
)
|
|
1270
1342
|
except BaseException as e:
|
|
1271
1343
|
if self.exception_handler is None:
|
|
1272
1344
|
raise
|
|
@@ -1355,6 +1427,7 @@ class Pipeline(Redis):
|
|
|
1355
1427
|
# clean up the other instance attributes
|
|
1356
1428
|
self.watching = False
|
|
1357
1429
|
self.explicit_transaction = False
|
|
1430
|
+
|
|
1358
1431
|
# we can safely return the connection to the pool here since we're
|
|
1359
1432
|
# sure we're no longer WATCHing anything
|
|
1360
1433
|
if self.connection:
|
|
@@ -1514,6 +1587,7 @@ class Pipeline(Redis):
|
|
|
1514
1587
|
if command_name in self.response_callbacks:
|
|
1515
1588
|
r = self.response_callbacks[command_name](r, **options)
|
|
1516
1589
|
data.append(r)
|
|
1590
|
+
|
|
1517
1591
|
return data
|
|
1518
1592
|
|
|
1519
1593
|
def _execute_pipeline(self, connection, commands, raise_on_error):
|
|
@@ -1521,16 +1595,17 @@ class Pipeline(Redis):
|
|
|
1521
1595
|
all_cmds = connection.pack_commands([args for args, _ in commands])
|
|
1522
1596
|
connection.send_packed_command(all_cmds)
|
|
1523
1597
|
|
|
1524
|
-
|
|
1598
|
+
responses = []
|
|
1525
1599
|
for args, options in commands:
|
|
1526
1600
|
try:
|
|
1527
|
-
|
|
1601
|
+
responses.append(self.parse_response(connection, args[0], **options))
|
|
1528
1602
|
except ResponseError as e:
|
|
1529
|
-
|
|
1603
|
+
responses.append(e)
|
|
1530
1604
|
|
|
1531
1605
|
if raise_on_error:
|
|
1532
|
-
self.raise_first_error(commands, responses)
|
|
1533
|
-
|
|
1606
|
+
self.raise_first_error(commands, responses)
|
|
1607
|
+
|
|
1608
|
+
return responses
|
|
1534
1609
|
|
|
1535
1610
|
def raise_first_error(self, commands, response):
|
|
1536
1611
|
for i, r in enumerate(response):
|
|
@@ -1615,6 +1690,8 @@ class Pipeline(Redis):
|
|
|
1615
1690
|
lambda error: self._disconnect_raise_on_watching(conn, error),
|
|
1616
1691
|
)
|
|
1617
1692
|
finally:
|
|
1693
|
+
# in reset() the connection is disconnected before returned to the pool if
|
|
1694
|
+
# it is marked for reconnect.
|
|
1618
1695
|
self.reset()
|
|
1619
1696
|
|
|
1620
1697
|
def discard(self):
|
redis/cluster.py
CHANGED
|
@@ -50,6 +50,7 @@ from redis.exceptions import (
|
|
|
50
50
|
WatchError,
|
|
51
51
|
)
|
|
52
52
|
from redis.lock import Lock
|
|
53
|
+
from redis.maint_notifications import MaintNotificationsConfig
|
|
53
54
|
from redis.retry import Retry
|
|
54
55
|
from redis.utils import (
|
|
55
56
|
deprecated_args,
|
|
@@ -170,6 +171,7 @@ REDIS_ALLOWED_KEYS = (
|
|
|
170
171
|
"redis_connect_func",
|
|
171
172
|
"password",
|
|
172
173
|
"port",
|
|
174
|
+
"timeout",
|
|
173
175
|
"queue_class",
|
|
174
176
|
"retry",
|
|
175
177
|
"retry_on_timeout",
|
|
@@ -183,6 +185,8 @@ REDIS_ALLOWED_KEYS = (
|
|
|
183
185
|
"ssl_ca_data",
|
|
184
186
|
"ssl_certfile",
|
|
185
187
|
"ssl_cert_reqs",
|
|
188
|
+
"ssl_include_verify_flags",
|
|
189
|
+
"ssl_exclude_verify_flags",
|
|
186
190
|
"ssl_keyfile",
|
|
187
191
|
"ssl_password",
|
|
188
192
|
"ssl_check_hostname",
|
|
@@ -692,6 +696,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
|
|
|
692
696
|
self._event_dispatcher = EventDispatcher()
|
|
693
697
|
else:
|
|
694
698
|
self._event_dispatcher = event_dispatcher
|
|
699
|
+
self.startup_nodes = startup_nodes
|
|
695
700
|
self.nodes_manager = NodesManager(
|
|
696
701
|
startup_nodes=startup_nodes,
|
|
697
702
|
from_url=from_url,
|
|
@@ -1659,6 +1664,11 @@ class NodesManager:
|
|
|
1659
1664
|
backoff=NoBackoff(), retries=0, supported_errors=(ConnectionError,)
|
|
1660
1665
|
)
|
|
1661
1666
|
|
|
1667
|
+
protocol = kwargs.get("protocol", None)
|
|
1668
|
+
if protocol in [3, "3"]:
|
|
1669
|
+
kwargs.update(
|
|
1670
|
+
{"maint_notifications_config": MaintNotificationsConfig(enabled=False)}
|
|
1671
|
+
)
|
|
1662
1672
|
if self.from_url:
|
|
1663
1673
|
# Create a redis node with a costumed connection pool
|
|
1664
1674
|
kwargs.update({"host": host})
|
|
@@ -2716,8 +2726,8 @@ class PipelineStrategy(AbstractStrategy):
|
|
|
2716
2726
|
|
|
2717
2727
|
If one of the retryable exceptions has been thrown we assume that:
|
|
2718
2728
|
- connection_pool was disconnected
|
|
2719
|
-
- connection_pool was
|
|
2720
|
-
-
|
|
2729
|
+
- connection_pool was reset
|
|
2730
|
+
- refresh_table_asap set to True
|
|
2721
2731
|
|
|
2722
2732
|
It will try the number of times specified by
|
|
2723
2733
|
the retries in config option "self.retry"
|
|
@@ -3161,7 +3171,8 @@ class TransactionStrategy(AbstractStrategy):
|
|
|
3161
3171
|
self._nodes_manager.initialize()
|
|
3162
3172
|
self.reinitialize_counter = 0
|
|
3163
3173
|
else:
|
|
3164
|
-
|
|
3174
|
+
if isinstance(error, AskError):
|
|
3175
|
+
self._nodes_manager.update_moved_exception(error)
|
|
3165
3176
|
|
|
3166
3177
|
self._executing = False
|
|
3167
3178
|
|