redis-5.3.0b5-py3-none-any.whl → redis-6.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. redis/__init__.py +2 -11
  2. redis/_parsers/base.py +14 -2
  3. redis/_parsers/resp3.py +2 -2
  4. redis/asyncio/client.py +102 -82
  5. redis/asyncio/cluster.py +147 -102
  6. redis/asyncio/connection.py +77 -24
  7. redis/asyncio/lock.py +26 -5
  8. redis/asyncio/retry.py +12 -0
  9. redis/asyncio/sentinel.py +11 -1
  10. redis/asyncio/utils.py +1 -1
  11. redis/auth/token.py +6 -2
  12. redis/backoff.py +15 -0
  13. redis/client.py +106 -98
  14. redis/cluster.py +208 -79
  15. redis/commands/cluster.py +1 -11
  16. redis/commands/core.py +219 -207
  17. redis/commands/helpers.py +19 -76
  18. redis/commands/json/__init__.py +1 -1
  19. redis/commands/redismodules.py +5 -17
  20. redis/commands/search/aggregation.py +3 -1
  21. redis/commands/search/commands.py +43 -16
  22. redis/commands/search/dialect.py +3 -0
  23. redis/commands/search/profile_information.py +14 -0
  24. redis/commands/search/query.py +5 -1
  25. redis/commands/timeseries/__init__.py +1 -1
  26. redis/commands/vectorset/__init__.py +46 -0
  27. redis/commands/vectorset/commands.py +367 -0
  28. redis/commands/vectorset/utils.py +94 -0
  29. redis/connection.py +78 -29
  30. redis/exceptions.py +4 -1
  31. redis/lock.py +24 -4
  32. redis/ocsp.py +2 -1
  33. redis/retry.py +12 -0
  34. redis/sentinel.py +3 -1
  35. redis/utils.py +114 -1
  36. {redis-5.3.0b5.dist-info → redis-6.0.0.dist-info}/METADATA +57 -23
  37. redis-6.0.0.dist-info/RECORD +78 -0
  38. {redis-5.3.0b5.dist-info → redis-6.0.0.dist-info}/WHEEL +1 -2
  39. redis/commands/graph/__init__.py +0 -263
  40. redis/commands/graph/commands.py +0 -313
  41. redis/commands/graph/edge.py +0 -91
  42. redis/commands/graph/exceptions.py +0 -3
  43. redis/commands/graph/execution_plan.py +0 -211
  44. redis/commands/graph/node.py +0 -88
  45. redis/commands/graph/path.py +0 -78
  46. redis/commands/graph/query_result.py +0 -588
  47. redis-5.3.0b5.dist-info/RECORD +0 -82
  48. redis-5.3.0b5.dist-info/top_level.txt +0 -1
  49. /redis/commands/search/{indexDefinition.py → index_definition.py} +0 -0
  50. {redis-5.3.0b5.dist-info → redis-6.0.0.dist-info/licenses}/LICENSE +0 -0
redis/__init__.py CHANGED
@@ -1,5 +1,3 @@
-from importlib import metadata
-
 from redis import asyncio  # noqa
 from redis.backoff import default_backoff
 from redis.client import Redis, StrictRedis
@@ -44,16 +42,9 @@ def int_or_str(value):
         return value
 
 
-try:
-    __version__ = metadata.version("redis")
-except metadata.PackageNotFoundError:
-    __version__ = "99.99.99"
-
+__version__ = "6.0.0"
+VERSION = tuple(map(int_or_str, __version__.split(".")))
 
-try:
-    VERSION = tuple(map(int_or_str, __version__.split(".")))
-except AttributeError:
-    VERSION = tuple([99, 99, 99])
 
 __all__ = [
     "AuthenticationError",
redis/_parsers/base.py CHANGED
@@ -9,26 +9,32 @@ else:
     from async_timeout import timeout as async_timeout
 
 from ..exceptions import (
+    AskError,
     AuthenticationError,
     AuthenticationWrongNumberOfArgsError,
     BusyLoadingError,
+    ClusterCrossSlotError,
+    ClusterDownError,
     ConnectionError,
     ExecAbortError,
+    MasterDownError,
     ModuleError,
+    MovedError,
     NoPermissionError,
     NoScriptError,
     OutOfMemoryError,
     ReadOnlyError,
     RedisError,
     ResponseError,
+    TryAgainError,
 )
 from ..typing import EncodableT
 from .encoders import Encoder
 from .socket import SERVER_CLOSED_CONNECTION_ERROR, SocketBuffer
 
-MODULE_LOAD_ERROR = "Error loading the extension. " "Please check the server logs."
+MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
 NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
-MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not " "possible."
+MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
 MODULE_EXPORTS_DATA_TYPES_ERROR = (
     "Error unloading module: the module "
     "exports one or more module-side data "
@@ -72,6 +78,12 @@ class BaseParser(ABC):
         "READONLY": ReadOnlyError,
         "NOAUTH": AuthenticationError,
         "NOPERM": NoPermissionError,
+        "ASK": AskError,
+        "TRYAGAIN": TryAgainError,
+        "MOVED": MovedError,
+        "CLUSTERDOWN": ClusterDownError,
+        "CROSSSLOT": ClusterCrossSlotError,
+        "MASTERDOWN": MasterDownError,
     }
 
     @classmethod
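With these mappings, cluster redirection and state replies parsed by the base parser now raise their dedicated exception classes instead of a generic ResponseError. A minimal sketch, assuming a plain client pointed at a cluster node (host, port, and key are placeholders; slot_id and node_addr are the attributes AskError parses from the reply):

    import redis
    from redis.exceptions import AskError, MovedError

    r = redis.Redis(host="localhost", port=7000)  # placeholder cluster node
    try:
        r.get("some-key")
    except (MovedError, AskError) as exc:
        # The exception carries the hash slot and the address of the owning node.
        print(f"redirected: slot={exc.slot_id} -> {exc.node_addr}")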
redis/_parsers/resp3.py CHANGED
@@ -19,7 +19,7 @@ class _RESP3Parser(_RESPBase):
 
     def handle_pubsub_push_response(self, response):
         logger = getLogger("push_response")
-        logger.info("Push response: " + str(response))
+        logger.debug("Push response: " + str(response))
         return response
 
     def read_response(self, disable_decoding=False, push_request=False):
@@ -150,7 +150,7 @@ class _AsyncRESP3Parser(_AsyncRESPBase):
 
     async def handle_pubsub_push_response(self, response):
         logger = getLogger("push_response")
-        logger.info("Push response: " + str(response))
+        logger.debug("Push response: " + str(response))
         return response
 
     async def read_response(
redis/asyncio/client.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import copy
 import inspect
 import re
-import ssl
 import warnings
 from typing import (
     TYPE_CHECKING,
@@ -40,6 +39,7 @@ from redis.asyncio.connection import (
 )
 from redis.asyncio.lock import Lock
 from redis.asyncio.retry import Retry
+from redis.backoff import ExponentialWithJitterBackoff
 from redis.client import (
     EMPTY_RESPONSE,
     NEVER_DECODE,
@@ -66,19 +66,27 @@ from redis.exceptions import (
     PubSubError,
     RedisError,
     ResponseError,
-    TimeoutError,
     WatchError,
 )
 from redis.typing import ChannelT, EncodableT, KeyT
 from redis.utils import (
     HIREDIS_AVAILABLE,
+    SSL_AVAILABLE,
     _set_info_logger,
+    deprecated_args,
     deprecated_function,
     get_lib_version,
     safe_str,
     str_if_bytes,
+    truncate_text,
 )
 
+if TYPE_CHECKING and SSL_AVAILABLE:
+    from ssl import TLSVersion, VerifyMode
+else:
+    TLSVersion = None
+    VerifyMode = None
+
 PubSubHandler = Callable[[Dict[str, str]], Awaitable[None]]
 _KeyT = TypeVar("_KeyT", bound=KeyT)
 _ArgT = TypeVar("_ArgT", KeyT, EncodableT)
@@ -201,6 +209,11 @@ class Redis(
         client.auto_close_connection_pool = True
         return client
 
+    @deprecated_args(
+        args_to_warn=["retry_on_timeout"],
+        reason="TimeoutError is included by default.",
+        version="6.0.0",
+    )
     def __init__(
         self,
         *,
@@ -218,15 +231,18 @@ class Redis(
         encoding_errors: str = "strict",
         decode_responses: bool = False,
         retry_on_timeout: bool = False,
+        retry: Retry = Retry(
+            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
+        ),
         retry_on_error: Optional[list] = None,
         ssl: bool = False,
         ssl_keyfile: Optional[str] = None,
         ssl_certfile: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, VerifyMode] = "required",
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool = False,
-        ssl_min_version: Optional[ssl.TLSVersion] = None,
+        ssl_check_hostname: bool = True,
+        ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
         max_connections: Optional[int] = None,
         single_connection_client: bool = False,
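Note the changed default above: ssl_check_hostname is now True, so TLS connections verify the server hostname unless explicitly disabled. A minimal sketch of both directions (hosts and certificate paths are placeholders; no connection is made at construction time):

    from redis.asyncio import Redis

    # 6.0.0 default: certificate and hostname verification are both on.
    verified = Redis(
        host="redis.example.com", port=6380, ssl=True, ssl_ca_certs="/path/to/ca.pem"
    )

    # Explicit opt-out, e.g. for a self-signed test server.
    unverified = Redis(
        host="localhost", port=6380, ssl=True,
        ssl_cert_reqs="none", ssl_check_hostname=False,
    )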
@@ -235,7 +251,6 @@ class Redis(
         lib_name: Optional[str] = "redis-py",
         lib_version: Optional[str] = get_lib_version(),
         username: Optional[str] = None,
-        retry: Optional[Retry] = None,
         auto_close_connection_pool: Optional[bool] = None,
         redis_connect_func=None,
         credential_provider: Optional[CredentialProvider] = None,
@@ -244,10 +259,24 @@ class Redis(
     ):
         """
         Initialize a new Redis client.
-        To specify a retry policy for specific errors, first set
-        `retry_on_error` to a list of the error/s to retry on, then set
-        `retry` to a valid `Retry` object.
-        To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
+
+        To specify a retry policy for specific errors, you have two options:
+
+        1. Set the `retry_on_error` to a list of the error/s to retry on, and
+        you can also set `retry` to a valid `Retry` object(in case the default
+        one is not appropriate) - with this approach the retries will be triggered
+        on the default errors specified in the Retry object enriched with the
+        errors specified in `retry_on_error`.
+
+        2. Define a `Retry` object with configured 'supported_errors' and set
+        it to the `retry` parameter - with this approach you completely redefine
+        the errors on which retries will happen.
+
+        `retry_on_timeout` is deprecated - please include the TimeoutError
+        either in the Retry object or in the `retry_on_error` list.
+
+        When 'connection_pool' is provided - the retry configuration of the
+        provided pool will be used.
         """
         kwargs: Dict[str, Any]
         if event_dispatcher is None:
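A minimal sketch of the two retry-configuration styles described in the docstring, using the new ExponentialWithJitterBackoff default (the host and the chosen error classes are placeholder assumptions):

    from redis.asyncio import Redis
    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialWithJitterBackoff
    from redis.exceptions import BusyLoadingError, ConnectionError, TimeoutError

    # Option 1: keep the Retry object's default supported errors and enrich
    # them with extra error types via retry_on_error.
    r1 = Redis(
        host="localhost",
        retry=Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=5),
        retry_on_error=[BusyLoadingError],
    )

    # Option 2: completely redefine the retried errors via supported_errors.
    r2 = Redis(
        host="localhost",
        retry=Retry(
            backoff=ExponentialWithJitterBackoff(base=1, cap=10),
            retries=3,
            supported_errors=(ConnectionError, TimeoutError),
        ),
    )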
@@ -273,8 +302,6 @@ class Redis(
             # Create internal connection pool, expected to be closed by Redis instance
             if not retry_on_error:
                 retry_on_error = []
-            if retry_on_timeout is True:
-                retry_on_error.append(TimeoutError)
             kwargs = {
                 "db": db,
                 "username": username,
@@ -284,7 +311,6 @@ class Redis(
                 "encoding": encoding,
                 "encoding_errors": encoding_errors,
                 "decode_responses": decode_responses,
-                "retry_on_timeout": retry_on_timeout,
                 "retry_on_error": retry_on_error,
                 "retry": copy.deepcopy(retry),
                 "max_connections": max_connections,
@@ -375,7 +401,7 @@ class Redis(
         if self.single_connection_client:
             async with self._single_conn_lock:
                 if self.connection is None:
-                    self.connection = await self.connection_pool.get_connection("_")
+                    self.connection = await self.connection_pool.get_connection()
 
                     self._event_dispatcher.dispatch(
                         AfterSingleConnectionInstantiationEvent(
@@ -396,10 +422,10 @@ class Redis(
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs
 
-    def get_retry(self) -> Optional["Retry"]:
+    def get_retry(self) -> Optional[Retry]:
         return self.get_connection_kwargs().get("retry")
 
-    def set_retry(self, retry: "Retry") -> None:
+    def set_retry(self, retry: Retry) -> None:
         self.get_connection_kwargs().update({"retry": retry})
         self.connection_pool.set_retry(retry)
 
@@ -478,6 +504,7 @@ class Redis(
         blocking_timeout: Optional[float] = None,
         lock_class: Optional[Type[Lock]] = None,
         thread_local: bool = True,
+        raise_on_release_error: bool = True,
     ) -> Lock:
         """
         Return a new Lock object using key ``name`` that mimics
@@ -524,6 +551,11 @@ class Redis(
         thread-1 would see the token value as "xyz" and would be
         able to successfully release the thread-2's lock.
 
+        ``raise_on_release_error`` indicates whether to raise an exception when
+        the lock is no longer owned when exiting the context manager. By default,
+        this is True, meaning an exception will be raised. If False, the warning
+        will be logged and the exception will be suppressed.
+
         In some use cases it's necessary to disable thread local storage. For
         example, if you have code where one thread acquires a lock and passes
         that lock instance to a worker thread to release later. If thread
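A minimal async sketch of the new flag; the key name and timeouts are placeholders:

    import asyncio

    from redis.asyncio import Redis

    async def main():
        r = Redis()
        # With raise_on_release_error=False, losing ownership before the context
        # manager exits (for example because the lock expired) is logged rather
        # than raised.
        async with r.lock("resource:123", timeout=5, raise_on_release_error=False):
            await asyncio.sleep(1)  # do work while holding the lock
        await r.aclose()

    asyncio.run(main())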
@@ -541,6 +573,7 @@ class Redis(
             blocking=blocking,
             blocking_timeout=blocking_timeout,
             thread_local=thread_local,
+            raise_on_release_error=raise_on_release_error,
         )
 
     def pubsub(self, **kwargs) -> "PubSub":
@@ -619,18 +652,17 @@ class Redis(
         await conn.send_command(*args)
         return await self.parse_response(conn, command_name, **options)
 
-    async def _disconnect_raise(self, conn: Connection, error: Exception):
+    async def _close_connection(self, conn: Connection):
         """
-        Close the connection and raise an exception
-        if retry_on_error is not set or the error
-        is not one of the specified error types
+        Close the connection before retrying.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         await conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
 
     # COMMAND EXECUTION AND PROTOCOL PARSING
     async def execute_command(self, *args, **options):
@@ -638,7 +670,7 @@ class Redis(
         await self.initialize()
         pool = self.connection_pool
         command_name = args[0]
-        conn = self.connection or await pool.get_connection(command_name, **options)
+        conn = self.connection or await pool.get_connection()
 
         if self.single_connection_client:
             await self._single_conn_lock.acquire()
@@ -647,7 +679,7 @@ class Redis(
                 lambda: self._send_command_parse_response(
                     conn, command_name, *args, **options
                 ),
-                lambda error: self._disconnect_raise(conn, error),
+                lambda _: self._close_connection(conn),
             )
         finally:
             if self.single_connection_client:
@@ -712,7 +744,7 @@ class Monitor:
 
     async def connect(self):
         if self.connection is None:
-            self.connection = await self.connection_pool.get_connection("MONITOR")
+            self.connection = await self.connection_pool.get_connection()
 
     async def __aenter__(self):
         await self.connect()
@@ -900,9 +932,7 @@ class PubSub:
         Ensure that the PubSub is connected
         """
         if self.connection is None:
-            self.connection = await self.connection_pool.get_connection(
-                "pubsub", self.shard_hint
-            )
+            self.connection = await self.connection_pool.get_connection()
             # register a callback that re-subscribes to any channels we
             # were listening to when we were disconnected
             self.connection.register_connect_callback(self.on_connect)
@@ -917,19 +947,11 @@ class PubSub:
             )
         )
 
-    async def _disconnect_raise_connect(self, conn, error):
+    async def _reconnect(self, conn):
         """
-        Close the connection and raise an exception
-        if retry_on_error is not set or the error is not one
-        of the specified error types. Otherwise, try to
-        reconnect
+        Try to reconnect
         """
         await conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
         await conn.connect()
 
     async def _execute(self, conn, command, *args, **kwargs):
@@ -942,7 +964,7 @@ class PubSub:
         """
         return await conn.retry.call_with_retry(
             lambda: command(*args, **kwargs),
-            lambda error: self._disconnect_raise_connect(conn, error),
+            lambda _: self._reconnect(conn),
         )
 
     async def parse_response(self, block: bool = True, timeout: float = 0):
@@ -1233,7 +1255,8 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
     in one transmission. This is convenient for batch processing, such as
     saving all the values in a list to Redis.
 
-    All commands executed within a pipeline are wrapped with MULTI and EXEC
+    All commands executed within a pipeline(when running in transactional mode,
+    which is the default behavior) are wrapped with MULTI and EXEC
     calls. This guarantees all commands executed in the pipeline will be
     executed atomically.
 
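A minimal sketch of the distinction the docstring now draws, with a default (transactional) pipeline and a transaction=False batch; key names are placeholders:

    import asyncio

    from redis.asyncio import Redis

    async def main():
        r = Redis()

        # Default: commands are queued and wrapped in MULTI/EXEC on execute().
        async with r.pipeline() as pipe:
            pipe.set("counter", 0).incr("counter")
            print(await pipe.execute())  # [True, 1]

        # transaction=False sends the batch without MULTI/EXEC (no atomicity).
        async with r.pipeline(transaction=False) as pipe:
            pipe.get("counter")
            print(await pipe.execute())  # [b"1"]

        await r.aclose()

    asyncio.run(main())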
@@ -1262,7 +1285,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         self.shard_hint = shard_hint
         self.watching = False
         self.command_stack: CommandStackT = []
-        self.scripts: Set["Script"] = set()
+        self.scripts: Set[Script] = set()
         self.explicit_transaction = False
 
     async def __aenter__(self: _RedisT) -> _RedisT:
@@ -1334,52 +1357,50 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
             return self.immediate_execute_command(*args, **kwargs)
         return self.pipeline_execute_command(*args, **kwargs)
 
-    async def _disconnect_reset_raise(self, conn, error):
+    async def _disconnect_reset_raise_on_watching(
+        self,
+        conn: Connection,
+        error: Exception,
+    ):
         """
-        Close the connection, reset watching state and
-        raise an exception if we were watching,
-        if retry_on_error is not set or the error is not one
-        of the specified error types.
+        Close the connection reset watching state and
+        raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         await conn.disconnect()
         # if we were already watching a variable, the watch is no longer
         # valid since this connection has died. raise a WatchError, which
         # indicates the user should retry this transaction.
         if self.watching:
-            await self.aclose()
+            await self.reset()
             raise WatchError(
-                "A ConnectionError occurred on while watching one or more keys"
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            await self.aclose()
-            raise
 
     async def immediate_execute_command(self, *args, **options):
         """
-        Execute a command immediately, but don't auto-retry on a
-        ConnectionError if we're already WATCHing a variable. Used when
-        issuing WATCH or subsequent commands retrieving their values but before
+        Execute a command immediately, but don't auto-retry on the supported
+        errors for retry if we're already WATCHing a variable.
+        Used when issuing WATCH or subsequent commands retrieving their values but before
         MULTI is called.
         """
         command_name = args[0]
         conn = self.connection
         # if this is the first call, we need a connection
         if not conn:
-            conn = await self.connection_pool.get_connection(
-                command_name, self.shard_hint
-            )
+            conn = await self.connection_pool.get_connection()
             self.connection = conn
 
         return await conn.retry.call_with_retry(
             lambda: self._send_command_parse_response(
                 conn, command_name, *args, **options
             ),
-            lambda error: self._disconnect_reset_raise(conn, error),
+            lambda error: self._disconnect_reset_raise_on_watching(conn, error),
         )
 
     def pipeline_execute_command(self, *args, **options):
@@ -1505,7 +1526,10 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         self, exception: Exception, number: int, command: Iterable[object]
     ) -> None:
         cmd = " ".join(map(safe_str, command))
-        msg = f"Command # {number} ({cmd}) of pipeline caused error: {exception.args}"
+        msg = (
+            f"Command # {number} ({truncate_text(cmd)}) "
+            "of pipeline caused error: {exception.args}"
+        )
         exception.args = (msg,) + exception.args[1:]
 
     async def parse_response(
@@ -1531,11 +1555,15 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
             if not exist:
                 s.sha = await immediate("SCRIPT LOAD", s.script)
 
-    async def _disconnect_raise_reset(self, conn: Connection, error: Exception):
+    async def _disconnect_raise_on_watching(self, conn: Connection, error: Exception):
         """
-        Close the connection, raise an exception if we were watching,
-        and raise an exception if retry_on_error is not set or the
-        error is not one of the specified error types.
+        Close the connection, raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         await conn.disconnect()
         # if we were watching a variable, the watch is no longer valid
@@ -1543,16 +1571,8 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A ConnectionError occurred on while watching one or more keys"
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            await self.reset()
-            raise
 
     async def execute(self, raise_on_error: bool = True) -> List[Any]:
         """Execute all the commands in the current pipeline"""
@@ -1568,7 +1588,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
 
         conn = self.connection
         if not conn:
-            conn = await self.connection_pool.get_connection("MULTI", self.shard_hint)
+            conn = await self.connection_pool.get_connection()
             # assign to self.connection so reset() releases the connection
             # back to the pool after we're done
             self.connection = conn
@@ -1577,7 +1597,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         try:
             return await conn.retry.call_with_retry(
                 lambda: execute(conn, stack, raise_on_error),
-                lambda error: self._disconnect_raise_reset(conn, error),
+                lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
             await self.reset()