coredis 4.24.0__py3-none-any.whl → 5.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. coredis/__init__.py +1 -3
  2. coredis/_packer.py +10 -10
  3. coredis/_protocols.py +23 -32
  4. coredis/_py_311_typing.py +20 -0
  5. coredis/_py_312_typing.py +17 -0
  6. coredis/_utils.py +49 -51
  7. coredis/_version.py +3 -3
  8. coredis/cache.py +57 -82
  9. coredis/client/__init__.py +1 -2
  10. coredis/client/basic.py +129 -56
  11. coredis/client/cluster.py +147 -70
  12. coredis/commands/__init__.py +27 -7
  13. coredis/commands/_key_spec.py +11 -10
  14. coredis/commands/_utils.py +1 -1
  15. coredis/commands/_validators.py +30 -20
  16. coredis/commands/_wrappers.py +19 -99
  17. coredis/commands/bitfield.py +10 -2
  18. coredis/commands/constants.py +20 -3
  19. coredis/commands/core.py +1627 -1246
  20. coredis/commands/function.py +21 -19
  21. coredis/commands/monitor.py +0 -71
  22. coredis/commands/pubsub.py +7 -142
  23. coredis/commands/request.py +108 -0
  24. coredis/commands/script.py +9 -9
  25. coredis/commands/sentinel.py +60 -49
  26. coredis/connection.py +14 -15
  27. coredis/exceptions.py +2 -2
  28. coredis/experimental/__init__.py +0 -4
  29. coredis/globals.py +3 -0
  30. coredis/modules/autocomplete.py +28 -30
  31. coredis/modules/base.py +15 -31
  32. coredis/modules/filters.py +269 -245
  33. coredis/modules/graph.py +61 -62
  34. coredis/modules/json.py +172 -140
  35. coredis/modules/response/_callbacks/autocomplete.py +5 -4
  36. coredis/modules/response/_callbacks/graph.py +34 -29
  37. coredis/modules/response/_callbacks/json.py +5 -3
  38. coredis/modules/response/_callbacks/search.py +49 -53
  39. coredis/modules/response/_callbacks/timeseries.py +18 -30
  40. coredis/modules/response/types.py +1 -5
  41. coredis/modules/search.py +186 -169
  42. coredis/modules/timeseries.py +184 -164
  43. coredis/parser.py +6 -19
  44. coredis/pipeline.py +391 -422
  45. coredis/pool/basic.py +7 -7
  46. coredis/pool/cluster.py +3 -3
  47. coredis/pool/nodemanager.py +10 -3
  48. coredis/response/_callbacks/__init__.py +76 -57
  49. coredis/response/_callbacks/acl.py +0 -3
  50. coredis/response/_callbacks/cluster.py +25 -16
  51. coredis/response/_callbacks/command.py +8 -6
  52. coredis/response/_callbacks/connection.py +4 -3
  53. coredis/response/_callbacks/geo.py +17 -13
  54. coredis/response/_callbacks/hash.py +13 -11
  55. coredis/response/_callbacks/keys.py +9 -5
  56. coredis/response/_callbacks/module.py +2 -3
  57. coredis/response/_callbacks/script.py +6 -8
  58. coredis/response/_callbacks/sentinel.py +21 -17
  59. coredis/response/_callbacks/server.py +36 -14
  60. coredis/response/_callbacks/sets.py +3 -4
  61. coredis/response/_callbacks/sorted_set.py +27 -24
  62. coredis/response/_callbacks/streams.py +22 -13
  63. coredis/response/_callbacks/strings.py +7 -6
  64. coredis/response/_callbacks/vector_sets.py +126 -0
  65. coredis/response/types.py +13 -4
  66. coredis/sentinel.py +1 -1
  67. coredis/stream.py +4 -3
  68. coredis/tokens.py +343 -16
  69. coredis/typing.py +432 -79
  70. {coredis-4.24.0.dist-info → coredis-5.0.0rc1.dist-info}/METADATA +4 -5
  71. coredis-5.0.0rc1.dist-info/RECORD +95 -0
  72. coredis/client/keydb.py +0 -336
  73. coredis/pipeline.pyi +0 -2103
  74. coredis-4.24.0.dist-info/RECORD +0 -93
  75. {coredis-4.24.0.dist-info → coredis-5.0.0rc1.dist-info}/WHEEL +0 -0
  76. {coredis-4.24.0.dist-info → coredis-5.0.0rc1.dist-info}/licenses/LICENSE +0 -0
  77. {coredis-4.24.0.dist-info → coredis-5.0.0rc1.dist-info}/top_level.txt +0 -0
coredis/pipeline.py CHANGED
@@ -5,21 +5,21 @@ import functools
5
5
  import inspect
6
6
  import sys
7
7
  import textwrap
8
+ import warnings
8
9
  from abc import ABCMeta
9
10
  from concurrent.futures import CancelledError
10
- from dataclasses import dataclass, field
11
- from itertools import chain
12
11
  from types import TracebackType
13
12
  from typing import Any, cast
14
13
 
15
- from wrapt import ObjectProxy # type: ignore
14
+ from deprecated.sphinx import deprecated
16
15
 
17
- from coredis._utils import b, hash_slot
18
- from coredis.client import Client, Redis, RedisCluster
16
+ from coredis._utils import b, hash_slot, nativestr
17
+ from coredis.client import Client, RedisCluster
18
+ from coredis.commands import CommandRequest, CommandResponseT
19
19
  from coredis.commands._key_spec import KeySpec
20
20
  from coredis.commands.constants import CommandName, NodeFlag
21
21
  from coredis.commands.script import Script
22
- from coredis.connection import BaseConnection, ClusterConnection, CommandInvocation
22
+ from coredis.connection import BaseConnection, ClusterConnection, CommandInvocation, Connection
23
23
  from coredis.exceptions import (
24
24
  AskError,
25
25
  ClusterCrossSlotError,
@@ -48,16 +48,23 @@ from coredis.response._callbacks import (
48
48
  from coredis.retry import ConstantRetryPolicy, retryable
49
49
  from coredis.typing import (
50
50
  AnyStr,
51
+ Awaitable,
51
52
  Callable,
52
- Coroutine,
53
- Generic,
53
+ ExecutionParameters,
54
+ Generator,
54
55
  Iterable,
55
56
  KeyT,
56
57
  Parameters,
57
58
  ParamSpec,
59
+ RedisCommand,
60
+ RedisCommandP,
61
+ RedisValueT,
58
62
  ResponseType,
63
+ Self,
59
64
  StringT,
65
+ T_co,
60
66
  TypeVar,
67
+ Unpack,
61
68
  ValueT,
62
69
  )
63
70
 
@@ -75,42 +82,84 @@ UNWATCH_COMMANDS = {CommandName.DISCARD, CommandName.EXEC, CommandName.UNWATCH}
75
82
 
76
83
 
77
84
  def wrap_pipeline_method(
78
- kls: PipelineMeta, func: Callable[P, Coroutine[Any, Any, R]]
79
- ) -> Callable[P, Coroutine[Any, Any, R]]:
85
+ kls: PipelineMeta, func: Callable[P, Awaitable[R]]
86
+ ) -> Callable[P, Awaitable[R]]:
80
87
  @functools.wraps(func)
81
- async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
82
- return await func(*args, **kwargs)
88
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
89
+ return func(*args, **kwargs)
83
90
 
84
91
  wrapper.__annotations__ = wrapper.__annotations__.copy()
85
92
  wrapper.__annotations__["return"] = kls
86
93
  wrapper.__doc__ = textwrap.dedent(wrapper.__doc__ or "")
87
94
  wrapper.__doc__ = f"""
88
95
  Pipeline variant of :meth:`coredis.Redis.{func.__name__}` that does not execute
89
- immediately and instead pushes the command into a stack for batch send
90
- and returns the instance of :class:`{kls.__name__}` itself.
96
+ immediately and instead pushes the command into a stack for batch send.
91
97
 
92
- To fetch the return values call :meth:`{kls.__name__}.execute` to process the pipeline
93
- and retrieve responses for the commands executed in the pipeline.
98
+ The return value can be retrieved either as part of the tuple returned by
99
+ :meth:`~{kls.__name__}.execute` or by awaiting the :class:`~coredis.commands.CommandRequest`
100
+ instance after calling :meth:`~{kls.__name__}.execute`
94
101
 
95
102
  {wrapper.__doc__}
96
103
  """
97
104
  return wrapper
98
105
 
99
106
 
100
- @dataclass
101
- class PipelineCommand:
102
- command: bytes
103
- args: tuple[ValueT, ...]
104
- callback: Callable[..., Any] = NoopCallback() # type: ignore
105
- options: dict[str, ValueT | None] = field(default_factory=dict)
106
- request: asyncio.Future[ResponseType] | None = None
107
+ class PipelineCommandRequest(CommandRequest[CommandResponseT]):
108
+ client: Pipeline[Any] | ClusterPipeline[Any]
109
+ queued_response: Awaitable[bytes | str]
107
110
 
111
+ def __init__(
112
+ self,
113
+ client: Pipeline[Any] | ClusterPipeline[Any],
114
+ name: bytes,
115
+ *arguments: ValueT,
116
+ callback: Callable[..., CommandResponseT],
117
+ execution_parameters: ExecutionParameters | None = None,
118
+ ) -> None:
119
+ super().__init__(
120
+ client, name, *arguments, callback=callback, execution_parameters=execution_parameters
121
+ )
122
+ if (client.watching or name == CommandName.WATCH) and not client.explicit_transaction:
123
+ self.response = client.immediate_execute_command(
124
+ self, callback=callback, **self.execution_parameters
125
+ )
126
+ else:
127
+ client.pipeline_execute_command(self) # type: ignore[arg-type]
128
+
129
+ async def __backward_compatibility_return(self) -> Pipeline[Any] | ClusterPipeline[Any]:
130
+ return self.client
108
131
 
109
- @dataclass
110
- class ClusterPipelineCommand(PipelineCommand):
111
- position: int = 0
112
- result: Any | None = None # type: ignore
113
- asking: bool = False
132
+ def __await__(self) -> Generator[None, None, CommandResponseT]:
133
+ if hasattr(self, "response"):
134
+ return self.response.__await__()
135
+ else:
136
+ warnings.warn(
137
+ """
138
+ Awaiting a pipeline command response before calling `execute()` on the pipeline instance
139
+ has no effect and returns the pipeline instance itself for backward compatibility.
140
+
141
+ To add commands to a pipeline simply call the methods synchronously. The awaitable response
142
+ can be awaited after calling `execute()` to retrieve a statically typed response if required.
143
+ """
144
+ )
145
+ return self.__backward_compatibility_return().__await__() # type: ignore[return-value]
146
+
147
+
148
+ class ClusterPipelineCommandRequest(PipelineCommandRequest[CommandResponseT]):
149
+ def __init__(
150
+ self,
151
+ client: ClusterPipeline[Any],
152
+ name: bytes,
153
+ *arguments: ValueT,
154
+ callback: Callable[..., CommandResponseT],
155
+ execution_parameters: ExecutionParameters | None = None,
156
+ ) -> None:
157
+ self.position: int = 0
158
+ self.result: Any | None = None
159
+ self.asking: bool = False
160
+ super().__init__(
161
+ client, name, *arguments, callback=callback, execution_parameters=execution_parameters
162
+ )
114
163
 
115
164
 
116
165
  class NodeCommands:
@@ -121,16 +170,18 @@ class NodeCommands:
121
170
  in_transaction: bool = False,
122
171
  timeout: float | None = None,
123
172
  ):
124
- self.client = client
173
+ self.client: RedisCluster[Any] = client
125
174
  self.connection = connection
126
- self.commands: list[ClusterPipelineCommand] = []
175
+ self.commands: list[ClusterPipelineCommandRequest[Any]] = []
127
176
  self.in_transaction = in_transaction
128
177
  self.timeout = timeout
178
+ self.multi_cmd: asyncio.Future[ResponseType] | None = None
179
+ self.exec_cmd: asyncio.Future[ResponseType] | None = None
129
180
 
130
- def extend(self, c: list[ClusterPipelineCommand]) -> None:
181
+ def extend(self, c: list[ClusterPipelineCommandRequest[Any]]) -> None:
131
182
  self.commands.extend(c)
132
183
 
133
- def append(self, c: ClusterPipelineCommand) -> None:
184
+ def append(self, c: ClusterPipelineCommandRequest[Any]) -> None:
134
185
  self.commands.append(c)
135
186
 
136
187
  async def write(self) -> None:
@@ -146,20 +197,32 @@ class NodeCommands:
146
197
  # build up all commands into a single request to increase network perf
147
198
  # send all the commands and catch connection and timeout errors.
148
199
  try:
200
+ if self.in_transaction:
201
+ self.multi_cmd = await connection.create_request(
202
+ CommandName.MULTI, timeout=self.timeout
203
+ )
149
204
  requests = await connection.create_requests(
150
205
  [
151
206
  CommandInvocation(
152
- cmd.command,
153
- cmd.args,
154
- (bool(cmd.options.get("decode")) if cmd.options.get("decode") else None),
207
+ cmd.name,
208
+ cmd.arguments,
209
+ (
210
+ bool(cmd.execution_parameters.get("decode"))
211
+ if cmd.execution_parameters.get("decode")
212
+ else None
213
+ ),
155
214
  None,
156
215
  )
157
216
  for cmd in commands
158
217
  ],
159
218
  timeout=self.timeout,
160
219
  )
220
+ if self.in_transaction:
221
+ self.exec_cmd = await connection.create_request(
222
+ CommandName.EXEC, timeout=self.timeout
223
+ )
161
224
  for i, cmd in enumerate(commands):
162
- cmd.request = requests[i]
225
+ cmd.response = requests[i]
163
226
  except (ConnectionError, TimeoutError) as e:
164
227
  for c in commands:
165
228
  c.result = e
@@ -167,51 +230,50 @@ class NodeCommands:
167
230
  async def read(self) -> None:
168
231
  connection = self.connection
169
232
  success = True
170
-
233
+ multi_result = None
234
+ if self.multi_cmd:
235
+ multi_result = await self.multi_cmd
236
+ success = multi_result in {b"OK", "OK"}
171
237
  for c in self.commands:
172
238
  if c.result is None:
173
239
  try:
174
- c.result = await c.request if c.request else None
240
+ c.result = await c.response if c.response else None
175
241
  except ExecAbortError:
176
242
  raise
177
243
  except (ConnectionError, TimeoutError, RedisError) as e:
178
244
  success = False
179
245
  c.result = e
180
-
181
- if self.in_transaction:
182
- transaction_result = []
246
+ if self.in_transaction and self.exec_cmd:
183
247
  if success:
184
- for c in self.commands:
185
- if c.command == CommandName.EXEC:
186
- if c.result:
187
- transaction_result = cast(list[ResponseType], c.result)
188
- else:
189
- raise WatchError("Watched variable changed.")
248
+ res = await self.exec_cmd
249
+ if res:
250
+ transaction_result = cast(list[ResponseType], res)
251
+ else:
252
+ raise WatchError("Watched variable changed.")
190
253
  for idx, c in enumerate(
191
254
  [
192
255
  _c
193
256
  for _c in sorted(self.commands, key=lambda x: x.position)
194
- if _c.command not in {CommandName.MULTI, CommandName.EXEC}
257
+ if _c.name not in {CommandName.MULTI, CommandName.EXEC}
195
258
  ]
196
259
  ):
197
260
  if isinstance(c.callback, AsyncPreProcessingCallback):
198
- await c.callback.pre_process(
199
- self.client, transaction_result[idx], **c.options
200
- )
261
+ await c.callback.pre_process(self.client, transaction_result[idx])
201
262
  c.result = c.callback(
202
263
  transaction_result[idx],
203
264
  version=connection.protocol_version,
204
- **c.options,
205
265
  )
206
- elif isinstance(self.commands[0].result, BaseException):
207
- raise self.commands[0].result
266
+ elif isinstance(multi_result, BaseException):
267
+ raise multi_result
208
268
 
209
269
 
210
270
  class PipelineMeta(ABCMeta):
211
271
  RESULT_CALLBACKS: dict[str, Callable[..., Any]]
212
272
  NODES_FLAGS: dict[str, NodeFlag]
213
273
 
214
- def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]):
274
+ def __new__(
275
+ cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
276
+ ) -> PipelineMeta:
215
277
  kls = super().__new__(cls, name, bases, namespace)
216
278
 
217
279
  for name, method in PipelineMeta.get_methods(kls).items():
@@ -226,7 +288,9 @@ class PipelineMeta(ABCMeta):
226
288
 
227
289
 
228
290
  class ClusterPipelineMeta(PipelineMeta):
229
- def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]):
291
+ def __new__(
292
+ cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
293
+ ) -> PipelineMeta:
230
294
  kls = super().__new__(cls, name, bases, namespace)
231
295
  for name, method in ClusterPipelineMeta.get_methods(kls).items():
232
296
  cmd = getattr(method, "__coredis_command", None)
@@ -242,7 +306,7 @@ class ClusterPipelineMeta(PipelineMeta):
242
306
  return kls
243
307
 
244
308
 
245
- class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
309
+ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
246
310
  """Pipeline for the Redis class"""
247
311
 
248
312
  """
@@ -256,14 +320,10 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
256
320
 
257
321
  Any command raising an exception does *not* halt the execution of
258
322
  subsequent commands in the pipeline. Instead, the exception is caught
259
- and its instance is placed into the response list returned by execute().
260
- Code iterating over the response list should be able to deal with an
261
- instance of an exception as a potential value. In general, these will be
262
- ResponseError exceptions, such as those raised when issuing a command
263
- on a key of a different datatype.
323
+ and its instance is placed into the response list returned by await pipeline.execute()
264
324
  """
265
325
 
266
- command_stack: list[PipelineCommand]
326
+ command_stack: list[PipelineCommandRequest[Any]]
267
327
  connection_pool: ConnectionPool
268
328
 
269
329
  def __init__(
@@ -273,9 +333,9 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
273
333
  watches: Parameters[KeyT] | None = None,
274
334
  timeout: float | None = None,
275
335
  ) -> None:
276
- self.client = client
336
+ self.client: Client[AnyStr] = client
277
337
  self.connection_pool = client.connection_pool
278
- self.connection = None
338
+ self.connection: Connection | None = None
279
339
  self._transaction = transaction
280
340
  self.watching = False
281
341
  self.watches: Parameters[KeyT] | None = watches or None
@@ -284,9 +344,10 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
284
344
  self.explicit_transaction = False
285
345
  self.scripts: set[Script[AnyStr]] = set()
286
346
  self.timeout = timeout
347
+ self.type_adapter = client.type_adapter
287
348
 
288
- async def __aenter__(self) -> PipelineImpl[AnyStr]:
289
- return self
349
+ async def __aenter__(self) -> Pipeline[AnyStr]:
350
+ return await self.get_instance()
290
351
 
291
352
  async def __aexit__(
292
353
  self,
@@ -294,7 +355,10 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
294
355
  exc_value: BaseException | None,
295
356
  traceback: TracebackType | None,
296
357
  ) -> None:
297
- await self.reset_pipeline()
358
+ await self.clear()
359
+
360
+ def __await__(self) -> Generator[Any, Any, Pipeline[AnyStr]]:
361
+ return self.get_instance().__await__()
298
362
 
299
363
  def __len__(self) -> int:
300
364
  return len(self.command_stack)
@@ -302,16 +366,37 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
302
366
  def __bool__(self) -> bool:
303
367
  return True
304
368
 
305
- async def reset_pipeline(self) -> None:
369
+ async def get_instance(self) -> Pipeline[AnyStr]:
370
+ return self
371
+
372
+ def create_request(
373
+ self,
374
+ name: bytes,
375
+ *arguments: ValueT,
376
+ callback: Callable[..., T_co],
377
+ execution_parameters: ExecutionParameters | None = None,
378
+ ) -> CommandRequest[T_co]:
379
+ """
380
+ :meta private:
381
+ """
382
+ return PipelineCommandRequest(
383
+ self, name, *arguments, callback=callback, execution_parameters=execution_parameters
384
+ )
385
+
386
+ async def clear(self) -> None:
387
+ """
388
+ Empties the pipeline and resets / returns the connection
389
+ back to the pool
390
+ """
306
391
  self.command_stack.clear()
307
- self.scripts: set[Script[AnyStr]] = set()
392
+ self.scripts = set()
308
393
  # make sure to reset the connection state in the event that we were
309
394
  # watching something
310
395
 
311
396
  if self.watching and self.connection:
312
397
  try:
313
398
  # call this manually since our unwatch or
314
- # immediate_execute_command methods can call reset_pipeline()
399
+ # immediate_execute_command methods can call clear()
315
400
  request = await self.connection.create_request(CommandName.UNWATCH, decode=False)
316
401
  await request
317
402
  except ConnectionError:
@@ -328,6 +413,22 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
328
413
  self.connection_pool.release(self.connection)
329
414
  self.connection = None
330
415
 
416
+ #: :meta private:
417
+ reset_pipeline = clear
418
+
419
+ @deprecated(
420
+ "The reset method in pipelines clashes with the redis ``RESET`` command. Use :meth:`clear` instead",
421
+ "5.0.0",
422
+ )
423
+ def reset(self) -> CommandRequest[None]:
424
+ """
425
+ Empties the pipeline and resets / returns the connection
426
+ back to the pool
427
+
428
+ :meta private:
429
+ """
430
+ return self.clear() # type: ignore
431
+
331
432
  def multi(self) -> None:
332
433
  """
333
434
  Starts a transactional block of the pipeline after WATCH commands
@@ -341,27 +442,20 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
341
442
  raise RedisError("Commands without an initial WATCH have already been issued")
342
443
  self.explicit_transaction = True
343
444
 
344
- async def execute_command(
445
+ def execute_command(
345
446
  self,
346
- command: bytes,
347
- *args: ValueT,
348
- callback: Callable[..., Any] = NoopCallback(), # type: ignore
349
- **options: ValueT | None,
350
- ) -> PipelineImpl[AnyStr]: # type: ignore
351
- if (self.watching or command == CommandName.WATCH) and not self.explicit_transaction:
352
- return await self.immediate_execute_command(
353
- command, *args, callback=callback, **options
354
- ) # type: ignore
355
-
356
- return self.pipeline_execute_command(command, *args, callback=callback, **options)
447
+ command: RedisCommandP,
448
+ callback: Callable[..., R] = NoopCallback(),
449
+ **options: Unpack[ExecutionParameters],
450
+ ) -> Awaitable[R]:
451
+ raise NotImplementedError
357
452
 
358
453
  async def immediate_execute_command(
359
454
  self,
360
- command: bytes,
361
- *args: ValueT,
362
- callback: Callable[..., Any] = NoopCallback(), # type: ignore
363
- **kwargs: ValueT | None,
364
- ) -> Any: # type: ignore
455
+ command: RedisCommandP,
456
+ callback: Callable[..., R] = NoopCallback(),
457
+ **kwargs: Unpack[ExecutionParameters],
458
+ ) -> R:
365
459
  """
366
460
  Executes a command immediately, but don't auto-retry on a
367
461
  ConnectionError if we're already WATCHing a variable. Used when
@@ -372,17 +466,16 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
372
466
  """
373
467
  conn = self.connection
374
468
  # if this is the first call, we need a connection
375
-
376
469
  if not conn:
377
470
  conn = await self.connection_pool.get_connection()
378
471
  self.connection = conn
379
472
  try:
380
- request = await conn.create_request(command, *args, decode=kwargs.get("decode"))
381
-
473
+ request = await conn.create_request(
474
+ command.name, *command.arguments, decode=kwargs.get("decode")
475
+ )
382
476
  return callback(
383
477
  await request,
384
478
  version=conn.protocol_version,
385
- **kwargs,
386
479
  )
387
480
  except (ConnectionError, TimeoutError):
388
481
  conn.disconnect()
@@ -390,26 +483,26 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
390
483
  # if we're not already watching, we can safely retry the command
391
484
  try:
392
485
  if not self.watching:
393
- request = await conn.create_request(command, *args, decode=kwargs.get("decode"))
394
- return callback(await request, version=conn.protocol_version, **kwargs)
486
+ request = await conn.create_request(
487
+ command.name, *command.arguments, decode=kwargs.get("decode")
488
+ )
489
+ return callback(await request, version=conn.protocol_version)
490
+ raise
395
491
  except ConnectionError:
396
492
  # the retry failed so cleanup.
397
493
  conn.disconnect()
398
- await self.reset_pipeline()
494
+ await self.clear()
399
495
  raise
400
496
  finally:
401
- if command in UNWATCH_COMMANDS:
497
+ if command.name in UNWATCH_COMMANDS:
402
498
  self.watching = False
403
- elif command == CommandName.WATCH:
499
+ elif command.name == CommandName.WATCH:
404
500
  self.watching = True
405
501
 
406
502
  def pipeline_execute_command(
407
503
  self,
408
- command: bytes,
409
- *args: ValueT,
410
- callback: Callable[..., Any],
411
- **options: ValueT | None,
412
- ) -> PipelineImpl[AnyStr]:
504
+ command: PipelineCommandRequest[R],
505
+ ) -> None:
413
506
  """
414
507
  Stages a command to be executed next execute() invocation
415
508
 
@@ -423,52 +516,34 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
423
516
 
424
517
  :meta private:
425
518
  """
426
- self.command_stack.append(
427
- PipelineCommand(command=command, args=args, options=options, callback=callback)
428
- )
429
-
430
- return self
519
+ self.command_stack.append(command)
431
520
 
432
521
  async def _execute_transaction(
433
522
  self,
434
523
  connection: BaseConnection,
435
- commands: list[PipelineCommand],
524
+ commands: list[PipelineCommandRequest[Any]],
436
525
  raise_on_error: bool,
437
526
  ) -> tuple[Any, ...]:
438
- cmds = list(
439
- chain(
440
- [
441
- PipelineCommand(
442
- command=CommandName.MULTI,
443
- args=(),
444
- )
445
- ],
446
- commands,
447
- [
448
- PipelineCommand(
449
- command=CommandName.EXEC,
450
- args=(),
451
- )
452
- ],
453
- )
454
- )
455
- if self.watches:
456
- await self.watch(*self.watches)
457
-
527
+ multi_cmd = await connection.create_request(CommandName.MULTI, timeout=self.timeout)
458
528
  requests = await connection.create_requests(
459
529
  [
460
530
  CommandInvocation(
461
- cmd.command,
462
- cmd.args,
463
- (bool(cmd.options.get("decode")) if cmd.options.get("decode") else None),
531
+ cmd.name,
532
+ cmd.arguments,
533
+ (
534
+ bool(cmd.execution_parameters.get("decode"))
535
+ if cmd.execution_parameters.get("decode")
536
+ else None
537
+ ),
464
538
  None,
465
539
  )
466
- for cmd in cmds
540
+ for cmd in commands
467
541
  ],
468
542
  timeout=self.timeout,
469
543
  )
470
- for i, cmd in enumerate(cmds):
471
- cmd.request = requests[i]
544
+ exec_cmd = await connection.create_request(CommandName.EXEC, timeout=self.timeout)
545
+ for i, cmd in enumerate(commands):
546
+ cmd.queued_response = cast(Awaitable[StringT], requests[i])
472
547
 
473
548
  errors: list[tuple[int, RedisError | None]] = []
474
549
  multi_failed = False
@@ -478,30 +553,32 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
478
553
  # so that we read all the additional command messages from
479
554
  # the socket
480
555
  try:
481
- await cmds[0].request if cmds[0].request else None
556
+ await multi_cmd
482
557
  except RedisError:
483
558
  multi_failed = True
484
559
  errors.append((0, cast(RedisError, sys.exc_info()[1])))
485
560
 
486
561
  # and all the other commands
487
- for i, cmd in enumerate(cmds[1:-1]):
562
+ for i, cmd in enumerate(commands):
488
563
  try:
489
- if cmd.request:
490
- assert (await cmd.request) in {b"QUEUED", "QUEUED"}
564
+ if cmd.queued_response:
565
+ assert (await cmd.queued_response) in {b"QUEUED", "QUEUED"}
491
566
  except RedisError:
492
567
  ex = cast(RedisError, sys.exc_info()[1])
493
- self.annotate_exception(ex, i + 1, cmd.command, cmd.args)
568
+ self.annotate_exception(ex, i + 1, cmd.name, cmd.arguments)
494
569
  errors.append((i, ex))
495
570
 
496
571
  response: list[ResponseType]
497
572
  try:
498
573
  response = cast(
499
574
  list[ResponseType],
500
- await cmds[-1].request if cmds[-1].request else None,
575
+ await exec_cmd if exec_cmd else None,
501
576
  )
502
577
  except (ExecAbortError, ResponseError):
503
578
  if self.explicit_transaction and not multi_failed:
504
- await self.immediate_execute_command(CommandName.DISCARD, callback=BoolCallback())
579
+ await self.immediate_execute_command(
580
+ RedisCommand(name=CommandName.DISCARD, arguments=()), callback=BoolCallback()
581
+ )
505
582
 
506
583
  if errors and errors[0][1]:
507
584
  raise errors[0][1]
@@ -529,24 +606,30 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
529
606
  for r, cmd in zip(response, commands):
530
607
  if not isinstance(r, Exception):
531
608
  if isinstance(cmd.callback, AsyncPreProcessingCallback):
532
- await cmd.callback.pre_process(self.client, r, **cmd.options)
533
- r = cmd.callback(r, version=connection.protocol_version, **cmd.options)
609
+ await cmd.callback.pre_process(self.client, r)
610
+ r = cmd.callback(r, version=connection.protocol_version, **cmd.execution_parameters)
611
+ cmd.response = asyncio.get_running_loop().create_future()
612
+ cmd.response.set_result(r)
534
613
  data.append(r)
535
614
  return tuple(data)
536
615
 
537
616
  async def _execute_pipeline(
538
617
  self,
539
618
  connection: BaseConnection,
540
- commands: list[PipelineCommand],
619
+ commands: list[PipelineCommandRequest[Any]],
541
620
  raise_on_error: bool,
542
621
  ) -> tuple[Any, ...]:
543
622
  # build up all commands into a single request to increase network perf
544
623
  requests = await connection.create_requests(
545
624
  [
546
625
  CommandInvocation(
547
- cmd.command,
548
- cmd.args,
549
- (bool(cmd.options.get("decode")) if cmd.options.get("decode") else None),
626
+ cmd.name,
627
+ cmd.arguments,
628
+ (
629
+ bool(cmd.execution_parameters.get("decode"))
630
+ if cmd.execution_parameters.get("decode")
631
+ else None
632
+ ),
550
633
  None,
551
634
  )
552
635
  for cmd in commands
@@ -554,35 +637,38 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
554
637
  timeout=self.timeout,
555
638
  )
556
639
  for i, cmd in enumerate(commands):
557
- cmd.request = requests[i]
640
+ cmd.response = requests[i]
558
641
 
559
642
  response: list[Any] = []
560
-
561
643
  for cmd in commands:
562
644
  try:
563
- res = await cmd.request if cmd.request else None
645
+ res = await cmd.response if cmd.response else None
564
646
  if isinstance(cmd.callback, AsyncPreProcessingCallback):
565
- await cmd.callback.pre_process(self.client, res, **cmd.options)
566
- response.append(
567
- cmd.callback(
568
- res,
569
- version=connection.protocol_version,
570
- **cmd.options,
571
- )
647
+ await cmd.callback.pre_process(self.client, res, **cmd.execution_parameters)
648
+ resp = cmd.callback(
649
+ res,
650
+ version=connection.protocol_version,
651
+ **cmd.execution_parameters,
572
652
  )
573
- except ResponseError:
653
+ cmd.response = asyncio.get_event_loop().create_future()
654
+ cmd.response.set_result(resp)
655
+ response.append(resp)
656
+ except ResponseError as re:
657
+ cmd.response = asyncio.get_event_loop().create_future()
658
+ cmd.response.set_exception(re)
574
659
  response.append(sys.exc_info()[1])
575
-
576
660
  if raise_on_error:
577
661
  self.raise_first_error(commands, response)
578
662
 
579
663
  return tuple(response)
580
664
 
581
- def raise_first_error(self, commands: list[PipelineCommand], response: ResponseType) -> None:
665
+ def raise_first_error(
666
+ self, commands: list[PipelineCommandRequest[Any]], response: ResponseType
667
+ ) -> None:
582
668
  assert isinstance(response, list)
583
669
  for i, r in enumerate(response):
584
670
  if isinstance(r, RedisError):
585
- self.annotate_exception(r, i + 1, commands[i].command, commands[i].args)
671
+ self.annotate_exception(r, i + 1, commands[i].name, commands[i].arguments)
586
672
  raise r
587
673
 
588
674
  def annotate_exception(
@@ -590,7 +676,7 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
590
676
  exception: RedisError | None,
591
677
  number: int,
592
678
  command: bytes,
593
- args: Iterable[ValueT],
679
+ args: Iterable[RedisValueT],
594
680
  ) -> None:
595
681
  if exception:
596
682
  cmd = command.decode("latin-1")
@@ -598,21 +684,22 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
598
684
  msg = f"Command # {number} ({cmd} {args}) of pipeline caused error: {str(exception.args[0])}"
599
685
  exception.args = (msg,) + exception.args[1:]
600
686
 
601
- async def load_scripts(self):
687
+ async def load_scripts(self) -> None:
602
688
  # make sure all scripts that are about to be run on this pipeline exist
603
689
  scripts = list(self.scripts)
604
690
  immediate = self.immediate_execute_command
605
691
  shas = [s.sha for s in scripts]
606
692
  # we can't use the normal script_* methods because they would just
607
693
  # get buffered in the pipeline.
608
- exists = await immediate(CommandName.SCRIPT_EXISTS, *shas, callback=BoolsCallback())
694
+ exists = await immediate(
695
+ RedisCommand(CommandName.SCRIPT_EXISTS, tuple(shas)), callback=BoolsCallback()
696
+ )
609
697
 
610
698
  if not all(exists):
611
699
  for s, exist in zip(scripts, exists):
612
700
  if not exist:
613
701
  s.sha = await immediate(
614
- CommandName.SCRIPT_LOAD,
615
- s.script,
702
+ RedisCommand(CommandName.SCRIPT_LOAD, (s.script,)),
616
703
  callback=AnyStrCallback[AnyStr](),
617
704
  )
618
705
 
@@ -635,7 +722,7 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
635
722
 
636
723
  if not conn:
637
724
  conn = await self.connection_pool.get_connection()
638
- # assign to self.connection so reset_pipeline() releases the connection
725
+ # assign to self.connection so clear() releases the connection
639
726
  # back to the pool after we're done
640
727
  self.connection = conn
641
728
 
@@ -657,36 +744,31 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
657
744
 
658
745
  return await exec(conn, stack, raise_on_error)
659
746
  finally:
660
- await self.reset_pipeline()
747
+ await self.clear()
661
748
 
662
- async def watch(self, *keys: KeyT) -> bool:
749
+ def watch(self, *keys: KeyT) -> CommandRequest[bool]:
663
750
  """
664
- Watches the values at keys ``keys``
751
+ Watches the values at ``keys`` for change. Commands issued after this call
752
+ will be executed immediately and should be awaited. To switch back to
753
+ pipeline buffering mode, call :meth:`multi`.
665
754
  """
666
-
667
755
  if self.explicit_transaction:
668
756
  raise RedisError("Cannot issue a WATCH after a MULTI")
669
757
 
670
- return await self.immediate_execute_command(
671
- CommandName.WATCH, *keys, callback=SimpleStringCallback()
672
- )
673
-
674
- async def unwatch(self) -> bool:
675
- """Unwatches all previously specified keys"""
758
+ return self.create_request(CommandName.WATCH, *keys, callback=SimpleStringCallback())
676
759
 
677
- return (
678
- await self.immediate_execute_command(
679
- CommandName.UNWATCH, callback=SimpleStringCallback()
680
- )
681
- if self.watching
682
- else True
683
- )
760
+ def unwatch(self) -> CommandRequest[bool]:
761
+ """
762
+ Removes watches from any previously specified keys and returns the pipeline
763
+ to buffered mode.
764
+ """
765
+ return self.create_request(CommandName.UNWATCH, callback=SimpleStringCallback())
684
766
 
685
767
 
686
- class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
768
+ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
687
769
  client: RedisCluster[AnyStr]
688
770
  connection_pool: ClusterConnectionPool
689
- command_stack: list[ClusterPipelineCommand]
771
+ command_stack: list[ClusterPipelineCommandRequest[Any]]
690
772
 
691
773
  RESULT_CALLBACKS: dict[str, Callable[..., Any]] = {}
692
774
  NODES_FLAGS: dict[str, NodeFlag] = {}
@@ -711,16 +793,38 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
711
793
  self.explicit_transaction = False
712
794
  self.cache = None # not implemented.
713
795
  self.timeout = timeout
796
+ self.type_adapter = client.type_adapter
714
797
 
715
- async def watch(self, *keys: KeyT) -> bool:
798
+ def create_request(
799
+ self,
800
+ name: bytes,
801
+ *arguments: ValueT,
802
+ callback: Callable[..., T_co],
803
+ execution_parameters: ExecutionParameters | None = None,
804
+ ) -> CommandRequest[T_co]:
805
+ """
806
+ :meta private:
807
+ """
808
+ return ClusterPipelineCommandRequest(
809
+ self, name, *arguments, callback=callback, execution_parameters=execution_parameters
810
+ )
811
+
812
+ def watch(self, *keys: KeyT) -> CommandRequest[bool]:
813
+ """
814
+ Watches the values at ``keys`` for change. Commands issued after this call
815
+ will be executed immediately and should be awaited. To switch back to
816
+ pipeline buffering mode, call :meth:`multi`.
817
+ """
716
818
  if self.explicit_transaction:
717
819
  raise RedisError("Cannot issue a WATCH after a MULTI")
718
820
 
719
- return await self.immediate_execute_command(
720
- CommandName.WATCH, *keys, callback=SimpleStringCallback()
721
- )
821
+ return self.create_request(CommandName.WATCH, *keys, callback=SimpleStringCallback())
722
822
 
723
823
  async def unwatch(self) -> bool:
824
+ """
825
+ Removes watches from any previously specified keys and returns the pipeline
826
+ to buffered mode.
827
+ """
724
828
  if self._watched_connection:
725
829
  try:
726
830
  return await self._unwatch(self._watched_connection)
@@ -732,20 +836,21 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
732
836
  self._watched_connection = None
733
837
  return True
734
838
 
735
- def __repr__(self):
736
- return f"{type(self).__name__}"
737
-
738
- def __del__(self):
839
+ def __del__(self) -> None:
739
840
  if self._watched_connection:
740
841
  self.connection_pool.release(self._watched_connection)
741
842
 
742
- def __len__(self):
843
+ def __len__(self) -> int:
743
844
  return len(self.command_stack)
744
845
 
745
846
  def __bool__(self) -> bool:
746
847
  return True
747
848
 
748
- async def __aenter__(self) -> ClusterPipelineImpl[AnyStr]:
849
+ def __await__(self) -> Generator[None, None, Self]:
850
+ yield
851
+ return self
852
+
853
+ async def __aenter__(self) -> ClusterPipeline[AnyStr]:
749
854
  return self
750
855
 
751
856
  async def __aexit__(
@@ -754,46 +859,29 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
754
859
  exc_value: BaseException | None,
755
860
  traceback: TracebackType | None,
756
861
  ) -> None:
757
- await self.reset_pipeline()
862
+ await self.clear()
758
863
 
759
- async def execute_command(
864
+ def execute_command(
760
865
  self,
761
- command: bytes,
762
- *args: ValueT,
763
- callback: Callable[..., Any] = NoopCallback(), # type: ignore
764
- **options: ValueT | None,
765
- ) -> ClusterPipelineImpl[AnyStr]: # type: ignore
766
- if (self.watching or command == CommandName.WATCH) and not self.explicit_transaction:
767
- return await self.immediate_execute_command(
768
- command, *args, callback=callback, **options
769
- ) # type: ignore
770
- return self.pipeline_execute_command(command, *args, callback=callback, **options)
866
+ command: RedisCommandP,
867
+ callback: Callable[..., R] = NoopCallback(),
868
+ **options: Unpack[ExecutionParameters],
869
+ ) -> Awaitable[R]:
870
+ raise NotImplementedError
771
871
 
772
872
  def pipeline_execute_command(
773
873
  self,
774
- command: bytes,
775
- *args: ValueT,
776
- callback: Callable[..., Any],
777
- **options: ValueT | None,
778
- ) -> ClusterPipelineImpl[AnyStr]:
779
- self.command_stack.append(
780
- ClusterPipelineCommand(
781
- command=command,
782
- args=args,
783
- options=options,
784
- callback=callback,
785
- position=len(self.command_stack),
786
- )
787
- )
788
-
789
- return self
874
+ command: ClusterPipelineCommandRequest[Any],
875
+ ) -> None:
876
+ command.position = len(self.command_stack)
877
+ self.command_stack.append(command)
790
878
 
791
879
  def raise_first_error(self) -> None:
792
880
  for c in self.command_stack:
793
881
  r = c.result
794
882
 
795
883
  if isinstance(r, RedisError):
796
- self.annotate_exception(r, c.position + 1, c.command, c.args)
884
+ self.annotate_exception(r, c.position + 1, c.name, c.arguments)
797
885
  raise r
798
886
 
799
887
  def annotate_exception(
@@ -801,7 +889,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
801
889
  exception: RedisError | None,
802
890
  number: int,
803
891
  command: bytes,
804
- args: Iterable[ValueT],
892
+ args: Iterable[RedisValueT],
805
893
  ) -> None:
806
894
  if exception:
807
895
  cmd = command.decode("latin-1")
@@ -810,6 +898,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
810
898
  exception.args = (msg,) + exception.args[1:]
811
899
 
812
900
  async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
901
+ """Executes all the commands in the current pipeline"""
813
902
  await self.connection_pool.initialize()
814
903
 
815
904
  if not self.command_stack:
@@ -822,10 +911,13 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
822
911
  try:
823
912
  return await execute(raise_on_error)
824
913
  finally:
825
- await self.reset_pipeline()
914
+ await self.clear()
826
915
 
827
- async def reset_pipeline(self):
828
- """Empties pipeline"""
916
+ async def clear(self) -> None:
917
+ """
918
+ Empties the pipeline and resets / returns the connection
919
+ back to the pool
920
+ """
829
921
  self.command_stack = []
830
922
 
831
923
  self.scripts: set[Script[AnyStr]] = set()
@@ -837,32 +929,49 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
837
929
  self.connection_pool.release(self._watched_connection)
838
930
  self._watched_connection = None
839
931
 
932
+ #: :meta private:
933
+ reset_pipeline = clear
934
+
935
+ @deprecated(
936
+ "The reset method in pipelines clashes with the redis ``RESET`` command. Use :meth:`clear` instead",
937
+ "5.0.0",
938
+ )
939
+ def reset(self) -> CommandRequest[None]:
940
+ """
941
+ Empties the pipeline and resets / returns the connection
942
+ back to the pool
943
+
944
+ :meta private:
945
+ """
946
+ return self.clear() # type: ignore
947
+
840
948
  @retryable(policy=ConstantRetryPolicy((ClusterDownError,), 3, 0.1))
841
949
  async def send_cluster_transaction(self, raise_on_error: bool = True) -> tuple[object, ...]:
950
+ """
951
+ :meta private:
952
+ """
842
953
  attempt = sorted(self.command_stack, key=lambda x: x.position)
843
954
  slots: set[int] = set()
844
955
  for c in attempt:
845
- slot = self._determine_slot(c.command, *c.args, **c.options)
956
+ slot = self._determine_slot(c.name, *c.arguments, **c.execution_parameters)
846
957
  if slot:
847
958
  slots.add(slot)
848
959
 
849
960
  if len(slots) > 1:
850
- raise ClusterTransactionError("Multiple nodes involved in transaction")
961
+ raise ClusterTransactionError("Multiple slots involved in transaction")
851
962
  if not slots:
852
963
  raise ClusterTransactionError("No slots found for transaction")
853
964
  node = self.connection_pool.get_node_by_slot(slots.pop())
854
965
 
855
966
  if self._watched_node and node.name != self._watched_node.name:
856
- raise ClusterTransactionError("Multiple nodes involved in transaction")
967
+ raise ClusterTransactionError("Multiple slots involved in transaction")
857
968
 
858
969
  conn = self._watched_connection or await self.connection_pool.get_connection_by_node(node)
859
970
 
860
971
  if self.watches:
861
972
  await self._watch(node, conn, self.watches)
862
973
  node_commands = NodeCommands(self.client, conn, in_transaction=True, timeout=self.timeout)
863
- node_commands.append(ClusterPipelineCommand(CommandName.MULTI, ()))
864
974
  node_commands.extend(attempt)
865
- node_commands.append(ClusterPipelineCommand(CommandName.EXEC, ()))
866
975
  self.explicit_transaction = True
867
976
 
868
977
  await node_commands.write()
@@ -876,7 +985,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
876
985
  # the whole transaction aborts,
877
986
  # and EXEC returns a Null reply to notify that the transaction failed.
878
987
 
879
- if node_commands.commands[-1].result is None:
988
+ if node_commands.exec_cmd and await node_commands.exec_cmd is None:
880
989
  raise WatchError
881
990
  self.connection_pool.release(conn)
882
991
 
@@ -889,7 +998,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
889
998
  return tuple(
890
999
  n.result
891
1000
  for n in node_commands.commands
892
- if n.command not in {CommandName.MULTI, CommandName.EXEC}
1001
+ if n.name not in {CommandName.MULTI, CommandName.EXEC}
893
1002
  )
894
1003
 
895
1004
  @retryable(policy=ConstantRetryPolicy((ClusterDownError,), 3, 0.1))
@@ -901,6 +1010,8 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
901
1010
 
902
1011
  `allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
903
1012
  automatically. If set to false it will raise RedisClusterException.
1013
+
1014
+ :meta private:
904
1015
  """
905
1016
  # the first time sending the commands we send all of the commands that were queued up.
906
1017
  # if we have to run through it again, we only retry the commands that failed.
@@ -914,7 +1025,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
914
1025
  for c in attempt:
915
1026
  # refer to our internal node -> slot table that tells us where a given
916
1027
  # command should route to.
917
- slot = self._determine_slot(c.command, *c.args)
1028
+ slot = self._determine_slot(c.name, *c.arguments)
918
1029
  node = self.connection_pool.get_node_by_slot(slot)
919
1030
 
920
1031
  if node.name not in nodes:
@@ -985,7 +1096,9 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
985
1096
  for c in attempt:
986
1097
  try:
987
1098
  # send each command individually like we do in the main client.
988
- c.result = await self.client.execute_command(c.command, *c.args, **c.options)
1099
+ c.result = await self.client.execute_command(
1100
+ RedisCommand(c.name, c.arguments), **c.execution_parameters
1101
+ )
989
1102
  except RedisError as e:
990
1103
  c.result = e
991
1104
 
@@ -996,8 +1109,8 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
996
1109
  r = c.result
997
1110
  if not isinstance(c.result, RedisError):
998
1111
  if isinstance(c.callback, AsyncPreProcessingCallback):
999
- await c.callback.pre_process(self.client, c.result, **c.options)
1000
- r = c.callback(c.result, version=protocol_version, **c.options)
1112
+ await c.callback.pre_process(self.client, c.result)
1113
+ r = c.callback(c.result, version=protocol_version)
1001
1114
  response.append(r)
1002
1115
 
1003
1116
  if raise_on_error:
@@ -1005,16 +1118,18 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
1005
1118
 
1006
1119
  return tuple(response)
1007
1120
 
1008
- def _determine_slot(self, command: bytes, *args: ValueT, **options: ValueT) -> int:
1121
+ def _determine_slot(
1122
+ self, command: bytes, *args: ValueT, **options: Unpack[ExecutionParameters]
1123
+ ) -> int:
1009
1124
  """Figure out what slot based on command and args"""
1010
1125
 
1011
- keys: tuple[ValueT, ...] = cast(
1012
- tuple[ValueT, ...], options.get("keys")
1013
- ) or KeySpec.extract_keys(command, *args)
1126
+ keys: tuple[RedisValueT, ...] = cast(
1127
+ tuple[RedisValueT, ...], options.get("keys")
1128
+ ) or KeySpec.extract_keys(command, *args) # type: ignore
1014
1129
 
1015
1130
  if not keys:
1016
1131
  raise RedisClusterException(
1017
- f"No way to dispatch {command} to Redis Cluster. Missing key"
1132
+ f"No way to dispatch {nativestr(command)} to Redis Cluster. Missing key"
1018
1133
  )
1019
1134
  slots = {hash_slot(b(key)) for key in keys}
1020
1135
 
@@ -1027,6 +1142,10 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
1027
1142
  raise RedisClusterException("ASK & MOVED redirection not allowed in this pipeline")
1028
1143
 
1029
1144
  def multi(self) -> None:
1145
+ """
1146
+ Starts a transactional block of the pipeline after WATCH commands
1147
+ are issued. End the transactional block with `execute`.
1148
+ """
1030
1149
  if self.explicit_transaction:
1031
1150
  raise RedisError("Cannot issue nested calls to MULTI")
1032
1151
 
@@ -1036,14 +1155,13 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
1036
1155
 
1037
1156
  async def immediate_execute_command(
1038
1157
  self,
1039
- command: bytes,
1040
- *args: ValueT,
1041
- callback: Callable[..., Any] = NoopCallback(),
1042
- **kwargs: ValueT | None,
1043
- ) -> Any:
1044
- slot = self._determine_slot(command, *args)
1158
+ command: RedisCommandP,
1159
+ callback: Callable[..., R] = NoopCallback(),
1160
+ **kwargs: Unpack[ExecutionParameters],
1161
+ ) -> R:
1162
+ slot = self._determine_slot(command.name, *command.arguments)
1045
1163
  node = self.connection_pool.get_node_by_slot(slot)
1046
- if command == CommandName.WATCH:
1164
+ if command.name == CommandName.WATCH:
1047
1165
  if self._watched_node and node.name != self._watched_node.name:
1048
1166
  raise ClusterTransactionError(
1049
1167
  "Cannot issue a watch on a different node in the same transaction"
@@ -1057,40 +1175,44 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
1057
1175
  conn = await self.connection_pool.get_connection_by_node(node)
1058
1176
 
1059
1177
  try:
1060
- request = await conn.create_request(command, *args, decode=kwargs.get("decode"))
1178
+ request = await conn.create_request(
1179
+ command.name, *command.arguments, decode=kwargs.get("decode")
1180
+ )
1061
1181
 
1062
1182
  return callback(
1063
1183
  await request,
1064
1184
  version=conn.protocol_version,
1065
- **kwargs,
1066
1185
  )
1067
1186
  except (ConnectionError, TimeoutError):
1068
1187
  conn.disconnect()
1069
1188
 
1070
1189
  try:
1071
1190
  if not self.watching:
1072
- request = await conn.create_request(command, *args, decode=kwargs.get("decode"))
1073
- return callback(await request, version=conn.protocol_version, **kwargs)
1191
+ request = await conn.create_request(
1192
+ command.name, *command.arguments, decode=kwargs.get("decode")
1193
+ )
1194
+ return callback(await request, version=conn.protocol_version)
1195
+ else:
1196
+ raise
1074
1197
  except ConnectionError:
1075
1198
  # the retry failed so cleanup.
1076
1199
  conn.disconnect()
1077
- await self.reset_pipeline()
1200
+ await self.clear()
1078
1201
  raise
1079
1202
  finally:
1080
- if command in UNWATCH_COMMANDS:
1203
+ release = True
1204
+ if command.name in UNWATCH_COMMANDS:
1081
1205
  self.watching = False
1082
- elif command == CommandName.WATCH:
1206
+ elif command.name == CommandName.WATCH:
1083
1207
  self.watching = True
1084
- # don't release the connection if the command was a watch
1085
- return
1086
- self.connection_pool.release(conn)
1208
+ release = False
1209
+ if release:
1210
+ self.connection_pool.release(conn)
1087
1211
 
1088
- def load_scripts(self):
1212
+ def load_scripts(self) -> None:
1089
1213
  raise RedisClusterException("method load_scripts() is not implemented")
1090
1214
 
1091
1215
  async def _watch(self, node: ManagedNode, conn: BaseConnection, keys: Parameters[KeyT]) -> bool:
1092
- "Watches the values at keys ``keys``"
1093
-
1094
1216
  for key in keys:
1095
1217
  slot = self._determine_slot(CommandName.WATCH, key)
1096
1218
  dist_node = self.connection_pool.get_node_by_slot(slot)
@@ -1109,161 +1231,8 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
1109
1231
 
1110
1232
  async def _unwatch(self, conn: BaseConnection) -> bool:
1111
1233
  """Unwatches all previously specified keys"""
1234
+ if not self.watching:
1235
+ return True
1112
1236
  request = await conn.create_request(CommandName.UNWATCH, decode=False)
1113
- res = cast(StringT, await request)
1114
- return res == b"OK" if self.watching else True
1115
-
1116
-
1117
- class Pipeline(ObjectProxy, Generic[AnyStr]): # type: ignore
1118
- """
1119
- Class returned by :meth:`coredis.Redis.pipeline`
1120
-
1121
- The class exposes the redis command methods available in
1122
- :class:`~coredis.Redis`, however each of those methods returns
1123
- the instance itself and the results of the batched commands
1124
- can be retrieved by calling :meth:`execute`.
1125
- """
1126
-
1127
- __wrapped__: PipelineImpl[AnyStr]
1128
-
1129
- async def __aenter__(self) -> Pipeline[AnyStr]:
1130
- return cast(Pipeline[AnyStr], await self.__wrapped__.__aenter__())
1131
-
1132
- async def __aexit__(
1133
- self,
1134
- exc_type: type[BaseException] | None,
1135
- exc_value: BaseException | None,
1136
- traceback: TracebackType | None,
1137
- ) -> None:
1138
- await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
1139
-
1140
- @classmethod
1141
- def proxy(
1142
- cls,
1143
- client: Redis[AnyStr],
1144
- transaction: bool | None = None,
1145
- watches: Parameters[KeyT] | None = None,
1146
- timeout: float | None = None,
1147
- ) -> Pipeline[AnyStr]:
1148
- return cls(
1149
- PipelineImpl(
1150
- client,
1151
- transaction=transaction,
1152
- watches=watches,
1153
- timeout=timeout,
1154
- )
1155
- )
1156
-
1157
- def multi(self) -> None:
1158
- """
1159
- Starts a transactional block of the pipeline after WATCH commands
1160
- are issued. End the transactional block with :meth:`execute`
1161
- """
1162
- self.__wrapped__.multi() # Only here for documentation purposes.
1163
-
1164
- async def watch(self, *keys: KeyT) -> bool: # noqa
1165
- """
1166
- Watches the values at keys ``keys``
1167
- """
1168
- return await self.__wrapped__.watch(*keys) # Only here for documentation purposes.
1169
-
1170
- async def unwatch(self) -> bool: # noqa
1171
- """
1172
- Unwatches all previously specified keys
1173
- """
1174
- return await self.__wrapped__.unwatch() # Only here for documentation purposes.
1175
-
1176
- async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
1177
- """
1178
- Executes all the commands in the current pipeline
1179
- and return the results of the individual batched commands
1180
- """
1181
-
1182
- # Only here for documentation purposes.
1183
- return await self.__wrapped__.execute(raise_on_error=raise_on_error)
1184
-
1185
- async def reset(self) -> None:
1186
- """
1187
- Resets the command stack and releases any connections acquired from the
1188
- pool
1189
- """
1190
- await self.__wrapped__.reset_pipeline()
1191
-
1192
-
1193
- class ClusterPipeline(ObjectProxy, Generic[AnyStr]): # type: ignore
1194
- """
1195
- Class returned by :meth:`coredis.RedisCluster.pipeline`
1196
-
1197
- The class exposes the redis command methods available in
1198
- :class:`~coredis.Redis`, however each of those methods returns
1199
- the instance itself and the results of the batched commands
1200
- can be retrieved by calling :meth:`execute`.
1201
- """
1202
-
1203
- __wrapped__: ClusterPipelineImpl[AnyStr]
1204
-
1205
- async def __aenter__(self) -> ClusterPipeline[AnyStr]:
1206
- return cast(ClusterPipeline[AnyStr], await self.__wrapped__.__aenter__())
1207
-
1208
- async def __aexit__(
1209
- self,
1210
- exc_type: type[BaseException] | None,
1211
- exc_value: BaseException | None,
1212
- traceback: TracebackType | None,
1213
- ) -> None:
1214
- await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
1215
-
1216
- @classmethod
1217
- def proxy(
1218
- cls,
1219
- client: RedisCluster[AnyStr],
1220
- transaction: bool | None = False,
1221
- watches: Parameters[KeyT] | None = None,
1222
- timeout: float | None = None,
1223
- ) -> ClusterPipeline[AnyStr]:
1224
- return cls(
1225
- ClusterPipelineImpl(
1226
- client,
1227
- transaction=transaction,
1228
- watches=watches,
1229
- timeout=timeout,
1230
- )
1231
- )
1232
-
1233
- def multi(self) -> None:
1234
- """
1235
- Starts a transactional block of the pipeline after WATCH commands
1236
- are issued. End the transactional block with :meth:`execute`
1237
- """
1238
- self.__wrapped__.multi() # Only here for documentation purposes.
1239
-
1240
- async def watch(self, *keys: KeyT) -> bool: # noqa
1241
- """
1242
- Watches the values at keys ``keys``
1243
-
1244
- :raises: :exc:`~coredis.exceptions.ClusterTransactionError`
1245
- if a watch is issued on a key that resides on a different
1246
- cluster node than a previous watch.
1247
- """
1248
- return await self.__wrapped__.watch(*keys) # Only here for documentation purposes.
1249
-
1250
- async def unwatch(self) -> bool: # noqa
1251
- """
1252
- Unwatches all previously specified keys
1253
- """
1254
- return await self.__wrapped__.unwatch() # Only here for documentation purposes.
1255
-
1256
- async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
1257
- """
1258
- Executes all the commands in the current pipeline
1259
- and return the results of the individual batched commands
1260
- """
1261
- # Only here for documentation purposes.
1262
- return await self.__wrapped__.execute(raise_on_error=raise_on_error)
1263
-
1264
- async def reset(self) -> None:
1265
- """
1266
- Resets the command stack and releases any connections acquired from the
1267
- pool
1268
- """
1269
- await self.__wrapped__.reset_pipeline()
1237
+ res = cast(bytes, await request)
1238
+ return res == b"OK"