coredis 4.23.1__py3-none-any.whl → 5.0.0__py3-none-any.whl
This diff shows the content of the publicly released package versions as published to the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- coredis/__init__.py +1 -3
- coredis/_packer.py +10 -10
- coredis/_protocols.py +19 -51
- coredis/_py_311_typing.py +20 -0
- coredis/_py_312_typing.py +17 -0
- coredis/_utils.py +49 -55
- coredis/_version.py +3 -3
- coredis/cache.py +57 -82
- coredis/client/__init__.py +1 -2
- coredis/client/basic.py +129 -56
- coredis/client/cluster.py +147 -70
- coredis/commands/__init__.py +27 -7
- coredis/commands/_key_spec.py +11 -10
- coredis/commands/_utils.py +1 -1
- coredis/commands/_validators.py +30 -20
- coredis/commands/_wrappers.py +19 -99
- coredis/commands/bitfield.py +10 -2
- coredis/commands/constants.py +20 -3
- coredis/commands/core.py +1674 -1251
- coredis/commands/function.py +29 -22
- coredis/commands/monitor.py +0 -71
- coredis/commands/pubsub.py +7 -142
- coredis/commands/request.py +108 -0
- coredis/commands/script.py +21 -22
- coredis/commands/sentinel.py +60 -49
- coredis/connection.py +14 -15
- coredis/exceptions.py +2 -2
- coredis/experimental/__init__.py +0 -4
- coredis/globals.py +3 -0
- coredis/modules/autocomplete.py +28 -30
- coredis/modules/base.py +15 -31
- coredis/modules/filters.py +269 -245
- coredis/modules/graph.py +61 -62
- coredis/modules/json.py +172 -140
- coredis/modules/response/_callbacks/autocomplete.py +5 -4
- coredis/modules/response/_callbacks/graph.py +34 -29
- coredis/modules/response/_callbacks/json.py +5 -3
- coredis/modules/response/_callbacks/search.py +49 -53
- coredis/modules/response/_callbacks/timeseries.py +18 -30
- coredis/modules/response/types.py +1 -5
- coredis/modules/search.py +186 -169
- coredis/modules/timeseries.py +184 -164
- coredis/parser.py +6 -19
- coredis/pipeline.py +477 -521
- coredis/pool/basic.py +7 -7
- coredis/pool/cluster.py +3 -3
- coredis/pool/nodemanager.py +10 -3
- coredis/response/_callbacks/__init__.py +76 -57
- coredis/response/_callbacks/acl.py +0 -3
- coredis/response/_callbacks/cluster.py +25 -16
- coredis/response/_callbacks/command.py +8 -6
- coredis/response/_callbacks/connection.py +4 -3
- coredis/response/_callbacks/geo.py +17 -13
- coredis/response/_callbacks/hash.py +13 -11
- coredis/response/_callbacks/keys.py +9 -5
- coredis/response/_callbacks/module.py +2 -3
- coredis/response/_callbacks/script.py +6 -8
- coredis/response/_callbacks/sentinel.py +21 -17
- coredis/response/_callbacks/server.py +36 -14
- coredis/response/_callbacks/sets.py +3 -4
- coredis/response/_callbacks/sorted_set.py +27 -24
- coredis/response/_callbacks/streams.py +22 -13
- coredis/response/_callbacks/strings.py +7 -6
- coredis/response/_callbacks/vector_sets.py +159 -0
- coredis/response/types.py +13 -4
- coredis/retry.py +12 -13
- coredis/sentinel.py +11 -1
- coredis/stream.py +4 -3
- coredis/tokens.py +348 -16
- coredis/typing.py +432 -81
- {coredis-4.23.1.dist-info → coredis-5.0.0.dist-info}/METADATA +4 -9
- coredis-5.0.0.dist-info/RECORD +95 -0
- coredis/client/keydb.py +0 -336
- coredis/pipeline.pyi +0 -2103
- coredis-4.23.1.dist-info/RECORD +0 -93
- {coredis-4.23.1.dist-info → coredis-5.0.0.dist-info}/WHEEL +0 -0
- {coredis-4.23.1.dist-info → coredis-5.0.0.dist-info}/licenses/LICENSE +0 -0
- {coredis-4.23.1.dist-info → coredis-5.0.0.dist-info}/top_level.txt +0 -0
coredis/pipeline.py
CHANGED
@@ -5,21 +5,22 @@ import functools
 import inspect
 import sys
 import textwrap
+import warnings
 from abc import ABCMeta
 from concurrent.futures import CancelledError
-from dataclasses import dataclass, field
-from itertools import chain
 from types import TracebackType
 from typing import Any, cast

-from
+from deprecated.sphinx import deprecated

-from coredis._utils import b, hash_slot
-from coredis.client import Client,
+from coredis._utils import b, hash_slot, nativestr
+from coredis.client import Client, RedisCluster
+from coredis.commands import CommandRequest, CommandResponseT
 from coredis.commands._key_spec import KeySpec
 from coredis.commands.constants import CommandName, NodeFlag
+from coredis.commands.request import TransformedResponse
 from coredis.commands.script import Script
-from coredis.connection import BaseConnection, ClusterConnection, CommandInvocation
+from coredis.connection import BaseConnection, ClusterConnection, CommandInvocation, Connection
 from coredis.exceptions import (
     AskError,
     ClusterCrossSlotError,
@@ -48,16 +49,23 @@ from coredis.response._callbacks import (
 from coredis.retry import ConstantRetryPolicy, retryable
 from coredis.typing import (
     AnyStr,
+    Awaitable,
     Callable,
-
-
+    ExecutionParameters,
+    Generator,
     Iterable,
     KeyT,
     Parameters,
     ParamSpec,
+    RedisCommand,
+    RedisCommandP,
+    RedisValueT,
     ResponseType,
+    Self,
     StringT,
+    T_co,
     TypeVar,
+    Unpack,
     ValueT,
 )

@@ -75,45 +83,140 @@ UNWATCH_COMMANDS = {CommandName.DISCARD, CommandName.EXEC, CommandName.UNWATCH}


 def wrap_pipeline_method(
-    kls: PipelineMeta, func: Callable[P,
-) -> Callable[P,
+    kls: PipelineMeta, func: Callable[P, Awaitable[R]]
+) -> Callable[P, Awaitable[R]]:
     @functools.wraps(func)
-
-    return
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
+        return func(*args, **kwargs)

-    wrapper.__annotations__ = wrapper.__annotations__.copy()
-    wrapper.__annotations__["return"] = kls
     wrapper.__doc__ = textwrap.dedent(wrapper.__doc__ or "")
     wrapper.__doc__ = f"""
-    Pipeline variant of :meth:`coredis.Redis.{func.__name__}` that does not execute
-    immediately and instead pushes the command into a stack for batch send
-    and returns the instance of :class:`{kls.__name__}` itself.
+    .. note:: Pipeline variant of :meth:`coredis.Redis.{func.__name__}` that does not execute
+       immediately and instead pushes the command into a stack for batch send.

-
-
+    The return value can be retrieved either as part of the tuple returned by
+    :meth:`~{kls.__name__}.execute` or by awaiting the :class:`~coredis.commands.CommandRequest`
+    instance after calling :meth:`~{kls.__name__}.execute`

     {wrapper.__doc__}
     """
     return wrapper


-
-
-
-
-
-
-
+class PipelineCommandRequest(CommandRequest[CommandResponseT]):
+    """
+    Command request used within a pipeline. Handles immediate execution for WATCH or
+    watched commands outside explicit transactions, otherwise queues the command.
+    """
+
+    client: Pipeline[Any] | ClusterPipeline[Any]
+    queued_response: Awaitable[bytes | str]
+
+    def __init__(
+        self,
+        client: Pipeline[Any] | ClusterPipeline[Any],
+        name: bytes,
+        *arguments: ValueT,
+        callback: Callable[..., CommandResponseT],
+        execution_parameters: ExecutionParameters | None = None,
+        parent: CommandRequest[Any] | None = None,
+    ) -> None:
+        super().__init__(
+            client,
+            name,
+            *arguments,
+            callback=callback,
+            execution_parameters=execution_parameters,
+        )
+        if not parent:
+            if (client.watching or name == CommandName.WATCH) and not client.explicit_transaction:
+                self.response = client.immediate_execute_command(
+                    self, callback=callback, **self.execution_parameters
+                )
+            else:
+                client.pipeline_execute_command(self)  # type: ignore[arg-type]
+        self.parent = parent
+
+    def transform(
+        self, transformer: type[TransformedResponse]
+    ) -> CommandRequest[TransformedResponse]:
+        transform_func = functools.partial(
+            self.type_adapter.deserialize,
+            return_type=transformer,
+        )
+        return cast(type[PipelineCommandRequest[TransformedResponse]], self.__class__)(
+            self.client,
+            self.name,
+            *self.arguments,
+            callback=lambda resp, **k: transform_func(resp),
+            execution_parameters=self.execution_parameters,
+            parent=self,
+        )

+    async def __backward_compatibility_return(self) -> Pipeline[Any] | ClusterPipeline[Any]:
+        """
+        For backward compatibility: returns the pipeline instance when awaited before execute().
+        """
+        return self.client

-
-
-
-
-
+    def __await__(self) -> Generator[None, None, CommandResponseT]:
+        if hasattr(self, "response"):
+            return self.response.__await__()
+        elif self.parent:
+
+            async def _transformed() -> CommandResponseT:
+                if (r := await self.parent) == self.client:  # type: ignore
+                    return r  # type: ignore
+                else:
+                    return self.callback(r)
+
+            return _transformed().__await__()
+        else:
+            warnings.warn(
+                """
+                Awaiting a pipeline command response before calling `execute()` on the pipeline instance
+                has no effect and returns the pipeline instance itself for backward compatibility.
+
+                To add commands to a pipeline simply call the methods synchronously. The awaitable response
+                can be awaited after calling `execute()` to retrieve a statically typed response if required.
+                """,
+                stacklevel=2,
+            )
+            return self.__backward_compatibility_return().__await__()  # type: ignore[return-value]
+
+
+class ClusterPipelineCommandRequest(PipelineCommandRequest[CommandResponseT]):
+    """
+    Command request for cluster pipelines, tracks position and result for cluster routing.
+    """
+
+    def __init__(
+        self,
+        client: ClusterPipeline[Any],
+        name: bytes,
+        *arguments: ValueT,
+        callback: Callable[..., CommandResponseT],
+        execution_parameters: ExecutionParameters | None = None,
+        parent: CommandRequest[Any] | None = None,
+    ) -> None:
+        self.position: int = 0
+        self.result: Any | None = None
+        self.asking: bool = False
+        super().__init__(
+            client,
+            name,
+            *arguments,
+            callback=callback,
+            execution_parameters=execution_parameters,
+            parent=parent,
+        )


 class NodeCommands:
+    """
+    Helper for grouping and executing commands on a single cluster node, handling transactions if needed.
+    """
+
     def __init__(
         self,
         client: RedisCluster[AnyStr],
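The new `PipelineCommandRequest` above is what makes queued commands awaitable after the batch is sent. A minimal sketch of the resulting usage, assuming a reachable Redis server and that `Redis.pipeline()` yields the pipeline when awaited (which the back-compat `__await__` shim suggests); connection details are illustrative, not part of the diff:

```python
import asyncio

import coredis


async def main() -> None:
    client = coredis.Redis()  # assumes redis on localhost:6379
    pipeline = await client.pipeline()
    # Queuing is synchronous in 5.0.0; each call returns a CommandRequest.
    set_request = pipeline.set("fubar", "1")
    get_request = pipeline.get("fubar")
    # execute() sends the batch and returns all results as a tuple ...
    results = await pipeline.execute()
    # ... and, per the docstrings above, each request can also be awaited
    # after execute() for an individually typed response.
    print(results, await set_request, await get_request)


asyncio.run(main())
```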
@@ -121,45 +224,56 @@ class NodeCommands:
         in_transaction: bool = False,
         timeout: float | None = None,
     ):
-        self.client = client
+        self.client: RedisCluster[Any] = client
         self.connection = connection
-        self.commands: list[
+        self.commands: list[ClusterPipelineCommandRequest[Any]] = []
         self.in_transaction = in_transaction
         self.timeout = timeout
+        self.multi_cmd: asyncio.Future[ResponseType] | None = None
+        self.exec_cmd: asyncio.Future[ResponseType] | None = None

-    def extend(self, c: list[
+    def extend(self, c: list[ClusterPipelineCommandRequest[Any]]) -> None:
         self.commands.extend(c)

-    def append(self, c:
+    def append(self, c: ClusterPipelineCommandRequest[Any]) -> None:
         self.commands.append(c)

     async def write(self) -> None:
         connection = self.connection
         commands = self.commands

-        #
-        # and ensure that nothing is sitting there from a previous run.
-
+        # Reset results for all commands before writing.
         for c in commands:
             c.result = None

-        #
-        # send all the commands and catch connection and timeout errors.
+        # Batch all commands into a single request for efficiency.
         try:
+            if self.in_transaction:
+                self.multi_cmd = await connection.create_request(
+                    CommandName.MULTI, timeout=self.timeout
+                )
             requests = await connection.create_requests(
                 [
                     CommandInvocation(
-                        cmd.
-                        cmd.
-                        (
+                        cmd.name,
+                        cmd.arguments,
+                        (
+                            bool(cmd.execution_parameters.get("decode"))
+                            if cmd.execution_parameters.get("decode")
+                            else None
+                        ),
                         None,
                     )
                     for cmd in commands
                 ],
                 timeout=self.timeout,
             )
+            if self.in_transaction:
+                self.exec_cmd = await connection.create_request(
+                    CommandName.EXEC, timeout=self.timeout
+                )
             for i, cmd in enumerate(commands):
-                cmd.
+                cmd.response = requests[i]
         except (ConnectionError, TimeoutError) as e:
             for c in commands:
                 c.result = e
@@ -167,51 +281,50 @@ class NodeCommands:
     async def read(self) -> None:
         connection = self.connection
         success = True
-
+        multi_result = None
+        if self.multi_cmd:
+            multi_result = await self.multi_cmd
+            success = multi_result in {b"OK", "OK"}
         for c in self.commands:
             if c.result is None:
                 try:
-                    c.result = await c.
+                    c.result = await c.response if c.response else None
                 except ExecAbortError:
                     raise
                 except (ConnectionError, TimeoutError, RedisError) as e:
                     success = False
                     c.result = e
-
-        if self.in_transaction:
-            transaction_result = []
+        if self.in_transaction and self.exec_cmd:
             if success:
-
-
-
-
-
-                raise WatchError("Watched variable changed.")
+                res = await self.exec_cmd
+                if res:
+                    transaction_result = cast(list[ResponseType], res)
+                else:
+                    raise WatchError("Watched variable changed.")
                 for idx, c in enumerate(
                     [
                         _c
                         for _c in sorted(self.commands, key=lambda x: x.position)
-                        if _c.
+                        if _c.name not in {CommandName.MULTI, CommandName.EXEC}
                     ]
                 ):
                     if isinstance(c.callback, AsyncPreProcessingCallback):
-                        await c.callback.pre_process(
-                            self.client, transaction_result[idx], **c.options
-                        )
+                        await c.callback.pre_process(self.client, transaction_result[idx])
                     c.result = c.callback(
                         transaction_result[idx],
                         version=connection.protocol_version,
-                        **c.options,
                     )
-        elif isinstance(
-            raise
+        elif isinstance(multi_result, BaseException):
+            raise multi_result


 class PipelineMeta(ABCMeta):
     RESULT_CALLBACKS: dict[str, Callable[..., Any]]
     NODES_FLAGS: dict[str, NodeFlag]

-    def __new__(
+    def __new__(
+        cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
+    ) -> PipelineMeta:
         kls = super().__new__(cls, name, bases, namespace)

         for name, method in PipelineMeta.get_methods(kls).items():
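`NodeCommands.write` batches every command for a node into one request before `read` consumes any replies. The shape of that write-all-then-read-all pattern, reduced to a runnable toy (the `FakeNode` class is hypothetical and stands in for a node connection):

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class FakeNode:
    name: str
    queued: list[str] = field(default_factory=list)

    async def write(self, commands: list[str]) -> None:
        # All commands for this node go out in one batch before any reads.
        self.queued = commands

    async def read(self) -> list[str]:
        # Replies are only consumed after every node has been written to.
        return [f"{self.name}: reply to {c}" for c in self.queued]


async def main() -> None:
    groups = {"node-a": ["GET a", "SET b 1"], "node-b": ["GET c"]}
    nodes = [FakeNode(name) for name in groups]
    for node in nodes:  # write phase: flush requests to every node first
        await node.write(groups[node.name])
    for node in nodes:  # read phase: then drain replies node by node
        print(await node.read())


asyncio.run(main())
```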
@@ -226,7 +339,9 @@ class PipelineMeta(ABCMeta):


 class ClusterPipelineMeta(PipelineMeta):
-    def __new__(
+    def __new__(
+        cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
+    ) -> PipelineMeta:
         kls = super().__new__(cls, name, bases, namespace)
         for name, method in ClusterPipelineMeta.get_methods(kls).items():
             cmd = getattr(method, "__coredis_command", None)
@@ -242,28 +357,20 @@ class ClusterPipelineMeta(PipelineMeta):
         return kls


-class
-    """Pipeline for the Redis class"""
-
+class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
     """
-
-
-    saving all the values in a list to Redis.
+    Pipeline for batching multiple commands to a Redis server.
+    Supports transactions and command stacking.

     All commands executed within a pipeline are wrapped with MULTI and EXEC
-    calls
-    executed atomically.
+    calls when :paramref:`transaction` is ``True``.

     Any command raising an exception does *not* halt the execution of
     subsequent commands in the pipeline. Instead, the exception is caught
-    and its instance is placed into the response list returned by execute
-    Code iterating over the response list should be able to deal with an
-    instance of an exception as a potential value. In general, these will be
-    ResponseError exceptions, such as those raised when issuing a command
-    on a key of a different datatype.
+    and its instance is placed into the response list returned by :meth:`execute`
     """

-    command_stack: list[
+    command_stack: list[PipelineCommandRequest[Any]]
     connection_pool: ConnectionPool

     def __init__(
@@ -273,20 +380,21 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
         watches: Parameters[KeyT] | None = None,
         timeout: float | None = None,
     ) -> None:
-        self.client = client
+        self.client: Client[AnyStr] = client
         self.connection_pool = client.connection_pool
-        self.connection = None
+        self.connection: Connection | None = None
         self._transaction = transaction
         self.watching = False
         self.watches: Parameters[KeyT] | None = watches or None
         self.command_stack = []
-        self.cache = None
+        self.cache = None
         self.explicit_transaction = False
         self.scripts: set[Script[AnyStr]] = set()
         self.timeout = timeout
+        self.type_adapter = client.type_adapter

-    async def __aenter__(self) ->
-        return self
+    async def __aenter__(self) -> Pipeline[AnyStr]:
+        return await self.get_instance()

     async def __aexit__(
         self,
@@ -294,7 +402,10 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
         exc_value: BaseException | None,
         traceback: TracebackType | None,
     ) -> None:
-        await self.
+        await self.clear()
+
+    def __await__(self) -> Generator[Any, Any, Pipeline[AnyStr]]:
+        return self.get_instance().__await__()

     def __len__(self) -> int:
         return len(self.command_stack)
@@ -302,38 +413,61 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
     def __bool__(self) -> bool:
         return True

-    async def
-        self
-        self.scripts: set[Script[AnyStr]] = set()
-        # make sure to reset the connection state in the event that we were
-        # watching something
+    async def get_instance(self) -> Pipeline[AnyStr]:
+        return self

+    def create_request(
+        self,
+        name: bytes,
+        *arguments: ValueT,
+        callback: Callable[..., T_co],
+        execution_parameters: ExecutionParameters | None = None,
+    ) -> CommandRequest[T_co]:
+        """
+        :meta private:
+        """
+        return PipelineCommandRequest(
+            self, name, *arguments, callback=callback, execution_parameters=execution_parameters
+        )
+
+    async def clear(self) -> None:
+        """
+        Clear the pipeline, reset state, and release the connection back to the pool.
+        """
+        self.command_stack.clear()
+        self.scripts = set()
+        # Reset connection state if we were watching something.
         if self.watching and self.connection:
             try:
-                # call this manually since our unwatch or
-                # immediate_execute_command methods can call reset_pipeline()
                 request = await self.connection.create_request(CommandName.UNWATCH, decode=False)
                 await request
             except ConnectionError:
-                # disconnect will also remove any previous WATCHes
                 self.connection.disconnect()
-        #
+        # Reset pipeline state and release connection if needed.
         self.watching = False
         self.watches = []
         self.explicit_transaction = False
-        # we can safely return the connection to the pool here since we're
-        # sure we're no longer WATCHing anything
-
         if self.connection:
             self.connection_pool.release(self.connection)
             self.connection = None

-
+    #: :meta private:
+    reset_pipeline = clear
+
+    @deprecated(
+        "The reset method in pipelines clashes with the redis ``RESET`` command. Use :meth:`clear` instead",
+        "5.0.0",
+    )
+    def reset(self) -> CommandRequest[None]:
         """
-
-        are issued. End the transactional block with `execute`.
+        Deprecated. Use :meth:`clear` instead.
         """
+        return self.clear()  # type: ignore

+    def multi(self) -> None:
+        """
+        Start a transactional block after WATCH commands. End with `execute()`.
+        """
         if self.explicit_transaction:
             raise RedisError("Cannot issue nested calls to MULTI")

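The hunk above renames the pipeline's `reset()` to `clear()` (keeping `reset_pipeline` as a private alias) because `reset` collides with the Redis `RESET` command. A sketch of the migration, assuming a reachable server and that the `@deprecated` decorator warns at call time as the `deprecated` package normally does:

```python
import asyncio
import warnings

import coredis


async def main() -> None:
    client = coredis.Redis()  # illustrative connection
    pipeline = await client.pipeline()
    pipeline.set("key", "value")
    # 5.0.0: clear() empties the command stack and releases the connection.
    await pipeline.clear()
    # The old name still exists but should warn via the @deprecated decorator.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        await pipeline.reset()
    print([str(w.message) for w in caught])


asyncio.run(main())
```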
@@ -341,27 +475,20 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
             raise RedisError("Commands without an initial WATCH have already been issued")
         self.explicit_transaction = True

-
+    def execute_command(
         self,
-        command:
-
-
-
-
-        if (self.watching or command == CommandName.WATCH) and not self.explicit_transaction:
-            return await self.immediate_execute_command(
-                command, *args, callback=callback, **options
-            )  # type: ignore
-
-        return self.pipeline_execute_command(command, *args, callback=callback, **options)
+        command: RedisCommandP,
+        callback: Callable[..., R] = NoopCallback(),
+        **options: Unpack[ExecutionParameters],
+    ) -> Awaitable[R]:
+        raise NotImplementedError

     async def immediate_execute_command(
         self,
-        command:
-
-
-
-    ) -> Any:  # type: ignore
+        command: RedisCommandP,
+        callback: Callable[..., R] = NoopCallback(),
+        **kwargs: Unpack[ExecutionParameters],
+    ) -> R:
         """
         Executes a command immediately, but don't auto-retry on a
         ConnectionError if we're already WATCHing a variable. Used when
@@ -372,17 +499,16 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
         """
         conn = self.connection
         # if this is the first call, we need a connection
-
         if not conn:
             conn = await self.connection_pool.get_connection()
             self.connection = conn
         try:
-            request = await conn.create_request(
-
+            request = await conn.create_request(
+                command.name, *command.arguments, decode=kwargs.get("decode")
+            )
             return callback(
                 await request,
                 version=conn.protocol_version,
-                **kwargs,
             )
         except (ConnectionError, TimeoutError):
             conn.disconnect()
@@ -390,85 +516,59 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
             # if we're not already watching, we can safely retry the command
             try:
                 if not self.watching:
-                    request = await conn.create_request(
-
+                    request = await conn.create_request(
+                        command.name, *command.arguments, decode=kwargs.get("decode")
+                    )
+                    return callback(await request, version=conn.protocol_version)
+                raise
             except ConnectionError:
                 # the retry failed so cleanup.
                 conn.disconnect()
-                await self.
+                await self.clear()
                 raise
         finally:
-            if command in UNWATCH_COMMANDS:
+            if command.name in UNWATCH_COMMANDS:
                 self.watching = False
-            elif command == CommandName.WATCH:
+            elif command.name == CommandName.WATCH:
                 self.watching = True

     def pipeline_execute_command(
         self,
-        command:
-
-        callback: Callable[..., Any],
-        **options: ValueT | None,
-    ) -> PipelineImpl[AnyStr]:
+        command: PipelineCommandRequest[R],
+    ) -> None:
         """
-
-
-        Returns the current Pipeline object back so commands can be
-        chained together, such as:
-
-        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
-
-        At some other point, you can then run: pipe.execute(),
-        which will execute all commands queued in the pipe.
+        Queue a command for execution on the next `execute()` call.

         :meta private:
         """
-        self.command_stack.append(
-            PipelineCommand(command=command, args=args, options=options, callback=callback)
-        )
-
-        return self
+        self.command_stack.append(command)

     async def _execute_transaction(
         self,
         connection: BaseConnection,
-        commands: list[
+        commands: list[PipelineCommandRequest[Any]],
         raise_on_error: bool,
     ) -> tuple[Any, ...]:
-
-            chain(
-                [
-                    PipelineCommand(
-                        command=CommandName.MULTI,
-                        args=(),
-                    )
-                ],
-                commands,
-                [
-                    PipelineCommand(
-                        command=CommandName.EXEC,
-                        args=(),
-                    )
-                ],
-            )
-        )
-        if self.watches:
-            await self.watch(*self.watches)
-
+        multi_cmd = await connection.create_request(CommandName.MULTI, timeout=self.timeout)
         requests = await connection.create_requests(
             [
                 CommandInvocation(
-                    cmd.
-                    cmd.
-                    (
+                    cmd.name,
+                    cmd.arguments,
+                    (
+                        bool(cmd.execution_parameters.get("decode"))
+                        if cmd.execution_parameters.get("decode")
+                        else None
+                    ),
                     None,
                 )
-                for cmd in
+                for cmd in commands
             ],
             timeout=self.timeout,
         )
-
-
+        exec_cmd = await connection.create_request(CommandName.EXEC, timeout=self.timeout)
+        for i, cmd in enumerate(commands):
+            cmd.queued_response = cast(Awaitable[StringT], requests[i])

         errors: list[tuple[int, RedisError | None]] = []
         multi_failed = False
@@ -478,30 +578,32 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
         # so that we read all the additional command messages from
         # the socket
         try:
-            await
+            await multi_cmd
         except RedisError:
             multi_failed = True
             errors.append((0, cast(RedisError, sys.exc_info()[1])))

         # and all the other commands
-        for i, cmd in enumerate(
+        for i, cmd in enumerate(commands):
             try:
-                if cmd.
-                    assert (await cmd.
+                if cmd.queued_response:
+                    assert (await cmd.queued_response) in {b"QUEUED", "QUEUED"}
             except RedisError:
                 ex = cast(RedisError, sys.exc_info()[1])
-                self.annotate_exception(ex, i + 1, cmd.
+                self.annotate_exception(ex, i + 1, cmd.name, cmd.arguments)
                 errors.append((i, ex))

         response: list[ResponseType]
         try:
             response = cast(
                 list[ResponseType],
-                await
+                await exec_cmd if exec_cmd else None,
             )
         except (ExecAbortError, ResponseError):
             if self.explicit_transaction and not multi_failed:
-                await self.immediate_execute_command(
+                await self.immediate_execute_command(
+                    RedisCommand(name=CommandName.DISCARD, arguments=()), callback=BoolCallback()
+                )

             if errors and errors[0][1]:
                 raise errors[0][1]
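`_execute_transaction` above expects a fixed wire conversation: `MULTI` answers `OK`, every queued command answers `QUEUED` (asserted above), and `EXEC` answers with one result per command. Spelled out with standard Redis semantics and an imagined transport:

```python
# The MULTI/EXEC exchange the transaction path above relies on; the
# transport is imagined, the replies are standard Redis behaviour.
conversation = [
    ("MULTI", b"OK"),        # must succeed before anything is queued
    ("SET k 1", b"QUEUED"),  # every queued command must reply QUEUED
    ("INCR k", b"QUEUED"),
    ("EXEC", [b"OK", 2]),    # one result per queued command, in order
]
for command, reply in conversation:
    print(f"{command!r} -> {reply!r}")
```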
@@ -529,24 +631,30 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
         for r, cmd in zip(response, commands):
             if not isinstance(r, Exception):
                 if isinstance(cmd.callback, AsyncPreProcessingCallback):
-                    await cmd.callback.pre_process(self.client, r
-                r = cmd.callback(r, version=connection.protocol_version, **cmd.
+                    await cmd.callback.pre_process(self.client, r)
+                r = cmd.callback(r, version=connection.protocol_version, **cmd.execution_parameters)
+            cmd.response = asyncio.get_running_loop().create_future()
+            cmd.response.set_result(r)
             data.append(r)
         return tuple(data)

     async def _execute_pipeline(
         self,
         connection: BaseConnection,
-        commands: list[
+        commands: list[PipelineCommandRequest[Any]],
         raise_on_error: bool,
     ) -> tuple[Any, ...]:
         # build up all commands into a single request to increase network perf
         requests = await connection.create_requests(
             [
                 CommandInvocation(
-                    cmd.
-                    cmd.
-                    (
+                    cmd.name,
+                    cmd.arguments,
+                    (
+                        bool(cmd.execution_parameters.get("decode"))
+                        if cmd.execution_parameters.get("decode")
+                        else None
+                    ),
                     None,
                 )
                 for cmd in commands
@@ -554,35 +662,38 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
             timeout=self.timeout,
         )
         for i, cmd in enumerate(commands):
-            cmd.
+            cmd.response = requests[i]

         response: list[Any] = []
-
         for cmd in commands:
             try:
-                res = await cmd.
+                res = await cmd.response if cmd.response else None
                 if isinstance(cmd.callback, AsyncPreProcessingCallback):
-                    await cmd.callback.pre_process(self.client, res, **cmd.
-
-
-
-
-                        **cmd.options,
-                    )
+                    await cmd.callback.pre_process(self.client, res, **cmd.execution_parameters)
+                resp = cmd.callback(
+                    res,
+                    version=connection.protocol_version,
+                    **cmd.execution_parameters,
                 )
-
+                cmd.response = asyncio.get_event_loop().create_future()
+                cmd.response.set_result(resp)
+                response.append(resp)
+            except ResponseError as re:
+                cmd.response = asyncio.get_event_loop().create_future()
+                cmd.response.set_exception(re)
                 response.append(sys.exc_info()[1])
-
         if raise_on_error:
             self.raise_first_error(commands, response)

         return tuple(response)

-    def raise_first_error(
+    def raise_first_error(
+        self, commands: list[PipelineCommandRequest[Any]], response: ResponseType
+    ) -> None:
         assert isinstance(response, list)
         for i, r in enumerate(response):
             if isinstance(r, RedisError):
-                self.annotate_exception(r, i + 1, commands[i].
+                self.annotate_exception(r, i + 1, commands[i].name, commands[i].arguments)
                 raise r

     def annotate_exception(
@@ -590,7 +701,7 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
         exception: RedisError | None,
         number: int,
         command: bytes,
-        args: Iterable[
+        args: Iterable[RedisValueT],
     ) -> None:
         if exception:
             cmd = command.decode("latin-1")
@@ -598,26 +709,29 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):
             msg = f"Command # {number} ({cmd} {args}) of pipeline caused error: {str(exception.args[0])}"
             exception.args = (msg,) + exception.args[1:]

-    async def load_scripts(self):
+    async def load_scripts(self) -> None:
         # make sure all scripts that are about to be run on this pipeline exist
         scripts = list(self.scripts)
         immediate = self.immediate_execute_command
         shas = [s.sha for s in scripts]
         # we can't use the normal script_* methods because they would just
         # get buffered in the pipeline.
-        exists = await immediate(
+        exists = await immediate(
+            RedisCommand(CommandName.SCRIPT_EXISTS, tuple(shas)), callback=BoolsCallback()
+        )

         if not all(exists):
             for s, exist in zip(scripts, exists):
                 if not exist:
                     s.sha = await immediate(
-                        CommandName.SCRIPT_LOAD,
-                        s.script,
+                        RedisCommand(CommandName.SCRIPT_LOAD, (s.script,)),
                         callback=AnyStrCallback[AnyStr](),
                     )

     async def execute(self, raise_on_error: bool = True) -> tuple[Any, ...]:
-        """
+        """
+        Execute all queued commands in the pipeline. Returns a tuple of results.
+        """
         stack = self.command_stack

         if not stack:
@@ -635,7 +749,7 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):

         if not conn:
             conn = await self.connection_pool.get_connection()
-            # assign to self.connection so
+            # assign to self.connection so clear() releases the connection
             # back to the pool after we're done
             self.connection = conn

@@ -657,36 +771,38 @@ class PipelineImpl(Client[AnyStr], metaclass=PipelineMeta):

             return await exec(conn, stack, raise_on_error)
         finally:
-            await self.
+            await self.clear()

-
+    def watch(self, *keys: KeyT) -> CommandRequest[bool]:
         """
-
+        Watch the given keys for changes. Switches to immediate execution mode
+        until :meth:`multi` is called.
         """
-
         if self.explicit_transaction:
             raise RedisError("Cannot issue a WATCH after a MULTI")

-        return
-            CommandName.WATCH, *keys, callback=SimpleStringCallback()
-        )
+        return self.create_request(CommandName.WATCH, *keys, callback=SimpleStringCallback())

-
-        """
+    def unwatch(self) -> CommandRequest[bool]:
+        """
+        Remove all key watches and return to buffered mode.
+        """
+        return self.create_request(CommandName.UNWATCH, callback=SimpleStringCallback())

-        return (
-            await self.immediate_execute_command(
-                CommandName.UNWATCH, callback=SimpleStringCallback()
-            )
-            if self.watching
-            else True
-        )

+class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
+    """
+    Pipeline for batching commands to a Redis Cluster.
+    Handles routing, transactions, and error management across nodes.
+
+    .. warning:: Unlike :class:`Pipeline`, :paramref:`transaction` is ``False`` by
+       default as there is limited support for transactions in redis cluster
+       (only keys in the same slot can be part of a transaction).
+    """

-class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
     client: RedisCluster[AnyStr]
     connection_pool: ClusterConnectionPool
-    command_stack: list[
+    command_stack: list[ClusterPipelineCommandRequest[Any]]

     RESULT_CALLBACKS: dict[str, Callable[..., Any]] = {}
     NODES_FLAGS: dict[str, NodeFlag] = {}
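`watch()` now returns a `CommandRequest[bool]` and flips the pipeline into immediate execution until `multi()` is called, which is what the classic optimistic-locking loop relies on. A sketch under the assumption of a reachable server (the key name and client setup are illustrative):

```python
import asyncio

import coredis
from coredis.exceptions import WatchError


async def safe_increment(client: coredis.Redis, key: str) -> None:
    pipeline = await client.pipeline()
    while True:
        try:
            await pipeline.watch(key)  # immediate-execution mode starts here
            current = int(await pipeline.get(key) or 0)
            pipeline.multi()  # everything after this point is queued
            pipeline.set(key, current + 1)
            await pipeline.execute()  # raises WatchError if the key changed
            return
        except WatchError:
            continue  # someone touched the watched key; retry the block


asyncio.run(safe_increment(coredis.Redis(), "counter"))
```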
@@ -709,18 +825,38 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
         self.watches: Parameters[KeyT] | None = watches or None
         self.watching = False
         self.explicit_transaction = False
-        self.cache = None
+        self.cache = None
         self.timeout = timeout
+        self.type_adapter = client.type_adapter

-
+    def create_request(
+        self,
+        name: bytes,
+        *arguments: ValueT,
+        callback: Callable[..., T_co],
+        execution_parameters: ExecutionParameters | None = None,
+    ) -> CommandRequest[T_co]:
+        """
+        :meta private:
+        """
+        return ClusterPipelineCommandRequest(
+            self, name, *arguments, callback=callback, execution_parameters=execution_parameters
+        )
+
+    def watch(self, *keys: KeyT) -> CommandRequest[bool]:
+        """
+        Watch the given keys for changes. Switches to immediate execution mode
+        until :meth:`multi` is called.
+        """
         if self.explicit_transaction:
             raise RedisError("Cannot issue a WATCH after a MULTI")

-        return
-            CommandName.WATCH, *keys, callback=SimpleStringCallback()
-        )
+        return self.create_request(CommandName.WATCH, *keys, callback=SimpleStringCallback())

     async def unwatch(self) -> bool:
+        """
+        Remove all key watches and return to buffered mode.
+        """
         if self._watched_connection:
             try:
                 return await self._unwatch(self._watched_connection)
@@ -732,20 +868,21 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
                 self._watched_connection = None
         return True

-    def
-        return f"{type(self).__name__}"
-
-    def __del__(self):
+    def __del__(self) -> None:
         if self._watched_connection:
             self.connection_pool.release(self._watched_connection)

-    def __len__(self):
+    def __len__(self) -> int:
         return len(self.command_stack)

     def __bool__(self) -> bool:
         return True

-
+    def __await__(self) -> Generator[None, None, Self]:
+        yield
+        return self
+
+    async def __aenter__(self) -> ClusterPipeline[AnyStr]:
         return self

     async def __aexit__(
@@ -754,46 +891,29 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
         exc_value: BaseException | None,
         traceback: TracebackType | None,
     ) -> None:
-        await self.
+        await self.clear()

-
+    def execute_command(
         self,
-        command:
-
-
-
-
-        if (self.watching or command == CommandName.WATCH) and not self.explicit_transaction:
-            return await self.immediate_execute_command(
-                command, *args, callback=callback, **options
-            )  # type: ignore
-        return self.pipeline_execute_command(command, *args, callback=callback, **options)
+        command: RedisCommandP,
+        callback: Callable[..., R] = NoopCallback(),
+        **options: Unpack[ExecutionParameters],
+    ) -> Awaitable[R]:
+        raise NotImplementedError

     def pipeline_execute_command(
         self,
-        command:
-
-
-
-
-    ) -> ClusterPipelineImpl[AnyStr]:
-        self.command_stack.append(
-            ClusterPipelineCommand(
-                command=command,
-                args=args,
-                options=options,
-                callback=callback,
-                position=len(self.command_stack),
-            )
-        )
-
-        return self
+        command: ClusterPipelineCommandRequest[Any],
+    ) -> None:
+        command.position = len(self.command_stack)
+        self.command_stack.append(command)

     def raise_first_error(self) -> None:
         for c in self.command_stack:
             r = c.result

             if isinstance(r, RedisError):
-                self.annotate_exception(r, c.position + 1, c.
+                self.annotate_exception(r, c.position + 1, c.name, c.arguments)
                 raise r

     def annotate_exception(
@@ -801,7 +921,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
         exception: RedisError | None,
         number: int,
         command: bytes,
-        args: Iterable[
+        args: Iterable[RedisValueT],
     ) -> None:
         if exception:
             cmd = command.decode("latin-1")
@@ -810,6 +930,9 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
             exception.args = (msg,) + exception.args[1:]

     async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
+        """
+        Execute all queued commands in the cluster pipeline. Returns a tuple of results.
+        """
         await self.connection_pool.initialize()

         if not self.command_stack:
@@ -822,10 +945,12 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
         try:
             return await execute(raise_on_error)
         finally:
-            await self.
+            await self.clear()

-    async def
-        """
+    async def clear(self) -> None:
+        """
+        Clear the pipeline, reset state, and release any held connections.
+        """
         self.command_stack = []

         self.scripts: set[Script[AnyStr]] = set()
@@ -837,32 +962,49 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
             self.connection_pool.release(self._watched_connection)
             self._watched_connection = None

+    #: :meta private:
+    reset_pipeline = clear
+
+    @deprecated(
+        "The reset method in pipelines clashes with the redis ``RESET`` command. Use :meth:`clear` instead",
+        "5.0.0",
+    )
+    def reset(self) -> CommandRequest[None]:
+        """
+        Empties the pipeline and resets / returns the connection
+        back to the pool
+
+        :meta private:
+        """
+        return self.clear()  # type: ignore
+
     @retryable(policy=ConstantRetryPolicy((ClusterDownError,), 3, 0.1))
     async def send_cluster_transaction(self, raise_on_error: bool = True) -> tuple[object, ...]:
+        """
+        :meta private:
+        """
         attempt = sorted(self.command_stack, key=lambda x: x.position)
         slots: set[int] = set()
         for c in attempt:
-            slot = self._determine_slot(c.
+            slot = self._determine_slot(c.name, *c.arguments, **c.execution_parameters)
             if slot:
                 slots.add(slot)

         if len(slots) > 1:
-            raise ClusterTransactionError("Multiple
+            raise ClusterTransactionError("Multiple slots involved in transaction")
         if not slots:
             raise ClusterTransactionError("No slots found for transaction")
         node = self.connection_pool.get_node_by_slot(slots.pop())

         if self._watched_node and node.name != self._watched_node.name:
-            raise ClusterTransactionError("Multiple
+            raise ClusterTransactionError("Multiple slots involved in transaction")

         conn = self._watched_connection or await self.connection_pool.get_connection_by_node(node)

         if self.watches:
             await self._watch(node, conn, self.watches)
         node_commands = NodeCommands(self.client, conn, in_transaction=True, timeout=self.timeout)
-        node_commands.append(ClusterPipelineCommand(CommandName.MULTI, ()))
         node_commands.extend(attempt)
-        node_commands.append(ClusterPipelineCommand(CommandName.EXEC, ()))
         self.explicit_transaction = True

         await node_commands.write()
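`send_cluster_transaction` above raises `ClusterTransactionError` unless every key hashes to one slot. Redis hash tags are the usual way to arrange that: only the substring inside the first `{...}` is hashed. A sketch of a slot-safe cluster transaction, assuming a startup node and that the `transaction` parameter keeps its documented meaning:

```python
import asyncio

import coredis


async def main() -> None:
    cluster = coredis.RedisCluster("localhost", 7000)  # illustrative node
    pipeline = await cluster.pipeline(transaction=True)
    # A shared {hash tag} forces both keys into the same slot, which the
    # single-slot check in send_cluster_transaction requires.
    pipeline.set("{user:42}:profile", "p")
    pipeline.set("{user:42}:sessions", "s")
    print(await pipeline.execute())


asyncio.run(main())
```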
@@ -872,11 +1014,9 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
|
|
|
872
1014
|
if self.explicit_transaction:
|
|
873
1015
|
request = await conn.create_request(CommandName.DISCARD)
|
|
874
1016
|
await request
|
|
875
|
-
# If at least one watched key is modified before the EXEC
|
|
876
|
-
# the whole transaction aborts,
|
|
877
|
-
# and EXEC returns a Null reply to notify that the transaction failed.
|
|
1017
|
+
# If at least one watched key is modified before EXEC, the transaction aborts and EXEC returns null.
|
|
878
1018
|
|
|
879
|
-
if node_commands.
|
|
1019
|
+
if node_commands.exec_cmd and await node_commands.exec_cmd is None:
|
|
880
1020
|
raise WatchError
|
|
881
1021
|
self.connection_pool.release(conn)
|
|
882
1022
|
|
|
@@ -889,7 +1029,7 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
|
|
|
889
1029
|
return tuple(
|
|
890
1030
|
n.result
|
|
891
1031
|
for n in node_commands.commands
|
|
892
|
-
if n.
|
|
1032
|
+
if n.name not in {CommandName.MULTI, CommandName.EXEC}
|
|
893
1033
|
)
|
|
894
1034
|
|
|
895
1035
|
@retryable(policy=ConstantRetryPolicy((ClusterDownError,), 3, 0.1))
|
|
@@ -897,107 +1037,64 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
|
|
|
897
1037
|
self, raise_on_error: bool = True, allow_redirections: bool = True
|
|
898
1038
|
) -> tuple[object, ...]:
|
|
899
1039
|
"""
|
|
900
|
-
|
|
1040
|
+
Execute all queued commands in the cluster pipeline, handling redirections
|
|
1041
|
+
and retries as needed.
|
|
901
1042
|
|
|
902
|
-
|
|
903
|
-
automatically. If set to false it will raise RedisClusterException.
|
|
1043
|
+
:meta private:
|
|
904
1044
|
"""
|
|
905
|
-
#
|
|
906
|
-
# if we have to run through it again, we only retry the commands that failed.
|
|
1045
|
+
# On first send, queue all commands. On retry, only failed ones.
|
|
907
1046
|
attempt = sorted(self.command_stack, key=lambda x: x.position)
|
|
908
1047
|
|
|
909
|
-
|
|
910
|
-
# build a list of node objects based on node names we need to
|
|
1048
|
+
# Group commands by node for efficient network usage.
|
|
911
1049
|
nodes: dict[str, NodeCommands] = {}
|
|
912
|
-
# as we move through each command that still needs to be processed,
|
|
913
|
-
# we figure out the slot number that command maps to, then from the slot determine the node.
|
|
914
1050
|
for c in attempt:
|
|
915
|
-
|
|
916
|
-
# command should route to.
|
|
917
|
-
slot = self._determine_slot(c.command, *c.args)
|
|
1051
|
+
slot = self._determine_slot(c.name, *c.arguments)
|
|
918
1052
|
node = self.connection_pool.get_node_by_slot(slot)
|
|
919
|
-
|
|
920
1053
|
if node.name not in nodes:
|
|
921
1054
|
nodes[node.name] = NodeCommands(
|
|
922
1055
|
self.client,
|
|
923
1056
|
await self.connection_pool.get_connection_by_node(node),
|
|
924
1057
|
timeout=self.timeout,
|
|
925
1058
|
)
|
|
926
|
-
|
|
927
1059
|
nodes[node.name].append(c)
|
|
928
1060
|
|
|
929
|
-
#
|
|
930
|
-
# we write to all the open sockets for each node first, before reading anything
|
|
931
|
-
# this allows us to flush all the requests out across the network essentially in parallel
|
|
932
|
-
# so that we can read them all in parallel as they come back.
|
|
933
|
-
# we dont' multiplex on the sockets as they come available, but that shouldn't make
|
|
934
|
-
# too much difference.
|
|
1061
|
+
# Write to all nodes, then read from all nodes in sequence.
|
|
935
1062
|
node_commands = nodes.values()
|
|
936
|
-
|
|
937
1063
|
for n in node_commands:
|
|
938
1064
|
await n.write()
|
|
939
|
-
|
|
940
1065
|
for n in node_commands:
|
|
941
1066
|
await n.read()
|
|
942
1067
|
|
|
943
|
-
#
|
|
944
|
-
#
|
|
945
|
-
# release connections back into the pool if for some reason the socket has data still left
|
|
946
|
-
# in it from a previous operation. The write and read operations already have try/catch
|
|
947
|
-
# around them for all known types of errors including connection and socket level errors.
|
|
948
|
-
# So if we hit an exception, something really bad happened and putting any of
|
|
949
|
-
# these connections back into the pool is a very bad idea.
|
|
950
|
-
# the socket might have unread buffer still sitting in it, and then the
|
|
951
|
-
# next time we read from it we pass the buffered result back from a previous
|
|
952
|
-
# command and every single request after to that connection will always get
|
|
953
|
-
# a mismatched result. (not just theoretical, I saw this happen on production x.x).
|
|
1068
|
+
# Release all connections back to the pool only if safe (no unread buffer).
|
|
1069
|
+
# If an error occurred, do not release to avoid buffer mismatches.
|
|
954
1070
|
for n in nodes.values():
|
|
955
1071
|
protocol_version = n.connection.protocol_version
|
|
956
1072
|
self.connection_pool.release(n.connection)
|
|
957
|
-
|
|
958
|
-
#
|
|
959
|
-
# if we have more commands to attempt, we've run into problems.
|
|
960
|
-
# collect all the commands we are allowed to retry.
|
|
961
|
-
# (MOVED, ASK, or connection errors or timeout errors)
|
|
1073
|
+
|
|
1074
|
+
# Retry MOVED/ASK/connection errors one by one if allowed.
|
|
962
1075
|
attempt = sorted(
|
|
963
1076
|
(c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)),
|
|
964
1077
|
key=lambda x: x.position,
|
|
965
1078
|
)
|
|
966
1079
|
|
|
967
1080
|
if attempt and allow_redirections:
|
|
968
|
-
# RETRY MAGIC HAPPENS HERE!
|
|
969
|
-
# send these remaing comamnds one at a time using `execute_command`
|
|
970
|
-
# in the main client. This keeps our retry logic in one place mostly,
|
|
971
|
-
# and allows us to be more confident in correctness of behavior.
|
|
972
|
-
# at this point any speed gains from pipelining have been lost
|
|
973
|
-
# anyway, so we might as well make the best attempt to get the correct
|
|
974
|
-
# behavior.
|
|
975
|
-
#
|
|
976
|
-
# The client command will handle retries for each individual command
|
|
977
|
-
# sequentially as we pass each one into `execute_command`. Any exceptions
|
|
978
|
-
# that bubble out should only appear once all retries have been exhausted.
|
|
979
|
-
#
|
|
980
|
-
# If a lot of commands have failed, we'll be setting the
|
|
981
|
-
# flag to rebuild the slots table from scratch. So MOVED errors should
|
|
982
|
-
# correct .commandsthemselves fairly quickly.
|
|
983
1081
|
await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
|
|
984
|
-
|
|
985
1082
|
for c in attempt:
|
|
986
1083
|
try:
|
|
987
|
-
|
|
988
|
-
|
|
1084
|
+
c.result = await self.client.execute_command(
|
|
1085
|
+
RedisCommand(c.name, c.arguments), **c.execution_parameters
|
|
1086
|
+
)
|
|
989
1087
|
except RedisError as e:
|
|
990
1088
|
c.result = e
|
|
991
1089
|
|
|
992
|
-
#
|
|
993
|
-
# to the sequence of commands issued in the stack in pipeline.execute()
|
|
1090
|
+
# Flatten results to match the original command order.
|
|
994
1091
|
response = []
|
|
995
1092
|
for c in sorted(self.command_stack, key=lambda x: x.position):
|
|
996
1093
|
r = c.result
|
|
997
1094
|
if not isinstance(c.result, RedisError):
|
|
998
1095
|
if isinstance(c.callback, AsyncPreProcessingCallback):
|
|
999
|
-
await c.callback.pre_process(self.client, c.result
|
|
1000
|
-
r = c.callback(c.result, version=protocol_version
|
|
1096
|
+
await c.callback.pre_process(self.client, c.result)
|
|
1097
|
+
r = c.callback(c.result, version=protocol_version)
|
|
1001
1098
|
response.append(r)
|
|
1002
1099
|
|
|
1003
1100
|
if raise_on_error:
|
|
@@ -1005,16 +1102,19 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):

         return tuple(response)

-    def _determine_slot(
+    def _determine_slot(
+        self, command: bytes, *args: ValueT, **options: Unpack[ExecutionParameters]
+    ) -> int:
+        """
+        Determine the hash slot for the given command and arguments.
+        """
+        keys: tuple[RedisValueT, ...] = cast(
+            tuple[RedisValueT, ...], options.get("keys")
+        ) or KeySpec.extract_keys(command, *args)  # type: ignore

         if not keys:
             raise RedisClusterException(
-                f"No way to dispatch {command} to Redis Cluster. Missing key"
+                f"No way to dispatch {nativestr(command)} to Redis Cluster. Missing key"
             )
         slots = {hash_slot(b(key)) for key in keys}

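`_determine_slot` now accepts pre-computed `keys` via `ExecutionParameters` and falls back to `KeySpec.extract_keys`; either way, every key a command touches must hash to the same slot. The `hash_slot` helper it calls implements the standard Redis Cluster algorithm (CRC16/XMODEM modulo 16384, honouring `{...}` hash tags). A self-contained sketch of that algorithm, for orientation — not coredis's actual implementation:

```python
def crc16_xmodem(data: bytes) -> int:
    # Bit-by-bit CRC16/XMODEM (polynomial 0x1021), the checksum the
    # Redis Cluster specification mandates for key hashing.
    crc = 0
    for byte in data:
        crc ^= byte << 8
        for _ in range(8):
            crc = ((crc << 1) ^ 0x1021) & 0xFFFF if crc & 0x8000 else (crc << 1) & 0xFFFF
    return crc


def hash_slot(key: bytes) -> int:
    # If the key contains a non-empty {...} section, only that substring
    # is hashed -- this is what lets callers co-locate related keys.
    start = key.find(b"{")
    if start != -1:
        end = key.find(b"}", start + 1)
        if end > start + 1:
            key = key[start + 1 : end]
    return crc16_xmodem(key) % 16384


print(hash_slot(b"foo"))  # some slot in range(16384)
```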
@@ -1023,10 +1123,16 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
         return slots.pop()

     def _fail_on_redirect(self, allow_redirections: bool) -> None:
+        """
+        Raise if redirections are not allowed in the pipeline.
+        """
         if not allow_redirections:
             raise RedisClusterException("ASK & MOVED redirection not allowed in this pipeline")

     def multi(self) -> None:
+        """
+        Start a transactional block after WATCH commands. End with `execute()`.
+        """
         if self.explicit_transaction:
             raise RedisError("Cannot issue nested calls to MULTI")

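The docstring added to `multi()` pins down the intended call order for optimistic locking: WATCH the keys, open the transactional block with `multi()`, queue commands, then `execute()`. A rough usage sketch — the watch/multi/execute sequence comes from the docstrings in this file, while the way the pipeline object is obtained is assumed from the 4.x API:

```python
import asyncio

import coredis


async def main() -> None:
    client = coredis.Redis()
    pipe = client.pipeline()      # assumed 4.x-style constructor
    await pipe.watch("counter")   # optimistic lock on the key
    pipe.multi()                  # start the transactional block
    await pipe.incr("counter")    # queued, not sent yet
    print(await pipe.execute())   # EXEC; fails if a watched key changed


asyncio.run(main())
```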
@@ -1036,14 +1142,13 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):

     async def immediate_execute_command(
         self,
-        command:
-        slot = self._determine_slot(command, *args)
+        command: RedisCommandP,
+        callback: Callable[..., R] = NoopCallback(),
+        **kwargs: Unpack[ExecutionParameters],
+    ) -> R:
+        slot = self._determine_slot(command.name, *command.arguments)
         node = self.connection_pool.get_node_by_slot(slot)
-        if command == CommandName.WATCH:
+        if command.name == CommandName.WATCH:
             if self._watched_node and node.name != self._watched_node.name:
                 raise ClusterTransactionError(
                     "Cannot issue a watch on a different node in the same transaction"
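`immediate_execute_command` now receives a command object (hence `command.name` / `command.arguments`), and the WATCH guard above refuses a watch that resolves to a different node than an earlier one. Keys can be forced into the same slot with hash tags. A quick check, assuming `hash_slot` and `b` are importable from `coredis._utils` (the code above uses them unqualified, so the import path is a guess):

```python
from coredis._utils import b, hash_slot

# Keys sharing the {user:42} hash tag land in the same slot, so they can
# be watched together in one cluster transaction.
assert hash_slot(b("{user:42}:balance")) == hash_slot(b("{user:42}:history"))

# Unrelated keys usually map to different slots (and often different
# nodes), in which case a second WATCH raises ClusterTransactionError.
print(hash_slot(b("balance")), hash_slot(b("history")))
```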
@@ -1057,40 +1162,44 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):
         conn = await self.connection_pool.get_connection_by_node(node)

         try:
-            request = await conn.create_request(
+            request = await conn.create_request(
+                command.name, *command.arguments, decode=kwargs.get("decode")
+            )

             return callback(
                 await request,
                 version=conn.protocol_version,
-                **kwargs,
             )
         except (ConnectionError, TimeoutError):
             conn.disconnect()

             try:
                 if not self.watching:
-                    request = await conn.create_request(
+                    request = await conn.create_request(
+                        command.name, *command.arguments, decode=kwargs.get("decode")
+                    )
+                    return callback(await request, version=conn.protocol_version)
+                else:
+                    raise
             except ConnectionError:
                 # the retry failed so cleanup.
                 conn.disconnect()
-                await self.
+                await self.clear()
                 raise
         finally:
+            release = True
+            if command.name in UNWATCH_COMMANDS:
                 self.watching = False
-            elif command == CommandName.WATCH:
+            elif command.name == CommandName.WATCH:
                 self.watching = True
+                release = False
+            if release:
+                self.connection_pool.release(conn)

-    def load_scripts(self):
+    def load_scripts(self) -> None:
         raise RedisClusterException("method load_scripts() is not implemented")

     async def _watch(self, node: ManagedNode, conn: BaseConnection, keys: Parameters[KeyT]) -> bool:
-        "Watches the values at keys ``keys``"
-
         for key in keys:
             slot = self._determine_slot(CommandName.WATCH, key)
             dist_node = self.connection_pool.get_node_by_slot(slot)
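The rewritten `finally` block makes the connection bookkeeping explicit: UNWATCH-class commands clear the `watching` flag, while a successful WATCH sets it and deliberately skips `connection_pool.release(conn)` so the transaction stays pinned to that connection. A toy model of that discipline — all names here are illustrative, not coredis APIs:

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class ToyPool:
    idle: list[str] = field(default_factory=lambda: ["conn-1"])

    async def acquire(self) -> str:
        return self.idle.pop()

    def release(self, conn: str) -> None:
        self.idle.append(conn)


async def run(pool: ToyPool, name: bytes) -> None:
    conn = await pool.acquire()
    release = True
    try:
        pass  # the command would be sent on `conn` here
    finally:
        if name == b"WATCH":
            release = False  # keep the connection for the transaction
        if release:
            pool.release(conn)


async def main() -> None:
    pool = ToyPool()
    await run(pool, b"WATCH")
    print(pool.idle)  # [] -- the connection stays checked out


asyncio.run(main())
```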
@@ -1109,161 +1218,8 @@ class ClusterPipelineImpl(Client[AnyStr], metaclass=ClusterPipelineMeta):

     async def _unwatch(self, conn: BaseConnection) -> bool:
         """Unwatches all previously specified keys"""
+        if not self.watching:
+            return True
         request = await conn.create_request(CommandName.UNWATCH, decode=False)
-        res = cast(
-        return res == b"OK"
+        res = cast(bytes, await request)
+        return res == b"OK"
-
-
-class Pipeline(ObjectProxy, Generic[AnyStr]):  # type: ignore
-    """
-    Class returned by :meth:`coredis.Redis.pipeline`
-
-    The class exposes the redis command methods available in
-    :class:`~coredis.Redis`, however each of those methods returns
-    the instance itself and the results of the batched commands
-    can be retrieved by calling :meth:`execute`.
-    """
-
-    __wrapped__: PipelineImpl[AnyStr]
-
-    async def __aenter__(self) -> Pipeline[AnyStr]:
-        return cast(Pipeline[AnyStr], await self.__wrapped__.__aenter__())
-
-    async def __aexit__(
-        self,
-        exc_type: type[BaseException] | None,
-        exc_value: BaseException | None,
-        traceback: TracebackType | None,
-    ) -> None:
-        await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
-
-    @classmethod
-    def proxy(
-        cls,
-        client: Redis[AnyStr],
-        transaction: bool | None = None,
-        watches: Parameters[KeyT] | None = None,
-        timeout: float | None = None,
-    ) -> Pipeline[AnyStr]:
-        return cls(
-            PipelineImpl(
-                client,
-                transaction=transaction,
-                watches=watches,
-                timeout=timeout,
-            )
-        )
-
-    def multi(self) -> None:
-        """
-        Starts a transactional block of the pipeline after WATCH commands
-        are issued. End the transactional block with :meth:`execute`
-        """
-        self.__wrapped__.multi()  # Only here for documentation purposes.
-
-    async def watch(self, *keys: KeyT) -> bool:  # noqa
-        """
-        Watches the values at keys ``keys``
-        """
-        return await self.__wrapped__.watch(*keys)  # Only here for documentation purposes.
-
-    async def unwatch(self) -> bool:  # noqa
-        """
-        Unwatches all previously specified keys
-        """
-        return await self.__wrapped__.unwatch()  # Only here for documentation purposes.
-
-    async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
-        """
-        Executes all the commands in the current pipeline
-        and return the results of the individual batched commands
-        """
-
-        # Only here for documentation purposes.
-        return await self.__wrapped__.execute(raise_on_error=raise_on_error)
-
-    async def reset(self) -> None:
-        """
-        Resets the command stack and releases any connections acquired from the
-        pool
-        """
-        await self.__wrapped__.reset_pipeline()
-
-
-class ClusterPipeline(ObjectProxy, Generic[AnyStr]):  # type: ignore
-    """
-    Class returned by :meth:`coredis.RedisCluster.pipeline`
-
-    The class exposes the redis command methods available in
-    :class:`~coredis.Redis`, however each of those methods returns
-    the instance itself and the results of the batched commands
-    can be retrieved by calling :meth:`execute`.
-    """
-
-    __wrapped__: ClusterPipelineImpl[AnyStr]
-
-    async def __aenter__(self) -> ClusterPipeline[AnyStr]:
-        return cast(ClusterPipeline[AnyStr], await self.__wrapped__.__aenter__())
-
-    async def __aexit__(
-        self,
-        exc_type: type[BaseException] | None,
-        exc_value: BaseException | None,
-        traceback: TracebackType | None,
-    ) -> None:
-        await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
-
-    @classmethod
-    def proxy(
-        cls,
-        client: RedisCluster[AnyStr],
-        transaction: bool | None = False,
-        watches: Parameters[KeyT] | None = None,
-        timeout: float | None = None,
-    ) -> ClusterPipeline[AnyStr]:
-        return cls(
-            ClusterPipelineImpl(
-                client,
-                transaction=transaction,
-                watches=watches,
-                timeout=timeout,
-            )
-        )
-
-    def multi(self) -> None:
-        """
-        Starts a transactional block of the pipeline after WATCH commands
-        are issued. End the transactional block with :meth:`execute`
-        """
-        self.__wrapped__.multi()  # Only here for documentation purposes.
-
-    async def watch(self, *keys: KeyT) -> bool:  # noqa
-        """
-        Watches the values at keys ``keys``
-
-        :raises: :exc:`~coredis.exceptions.ClusterTransactionError`
-         if a watch is issued on a key that resides on a different
-         cluster node than a previous watch.
-        """
-        return await self.__wrapped__.watch(*keys)  # Only here for documentation purposes.
-
-    async def unwatch(self) -> bool:  # noqa
-        """
-        Unwatches all previously specified keys
-        """
-        return await self.__wrapped__.unwatch()  # Only here for documentation purposes.
-
-    async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
-        """
-        Executes all the commands in the current pipeline
-        and return the results of the individual batched commands
-        """
-        # Only here for documentation purposes.
-        return await self.__wrapped__.execute(raise_on_error=raise_on_error)
-
-    async def reset(self) -> None:
-        """
-        Resets the command stack and releases any connections acquired from the
-        pool
-        """
-        await self.__wrapped__.reset_pipeline()
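Finally, the `Pipeline` and `ClusterPipeline` `ObjectProxy` wrappers are deleted outright; they existed mostly to carry documentation and forwarded everything (including `reset()` → `reset_pipeline()`) to the wrapped implementation. The diff does not show what `pipeline()` returns in 5.0.0, so the concrete impact sketched below is an assumption — code that introspected the proxy is the likeliest breakage:

```python
import coredis

client = coredis.Redis()
pipe = client.pipeline()  # assumed to keep its 4.x call shape

# 4.23.1: `pipe` was a wrapt.ObjectProxy subclass, and attribute access
# fell through to the wrapped PipelineImpl.  With the proxies removed,
# anything reaching for __wrapped__ should be expected to break:
print(hasattr(pipe, "__wrapped__"))  # True on 4.23.1, presumably False on 5.0.0
```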