coredis 5.0.0rc1__tar.gz → 5.0.0rc2__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/HISTORY.rst +9 -0
- {coredis-5.0.0rc1/coredis.egg-info → coredis-5.0.0rc2}/PKG-INFO +1 -4
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/README.md +0 -3
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_protocols.py +5 -5
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_version.py +3 -3
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/script.py +16 -13
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/pipeline.py +125 -137
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/retry.py +12 -13
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/typing.py +1 -3
- {coredis-5.0.0rc1 → coredis-5.0.0rc2/coredis.egg-info}/PKG-INFO +1 -4
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/LICENSE +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/MANIFEST.in +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_json.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_packer.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_py_311_typing.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_py_312_typing.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_sidecar.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_utils.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/cache.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/client/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/client/basic.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/client/cluster.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/_key_spec.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/_utils.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/_validators.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/_wrappers.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/bitfield.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/constants.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/core.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/function.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/monitor.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/pubsub.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/request.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/sentinel.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/config.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/connection.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/constants.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/credentials.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/exceptions.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/experimental/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/globals.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/autocomplete.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/base.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/filters.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/graph.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/json.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/_callbacks/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/_callbacks/autocomplete.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/_callbacks/graph.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/_callbacks/json.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/_callbacks/search.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/_callbacks/timeseries.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/response/types.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/search.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/modules/timeseries.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/parser.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/pool/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/pool/basic.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/pool/cluster.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/pool/nodemanager.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/py.typed +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/credentials/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/credentials/iam_provider.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/locks/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/locks/extend.lua +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/locks/lua_lock.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/recipes/locks/release.lua +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/__init__.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/acl.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/cluster.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/command.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/connection.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/geo.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/hash.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/keys.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/module.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/script.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/sentinel.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/server.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/sets.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/sorted_set.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/streams.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/strings.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_callbacks/vector_sets.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/_utils.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/response/types.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/sentinel.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/speedups.c +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/speedups.pyi +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/stream.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/tokens.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis.egg-info/SOURCES.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis.egg-info/dependency_links.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis.egg-info/requires.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis.egg-info/top_level.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/pyproject.toml +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/ci.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/dev.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/dev_extra.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/docs.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/main.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/publishing.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/recipes.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/requirements/test.txt +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/setup.cfg +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/setup.py +0 -0
- {coredis-5.0.0rc1 → coredis-5.0.0rc2}/versioneer.py +0 -0
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/HISTORY.rst

@@ -3,6 +3,14 @@
 Changelog
 =========
 
+v5.0.0rc2
+---------
+Release Date: 2025-07-10
+
+* Bug Fix
+
+  * Fix duplicate command error in using ``transform`` with pipeline
+
 v5.0.0rc1
 ---------
 Release Date: 2025-07-07

@@ -1963,3 +1971,4 @@ v1.0.1
 
 
 
+
{coredis-5.0.0rc1/coredis.egg-info → coredis-5.0.0rc2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: coredis
-Version: 5.0.0rc1
+Version: 5.0.0rc2
 Summary: Python async client for Redis key-value store
 Home-page: https://github.com/alisaifee/coredis
 Author: Ali-Akber Saifee

@@ -80,9 +80,6 @@ coredis is an async redis client with support for redis server, cluster & sentinel
 and the [API Documentation](https://coredis.readthedocs.io/en/latest/api/index.html)
 for more details.
 
-> **Warning**
-> The command API does NOT mirror the official python [redis client](https://github.com/redis/redis-py). For details about the high level differences refer to [Divergence from aredis & redis-py](https://coredis.readthedocs.io/en/latest/history.html#divergence-from-aredis-redis-py)
-
 ______________________________________________________________________
 
 <!-- TOC depthFrom:2 depthTo:6 withLinks:1 updateOnSave:1 orderedList:0 -->
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/README.md

@@ -27,9 +27,6 @@ coredis is an async redis client with support for redis server, cluster & sentinel
 and the [API Documentation](https://coredis.readthedocs.io/en/latest/api/index.html)
 for more details.
 
-> **Warning**
-> The command API does NOT mirror the official python [redis client](https://github.com/redis/redis-py). For details about the high level differences refer to [Divergence from aredis & redis-py](https://coredis.readthedocs.io/en/latest/history.html#divergence-from-aredis-redis-py)
-
 ______________________________________________________________________
 
 <!-- TOC depthFrom:2 depthTo:6 withLinks:1 updateOnSave:1 orderedList:0 -->
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_protocols.py

@@ -49,21 +49,21 @@ class AbstractExecutor(Protocol):
 
 @runtime_checkable
 class SupportsScript(Protocol[T_co]):  # noqa
-    async def evalsha(
+    def evalsha(
         self,
         sha1: StringT,
         keys: Parameters[KeyT] | None = ...,
         args: Parameters[RedisValueT] | None = ...,
-    ) -> ResponseType: ...
+    ) -> CommandRequest[ResponseType]: ...
 
-    async def evalsha_ro(
+    def evalsha_ro(
         self,
         sha1: StringT,
         keys: Parameters[KeyT] | None = ...,
         args: Parameters[RedisValueT] | None = ...,
-    ) -> ResponseType: ...
+    ) -> CommandRequest[ResponseType]: ...
 
-    async def script_load(self, script: StringT) -> T_co: ...
+    def script_load(self, script: StringT) -> CommandRequest[T_co]: ...
 
 
 @runtime_checkable
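Per the hunk above, ``evalsha``, ``evalsha_ro`` and ``script_load`` are now declared as regular methods that return awaitable ``CommandRequest`` objects instead of coroutines returning ``ResponseType``. A minimal sketch of the calling convention this implies; ``client`` and ``sha`` are illustrative assumptions and not part of this diff:

    import coredis

    async def run_cached_script(client: coredis.Redis, sha: str) -> None:
        # evalsha() now returns a CommandRequest; the request object can be
        # held and awaited later to obtain the response value.
        request = client.evalsha(sha, keys=["key"], args=["value"])
        result = await request
        print(result)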
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/_version.py

@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-07-
+ "date": "2025-07-10T16:37:52-0700",
  "dirty": false,
  "error": null,
- "full-revisionid": "
- "version": "5.0.0rc1"
+ "full-revisionid": "500ec3f7c2c644928da3459bf3242872e672728c",
+ "version": "5.0.0rc2"
 }
 '''  # END VERSION_JSON
 
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/commands/script.py

@@ -10,7 +10,9 @@ from deprecated.sphinx import versionadded
 
 from coredis._protocols import SupportsScript
 from coredis._utils import b
+from coredis.commands import CommandRequest
 from coredis.exceptions import NoScriptError
+from coredis.retry import ConstantRetryPolicy, retryable
 from coredis.typing import (
     AnyStr,
     Awaitable,

@@ -72,16 +74,16 @@ class Script(Generic[AnyStr]):
         self.sha = hashlib.sha1(b(script)).hexdigest()  # type: ignore
         self.readonly = readonly
 
-    async def __call__(
+    def __call__(
         self,
         keys: Parameters[KeyT] | None = None,
         args: Parameters[RedisValueT] | None = None,
         client: SupportsScript[AnyStr] | None = None,
         readonly: bool | None = None,
-    ) -> ResponseType:
+    ) -> CommandRequest[ResponseType]:
         """
         Executes the script registered in :paramref:`Script.script` using
-        :meth:`coredis.Redis.evalsha`. Additionally if the script was not yet
+        :meth:`coredis.Redis.evalsha`. Additionally, if the script was not yet
         registered on the instance, it will automatically do that as well
         and cache the sha at :data:`Script.sha`

@@ -103,20 +105,21 @@ class Script(Generic[AnyStr]):
         if readonly is None:
             readonly = self.readonly
 
+        method = client.evalsha_ro if readonly else client.evalsha
+
         # make sure the Redis server knows about the script
         if isinstance(client, Pipeline):
             # make sure this script is good to go on pipeline
             cast(Pipeline[AnyStr], client).scripts.add(self)
-
-
-
-
-
-
-
-
-
-            return await method(self.sha, keys=keys, args=args)
+            return method(self.sha, keys=keys, args=args)
+        else:
+            return cast(
+                CommandRequest[ResponseType],
+                retryable(
+                    ConstantRetryPolicy((NoScriptError,), 1, 0),
+                    failure_hook=lambda _: client.script_load(self.script),
+                )(method)(self.sha, keys=keys, args=args),
+            )
 
     async def execute(
         self,
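For context, a sketch of how the reworked ``Script.__call__`` is typically used. It assumes ``coredis.Redis.register_script`` returns a :class:`Script` instance (as in earlier coredis releases); that helper is not part of this diff:

    import coredis

    async def bump(client: coredis.Redis) -> None:
        # Script.__init__ above only hashes the script locally; the retryable
        # wrapper then loads it via script_load on the first NoScriptError.
        incr_by = client.register_script("return redis.call('INCRBY', KEYS[1], ARGV[1])")
        value = await incr_by(keys=["counter"], args=[5], client=client)
        print(value)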
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/pipeline.py

@@ -18,6 +18,7 @@ from coredis.client import Client, RedisCluster
 from coredis.commands import CommandRequest, CommandResponseT
 from coredis.commands._key_spec import KeySpec
 from coredis.commands.constants import CommandName, NodeFlag
+from coredis.commands.request import TransformedResponse
 from coredis.commands.script import Script
 from coredis.connection import BaseConnection, ClusterConnection, CommandInvocation, Connection
 from coredis.exceptions import (

@@ -92,12 +93,12 @@ def wrap_pipeline_method(
     wrapper.__annotations__["return"] = kls
     wrapper.__doc__ = textwrap.dedent(wrapper.__doc__ or "")
     wrapper.__doc__ = f"""
-Pipeline variant of :meth:`coredis.Redis.{func.__name__}` that does not execute
-immediately and instead pushes the command into a stack for batch send.
+.. note:: Pipeline variant of :meth:`coredis.Redis.{func.__name__}` that does not execute
+   immediately and instead pushes the command into a stack for batch send.
 
-The return value can be retrieved either as part of the tuple returned by
-:meth:`~{kls.__name__}.execute` or by awaiting the :class:`~coredis.commands.CommandRequest`
-instance after calling :meth:`~{kls.__name__}.execute`
+   The return value can be retrieved either as part of the tuple returned by
+   :meth:`~{kls.__name__}.execute` or by awaiting the :class:`~coredis.commands.CommandRequest`
+   instance after calling :meth:`~{kls.__name__}.execute`
 
 {wrapper.__doc__}
 """

@@ -105,6 +106,11 @@ instance after calling :meth:`~{kls.__name__}.execute`
 
 
 class PipelineCommandRequest(CommandRequest[CommandResponseT]):
+    """
+    Command request used within a pipeline. Handles immediate execution for WATCH or
+    watched commands outside explicit transactions, otherwise queues the command.
+    """
+
     client: Pipeline[Any] | ClusterPipeline[Any]
     queued_response: Awaitable[bytes | str]
 

@@ -115,23 +121,59 @@ class PipelineCommandRequest(CommandRequest[CommandResponseT]):
         *arguments: ValueT,
         callback: Callable[..., CommandResponseT],
         execution_parameters: ExecutionParameters | None = None,
+        parent: CommandRequest[Any] | None = None,
     ) -> None:
         super().__init__(
-            client,
+            client,
+            name,
+            *arguments,
+            callback=callback,
+            execution_parameters=execution_parameters,
+        )
+        if not parent:
+            if (client.watching or name == CommandName.WATCH) and not client.explicit_transaction:
+                self.response = client.immediate_execute_command(
+                    self, callback=callback, **self.execution_parameters
+                )
+            else:
+                client.pipeline_execute_command(self)  # type: ignore[arg-type]
+        self.parent = parent
+
+    def transform(
+        self, transformer: type[TransformedResponse]
+    ) -> CommandRequest[TransformedResponse]:
+        transform_func = functools.partial(
+            self.type_adapter.deserialize,
+            return_type=transformer,
+        )
+        return cast(type[PipelineCommandRequest[TransformedResponse]], self.__class__)(
+            self.client,
+            self.name,
+            *self.arguments,
+            callback=lambda resp, **k: transform_func(resp),
+            execution_parameters=self.execution_parameters,
+            parent=self,
         )
-        if (client.watching or name == CommandName.WATCH) and not client.explicit_transaction:
-            self.response = client.immediate_execute_command(
-                self, callback=callback, **self.execution_parameters
-            )
-        else:
-            client.pipeline_execute_command(self)  # type: ignore[arg-type]
 
     async def __backward_compatibility_return(self) -> Pipeline[Any] | ClusterPipeline[Any]:
+        """
+        For backward compatibility: returns the pipeline instance when awaited before execute().
+        """
         return self.client
 
     def __await__(self) -> Generator[None, None, CommandResponseT]:
         if hasattr(self, "response"):
             return self.response.__await__()
+        elif self.parent:
+            parent = self.parent
+
+            async def _transformed() -> CommandResponseT:
+                if hasattr(parent, "response"):
+                    return self.callback(await parent.response)
+                else:
+                    return await parent  # type: ignore[no-any-return]
+
+            return _transformed().__await__()
         else:
             warnings.warn(
                 """

@@ -140,12 +182,17 @@ has no effect and returns the pipeline instance itself for backward compatibility
 
 To add commands to a pipeline simply call the methods synchronously. The awaitable response
 can be awaited after calling `execute()` to retrieve a statically typed response if required.
-"""
+""",
+                stacklevel=2,
             )
             return self.__backward_compatibility_return().__await__()  # type: ignore[return-value]
 
 
 class ClusterPipelineCommandRequest(PipelineCommandRequest[CommandResponseT]):
+    """
+    Command request for cluster pipelines, tracks position and result for cluster routing.
+    """
+
     def __init__(
         self,
         client: ClusterPipeline[Any],

@@ -153,16 +200,26 @@ class ClusterPipelineCommandRequest(PipelineCommandRequest[CommandResponseT]):
         *arguments: ValueT,
         callback: Callable[..., CommandResponseT],
         execution_parameters: ExecutionParameters | None = None,
+        parent: CommandRequest[Any] | None = None,
     ) -> None:
         self.position: int = 0
         self.result: Any | None = None
         self.asking: bool = False
         super().__init__(
-            client,
+            client,
+            name,
+            *arguments,
+            callback=callback,
+            execution_parameters=execution_parameters,
+            parent=parent,
         )
 
 
 class NodeCommands:
+    """
+    Helper for grouping and executing commands on a single cluster node, handling transactions if needed.
+    """
+
     def __init__(
         self,
         client: RedisCluster[AnyStr],

@@ -188,14 +245,11 @@ class NodeCommands:
         connection = self.connection
         commands = self.commands
 
-        #
-        # and ensure that nothing is sitting there from a previous run.
-
+        # Reset results for all commands before writing.
         for c in commands:
             c.result = None
 
-        #
-        # send all the commands and catch connection and timeout errors.
+        # Batch all commands into a single request for efficiency.
         try:
             if self.in_transaction:
                 self.multi_cmd = await connection.create_request(

@@ -307,20 +361,16 @@ class ClusterPipelineMeta(PipelineMeta):
 
 
 class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
-    """Pipeline for the Redis class"""
-
     """
-
-
-    saving all the values in a list to Redis.
+    Pipeline for batching multiple commands to a Redis server.
+    Supports transactions and command stacking.
 
     All commands executed within a pipeline are wrapped with MULTI and EXEC
-    calls
-    executed atomically.
+    calls when :paramref:`transaction` is ``True``.
 
     Any command raising an exception does *not* halt the execution of
     subsequent commands in the pipeline. Instead, the exception is caught
-    and its instance is placed into the response list returned by
+    and its instance is placed into the response list returned by :meth:`execute`
     """
 
     command_stack: list[PipelineCommandRequest[Any]]

@@ -340,7 +390,7 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
         self.watching = False
         self.watches: Parameters[KeyT] | None = watches or None
         self.command_stack = []
-        self.cache = None
+        self.cache = None
         self.explicit_transaction = False
         self.scripts: set[Script[AnyStr]] = set()
         self.timeout = timeout

@@ -385,30 +435,21 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
 
     async def clear(self) -> None:
         """
-
-        back to the pool
+        Clear the pipeline, reset state, and release the connection back to the pool.
         """
         self.command_stack.clear()
         self.scripts = set()
-        #
-        # watching something
-
+        # Reset connection state if we were watching something.
         if self.watching and self.connection:
             try:
-                # call this manually since our unwatch or
-                # immediate_execute_command methods can call clear()
                 request = await self.connection.create_request(CommandName.UNWATCH, decode=False)
                 await request
             except ConnectionError:
-                # disconnect will also remove any previous WATCHes
                 self.connection.disconnect()
-        #
+        # Reset pipeline state and release connection if needed.
         self.watching = False
         self.watches = []
         self.explicit_transaction = False
-        # we can safely return the connection to the pool here since we're
-        # sure we're no longer WATCHing anything
-
 
         if self.connection:
             self.connection_pool.release(self.connection)
             self.connection = None

@@ -422,19 +463,14 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
         )
     def reset(self) -> CommandRequest[None]:
         """
-
-        back to the pool
-
-        :meta private:
+        Deprecated. Use :meth:`clear` instead.
         """
         return self.clear()  # type: ignore
 
     def multi(self) -> None:
         """
-
-        are issued. End the transactional block with `execute`.
+        Start a transactional block after WATCH commands. End with `execute()`.
         """
-
         if self.explicit_transaction:
             raise RedisError("Cannot issue nested calls to MULTI")

@@ -504,17 +540,7 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
         command: PipelineCommandRequest[R],
     ) -> None:
         """
-
-
-        Returns the current Pipeline object back so commands can be
-        chained together, such as:
-
-        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
-
-        At some other point, you can then run: pipe.execute(),
-        which will execute all commands queued in the pipe.
-
-        :meta private:
+        Queue a command for execution on the next `execute()` call.
         """
         self.command_stack.append(command)
 

@@ -704,7 +730,9 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
         )
 
     async def execute(self, raise_on_error: bool = True) -> tuple[Any, ...]:
-        """
+        """
+        Execute all queued commands in the pipeline. Returns a tuple of results.
+        """
         stack = self.command_stack
 
         if not stack:

@@ -748,9 +776,8 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
 
     def watch(self, *keys: KeyT) -> CommandRequest[bool]:
         """
-
-
-        pipeline buffering mode, call :meth:`multi`.
+        Watch the given keys for changes. Switches to immediate execution mode
+        until :meth:`multi` is called.
         """
         if self.explicit_transaction:
             raise RedisError("Cannot issue a WATCH after a MULTI")

@@ -759,13 +786,21 @@ class Pipeline(Client[AnyStr], metaclass=PipelineMeta):
 
     def unwatch(self) -> CommandRequest[bool]:
         """
-
-        to buffered mode.
+        Remove all key watches and return to buffered mode.
         """
         return self.create_request(CommandName.UNWATCH, callback=SimpleStringCallback())
 
 
 class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
+    """
+    Pipeline for batching commands to a Redis Cluster.
+    Handles routing, transactions, and error management across nodes.
+
+    .. warning:: Unlike :class:`Pipeline`, :paramref:`transaction` is ``False`` by
+       default as there is limited support for transactions in redis cluster
+       (only keys in the same slot can be part of a transaction).
+    """
+
     client: RedisCluster[AnyStr]
     connection_pool: ClusterConnectionPool
     command_stack: list[ClusterPipelineCommandRequest[Any]]

@@ -791,7 +826,7 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
         self.watches: Parameters[KeyT] | None = watches or None
         self.watching = False
         self.explicit_transaction = False
-        self.cache = None
+        self.cache = None
         self.timeout = timeout
         self.type_adapter = client.type_adapter

@@ -811,9 +846,8 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
 
     def watch(self, *keys: KeyT) -> CommandRequest[bool]:
         """
-
-
-        pipeline buffering mode, call :meth:`multi`.
+        Watch the given keys for changes. Switches to immediate execution mode
+        until :meth:`multi` is called.
         """
         if self.explicit_transaction:
             raise RedisError("Cannot issue a WATCH after a MULTI")

@@ -822,8 +856,7 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
 
     async def unwatch(self) -> bool:
         """
-
-        to buffered mode.
+        Remove all key watches and return to buffered mode.
         """
         if self._watched_connection:
             try:

@@ -898,7 +931,9 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
             exception.args = (msg,) + exception.args[1:]
 
     async def execute(self, raise_on_error: bool = True) -> tuple[object, ...]:
-        """
+        """
+        Execute all queued commands in the cluster pipeline. Returns a tuple of results.
+        """
         await self.connection_pool.initialize()
 
         if not self.command_stack:

@@ -915,8 +950,7 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
 
     async def clear(self) -> None:
         """
-
-        back to the pool
+        Clear the pipeline, reset state, and release any held connections.
         """
         self.command_stack = []
 

@@ -981,9 +1015,7 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
         if self.explicit_transaction:
             request = await conn.create_request(CommandName.DISCARD)
             await request
-            # If at least one watched key is modified before the EXEC
-            # the whole transaction aborts,
-            # and EXEC returns a Null reply to notify that the transaction failed.
+        # If at least one watched key is modified before EXEC, the transaction aborts and EXEC returns null.
 
         if node_commands.exec_cmd and await node_commands.exec_cmd is None:
             raise WatchError

@@ -1006,104 +1038,57 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
         self, raise_on_error: bool = True, allow_redirections: bool = True
     ) -> tuple[object, ...]:
         """
-
-
-        `allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
-        automatically. If set to false it will raise RedisClusterException.
+        Execute all queued commands in the cluster pipeline, handling redirections
+        and retries as needed.
 
         :meta private:
         """
-        #
-        # if we have to run through it again, we only retry the commands that failed.
+        # On first send, queue all commands. On retry, only failed ones.
         attempt = sorted(self.command_stack, key=lambda x: x.position)
 
-
-        # build a list of node objects based on node names we need to
+        # Group commands by node for efficient network usage.
         nodes: dict[str, NodeCommands] = {}
-        # as we move through each command that still needs to be processed,
-        # we figure out the slot number that command maps to, then from the slot determine the node.
         for c in attempt:
-            # refer to our internal node -> slot table that tells us where a given
-            # command should route to.
             slot = self._determine_slot(c.name, *c.arguments)
             node = self.connection_pool.get_node_by_slot(slot)
-
             if node.name not in nodes:
                 nodes[node.name] = NodeCommands(
                     self.client,
                     await self.connection_pool.get_connection_by_node(node),
                     timeout=self.timeout,
                 )
-
             nodes[node.name].append(c)
 
-        #
-        # we write to all the open sockets for each node first, before reading anything
-        # this allows us to flush all the requests out across the network essentially in parallel
-        # so that we can read them all in parallel as they come back.
-        # we dont' multiplex on the sockets as they come available, but that shouldn't make
-        # too much difference.
+        # Write to all nodes, then read from all nodes in sequence.
         node_commands = nodes.values()
-
         for n in node_commands:
             await n.write()
-
         for n in node_commands:
             await n.read()
 
-        #
-        #
-        # release connections back into the pool if for some reason the socket has data still left
-        # in it from a previous operation. The write and read operations already have try/catch
-        # around them for all known types of errors including connection and socket level errors.
-        # So if we hit an exception, something really bad happened and putting any of
-        # these connections back into the pool is a very bad idea.
-        # the socket might have unread buffer still sitting in it, and then the
-        # next time we read from it we pass the buffered result back from a previous
-        # command and every single request after to that connection will always get
-        # a mismatched result. (not just theoretical, I saw this happen on production x.x).
+        # Release all connections back to the pool only if safe (no unread buffer).
+        # If an error occurred, do not release to avoid buffer mismatches.
         for n in nodes.values():
            protocol_version = n.connection.protocol_version
            self.connection_pool.release(n.connection)
-
-        #
-        # if we have more commands to attempt, we've run into problems.
-        # collect all the commands we are allowed to retry.
-        # (MOVED, ASK, or connection errors or timeout errors)
+
+        # Retry MOVED/ASK/connection errors one by one if allowed.
         attempt = sorted(
            (c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)),
            key=lambda x: x.position,
        )
 
        if attempt and allow_redirections:
-            # RETRY MAGIC HAPPENS HERE!
-            # send these remaing comamnds one at a time using `execute_command`
-            # in the main client. This keeps our retry logic in one place mostly,
-            # and allows us to be more confident in correctness of behavior.
-            # at this point any speed gains from pipelining have been lost
-            # anyway, so we might as well make the best attempt to get the correct
-            # behavior.
-            #
-            # The client command will handle retries for each individual command
-            # sequentially as we pass each one into `execute_command`. Any exceptions
-            # that bubble out should only appear once all retries have been exhausted.
-            #
-            # If a lot of commands have failed, we'll be setting the
-            # flag to rebuild the slots table from scratch. So MOVED errors should
-            # correct .commandsthemselves fairly quickly.
            await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
-
            for c in attempt:
                try:
-                    # send each command individually like we do in the main client.
                    c.result = await self.client.execute_command(
                        RedisCommand(c.name, c.arguments), **c.execution_parameters
                    )
                except RedisError as e:
                    c.result = e
 
-        #
-        # to the sequence of commands issued in the stack in pipeline.execute()
+        # Flatten results to match the original command order.
        response = []
        for c in sorted(self.command_stack, key=lambda x: x.position):
            r = c.result

@@ -1121,8 +1106,9 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
     def _determine_slot(
         self, command: bytes, *args: ValueT, **options: Unpack[ExecutionParameters]
     ) -> int:
-        """
-
+        """
+        Determine the hash slot for the given command and arguments.
+        """
         keys: tuple[RedisValueT, ...] = cast(
             tuple[RedisValueT, ...], options.get("keys")
         ) or KeySpec.extract_keys(command, *args)  # type: ignore

@@ -1138,13 +1124,15 @@ class ClusterPipeline(Client[AnyStr], metaclass=ClusterPipelineMeta):
             return slots.pop()
 
     def _fail_on_redirect(self, allow_redirections: bool) -> None:
+        """
+        Raise if redirections are not allowed in the pipeline.
+        """
         if not allow_redirections:
             raise RedisClusterException("ASK & MOVED redirection not allowed in this pipeline")
 
     def multi(self) -> None:
         """
-
-        are issued. End the transactional block with `execute`.
+        Start a transactional block after WATCH commands. End with `execute()`.
         """
         if self.explicit_transaction:
             raise RedisError("Cannot issue nested calls to MULTI")
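A sketch of the queueing and ``transform`` flow these changes support. It assumes the pipeline is obtained as in earlier coredis releases via ``await client.pipeline()``, and ``Counter`` stands in for a type registered with the client's type adapter. Because a transformed request now carries a ``parent`` link, it reuses the parent's queued response instead of queueing the command a second time, which is the duplicate command error fixed in rc2:

    import coredis

    Counter = int  # hypothetical transformed type; real usage registers it with the type adapter

    async def batched_read(client: coredis.Redis) -> None:
        pipe = await client.pipeline(transaction=True)
        raw = pipe.get("counter")       # queued PipelineCommandRequest, nothing sent yet
        typed = raw.transform(Counter)  # reuses the parent request, no second queue entry
        await pipe.execute()            # single batched round trip, wrapped in MULTI/EXEC
        print(await raw, await typed)   # each request resolves after execute()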
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/retry.py

@@ -6,7 +6,7 @@ from abc import ABC, abstractmethod
 from functools import wraps
 from typing import Any
 
-from coredis.typing import
+from coredis.typing import Awaitable, Callable, P, R
 
 logger = logging.getLogger(__name__)
 

@@ -31,10 +31,10 @@ class RetryPolicy(ABC):
 
     async def call_with_retries(
         self,
-        func: Callable[...,
-        before_hook: Callable[...,
-        failure_hook: Callable[...,
-        | dict[type[BaseException], Callable[...,
+        func: Callable[..., Awaitable[R]],
+        before_hook: Callable[..., Awaitable[Any]] | None = None,
+        failure_hook: Callable[..., Awaitable[Any]]
+        | dict[type[BaseException], Callable[..., Awaitable[None]]]
         | None = None,
     ) -> R:
         """

@@ -159,12 +159,11 @@ class CompositeRetryPolicy(RetryPolicy):
 
     async def call_with_retries(
         self,
-        func: Callable[...,
-        before_hook: Callable[...,
+        func: Callable[..., Awaitable[R]],
+        before_hook: Callable[..., Awaitable[Any]] | None = None,
         failure_hook: None
         | (
-            Callable[...,
-            | dict[type[BaseException], Callable[..., Coroutine[Any, Any, None]]]
+            Callable[..., Awaitable[Any]] | dict[type[BaseException], Callable[..., Awaitable[Any]]]
         ) = None,
     ) -> R:
         """

@@ -214,15 +213,15 @@ class CompositeRetryPolicy(RetryPolicy):
 
 def retryable(
     policy: RetryPolicy,
-    failure_hook: Callable[...,
-) -> Callable[[Callable[P,
+    failure_hook: Callable[..., Awaitable[Any]] | None = None,
+) -> Callable[[Callable[P, Awaitable[R]]], Callable[P, Awaitable[R]]]:
     """
     Decorator to be used to apply a retry policy to a coroutine
     """
 
     def inner(
-        func: Callable[P,
-    ) -> Callable[P,
+        func: Callable[P, Awaitable[R]],
+    ) -> Callable[P, Awaitable[R]]:
         @wraps(func)
         async def _inner(*args: P.args, **kwargs: P.kwargs) -> R:
             return await policy.call_with_retries(
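The loosened signatures accept any awaitable-returning callable. A sketch of applying the decorator outside coredis internals, mirroring the positional ``ConstantRetryPolicy`` call in the script.py hunk above; the argument order (exceptions, retries, delay) is an assumption inferred from that call:

    import coredis
    from coredis.exceptions import ConnectionError
    from coredis.retry import ConstantRetryPolicy, retryable

    # Retry the wrapped coroutine up to 2 times with no delay on ConnectionError.
    @retryable(ConstantRetryPolicy((ConnectionError,), 2, 0))
    async def read_counter(client: coredis.Redis) -> str | bytes | None:
        return await client.get("counter")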
{coredis-5.0.0rc1 → coredis-5.0.0rc2}/coredis/typing.py

@@ -53,8 +53,6 @@ from typing_extensions import (
 
 from coredis.config import Config
 
-_runtime_checks = False
-
 RUNTIME_TYPECHECKS = Config.runtime_checks and not TYPE_CHECKING
 
 P = ParamSpec("P")

@@ -63,7 +61,7 @@ R = TypeVar("R")
 
 
 def safe_beartype(func: Callable[P, R]) -> Callable[P, R]:
-    return beartype(func)
+    return beartype(func)
 
 
 def add_runtime_checks(func: Callable[P, R]) -> Callable[P, R]:
{coredis-5.0.0rc1 → coredis-5.0.0rc2/coredis.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: coredis
-Version: 5.0.0rc1
+Version: 5.0.0rc2
 Summary: Python async client for Redis key-value store
 Home-page: https://github.com/alisaifee/coredis
 Author: Ali-Akber Saifee

@@ -80,9 +80,6 @@ coredis is an async redis client with support for redis server, cluster & sentinel
 and the [API Documentation](https://coredis.readthedocs.io/en/latest/api/index.html)
 for more details.
 
-> **Warning**
-> The command API does NOT mirror the official python [redis client](https://github.com/redis/redis-py). For details about the high level differences refer to [Divergence from aredis & redis-py](https://coredis.readthedocs.io/en/latest/history.html#divergence-from-aredis-redis-py)
-
 ______________________________________________________________________
 
 <!-- TOC depthFrom:2 depthTo:6 withLinks:1 updateOnSave:1 orderedList:0 -->
All other files listed above are unchanged between 5.0.0rc1 and 5.0.0rc2.