redis 6.4.0.tar.gz → 7.0.0b1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {redis-6.4.0 → redis-7.0.0b1}/PKG-INFO +1 -1
- {redis-6.4.0 → redis-7.0.0b1}/redis/__init__.py +1 -1
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/base.py +173 -8
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/hiredis.py +16 -10
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/resp3.py +11 -5
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/client.py +45 -2
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/cluster.py +46 -3
- {redis-6.4.0 → redis-7.0.0b1}/redis/cache.py +1 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/client.py +68 -13
- {redis-6.4.0 → redis-7.0.0b1}/redis/cluster.py +3 -2
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/core.py +285 -285
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/helpers.py +0 -20
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/query.py +12 -12
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/vectorset/commands.py +43 -25
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/vectorset/utils.py +40 -4
- {redis-6.4.0 → redis-7.0.0b1}/redis/connection.py +828 -59
- redis-7.0.0b1/redis/maintenance_events.py +785 -0
- redis-7.0.0b1/tests/test_asyncio/test_usage_counter.py +16 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_vsets.py +113 -1
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_cluster.py +16 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_connection_pool.py +9 -1
- redis-7.0.0b1/tests/test_maintenance_events.py +869 -0
- redis-7.0.0b1/tests/test_maintenance_events_handling.py +2175 -0
- redis-7.0.0b1/tests/test_scenario/__init__.py +0 -0
- redis-7.0.0b1/tests/test_scenario/conftest.py +120 -0
- redis-7.0.0b1/tests/test_scenario/fault_injector_client.py +149 -0
- redis-7.0.0b1/tests/test_scenario/hitless_upgrade_helpers.py +287 -0
- redis-7.0.0b1/tests/test_scenario/test_hitless_upgrade.py +795 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_vsets.py +111 -1
- {redis-6.4.0 → redis-7.0.0b1}/.gitignore +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/LICENSE +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/README.md +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/dev_requirements.txt +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/pyproject.toml +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/encoders.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/helpers.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/resp2.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/socket.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/connection.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/lock.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/retry.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/sentinel.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/auth/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/auth/err.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/auth/idp.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/auth/token.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/auth/token_manager.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/backoff.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/bf/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/bf/commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/bf/info.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/cluster.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/json/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/json/_util.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/json/commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/json/decoders.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/json/path.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/redismodules.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/_util.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/aggregation.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/dialect.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/document.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/field.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/index_definition.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/profile_information.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/querystring.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/reducers.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/result.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/search/suggestion.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/sentinel.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/timeseries/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/timeseries/commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/timeseries/info.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/timeseries/utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/commands/vectorset/__init__.py +1 -1
- {redis-6.4.0 → redis-7.0.0b1}/redis/crc.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/credentials.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/event.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/exceptions.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/lock.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/ocsp.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/py.typed +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/retry.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/sentinel.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/typing.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/redis/utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/conftest.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/entraid_utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/mocks.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/ssl_utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/compat.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/conftest.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/mocks.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_bloom.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_cluster.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_cluster_transaction.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_connect.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_connection.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_connection_pool.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_credentials.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_cwe_404.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_encoding.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_hash.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_json.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_lock.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_monitor.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_pipeline.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_pubsub.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_retry.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_scripting.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_search.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_sentinel.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_sentinel_managed_connection.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_ssl.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_timeseries.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/test_utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/testdata/jsontestdata.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/testdata/titles.csv +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_asyncio/testdata/will_play_text.csv.bz2 +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_auth/__init__.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_auth/test_token.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_auth/test_token_manager.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_backoff.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_bloom.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_cache.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_cluster_transaction.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_command_parser.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_commands.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_connect.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_connection.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_credentials.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_encoding.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_function.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_hash.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_helpers.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_json.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_lock.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_max_connections_error.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_monitor.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_multiprocessing.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_parsers/test_helpers.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_pipeline.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_pubsub.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_retry.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_scripting.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_search.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_sentinel.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_sentinel_managed_connection.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_ssl.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_timeseries.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/test_utils.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/testdata/jsontestdata.py +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/testdata/titles.csv +0 -0
- {redis-6.4.0 → redis-7.0.0b1}/tests/testdata/will_play_text.csv.bz2 +0 -0
{redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/base.py

@@ -1,7 +1,17 @@
+import logging
 import sys
 from abc import ABC
 from asyncio import IncompleteReadError, StreamReader, TimeoutError
-from typing import Callable, List, Optional, Protocol, Union
+from typing import Awaitable, Callable, List, Optional, Protocol, Union
+
+from redis.maintenance_events import (
+    MaintenanceEvent,
+    NodeFailedOverEvent,
+    NodeFailingOverEvent,
+    NodeMigratedEvent,
+    NodeMigratingEvent,
+    NodeMovingEvent,
+)

 if sys.version_info.major >= 3 and sys.version_info.minor >= 11:
     from asyncio import timeout as async_timeout
@@ -50,6 +60,8 @@ NO_AUTH_SET_ERROR = {
     "Client sent AUTH, but no password is set": AuthenticationError,
 }

+logger = logging.getLogger(__name__)
+

 class BaseParser(ABC):
     EXCEPTION_CLASSES = {
@@ -158,7 +170,75 @@ class AsyncBaseParser(BaseParser):
         raise NotImplementedError()


-_INVALIDATION_MESSAGE = [b"invalidate", "invalidate"]
+class MaintenanceNotificationsParser:
+    """Protocol defining maintenance push notification parsing functionality"""
+
+    @staticmethod
+    def parse_maintenance_start_msg(response, notification_type):
+        # Expected message format is: <event_type> <seq_number> <time>
+        id = response[1]
+        ttl = response[2]
+        return notification_type(id, ttl)
+
+    @staticmethod
+    def parse_maintenance_completed_msg(response, notification_type):
+        # Expected message format is: <event_type> <seq_number>
+        id = response[1]
+        return notification_type(id)
+
+    @staticmethod
+    def parse_moving_msg(response):
+        # Expected message format is: MOVING <seq_number> <time> <endpoint>
+        id = response[1]
+        ttl = response[2]
+        if response[3] is None:
+            host, port = None, None
+        else:
+            value = response[3]
+            if isinstance(value, bytes):
+                value = value.decode()
+            host, port = value.split(":")
+            port = int(port) if port is not None else None
+
+        return NodeMovingEvent(id, host, port, ttl)
+
+
+_INVALIDATION_MESSAGE = "invalidate"
+_MOVING_MESSAGE = "MOVING"
+_MIGRATING_MESSAGE = "MIGRATING"
+_MIGRATED_MESSAGE = "MIGRATED"
+_FAILING_OVER_MESSAGE = "FAILING_OVER"
+_FAILED_OVER_MESSAGE = "FAILED_OVER"
+
+_MAINTENANCE_MESSAGES = (
+    _MIGRATING_MESSAGE,
+    _MIGRATED_MESSAGE,
+    _FAILING_OVER_MESSAGE,
+    _FAILED_OVER_MESSAGE,
+)
+
+MSG_TYPE_TO_EVENT_PARSER_MAPPING: dict[str, tuple[type[MaintenanceEvent], Callable]] = {
+    _MIGRATING_MESSAGE: (
+        NodeMigratingEvent,
+        MaintenanceNotificationsParser.parse_maintenance_start_msg,
+    ),
+    _MIGRATED_MESSAGE: (
+        NodeMigratedEvent,
+        MaintenanceNotificationsParser.parse_maintenance_completed_msg,
+    ),
+    _FAILING_OVER_MESSAGE: (
+        NodeFailingOverEvent,
+        MaintenanceNotificationsParser.parse_maintenance_start_msg,
+    ),
+    _FAILED_OVER_MESSAGE: (
+        NodeFailedOverEvent,
+        MaintenanceNotificationsParser.parse_maintenance_completed_msg,
+    ),
+    _MOVING_MESSAGE: (
+        NodeMovingEvent,
+        MaintenanceNotificationsParser.parse_moving_msg,
+    ),
+}


 class PushNotificationsParser(Protocol):
@@ -166,16 +246,51 @@ class PushNotificationsParser(Protocol):

     pubsub_push_handler_func: Callable
     invalidation_push_handler_func: Optional[Callable] = None
+    node_moving_push_handler_func: Optional[Callable] = None
+    maintenance_push_handler_func: Optional[Callable] = None

     def handle_pubsub_push_response(self, response):
         """Handle pubsub push responses"""
         raise NotImplementedError()

     def handle_push_response(self, response, **kwargs):
-        if response[0] not in _INVALIDATION_MESSAGE:
+        msg_type = response[0]
+        if isinstance(msg_type, bytes):
+            msg_type = msg_type.decode()
+
+        if msg_type not in (
+            _INVALIDATION_MESSAGE,
+            *_MAINTENANCE_MESSAGES,
+            _MOVING_MESSAGE,
+        ):
             return self.pubsub_push_handler_func(response)
-        if self.invalidation_push_handler_func:
-            return self.invalidation_push_handler_func(response)
+
+        try:
+            if (
+                msg_type == _INVALIDATION_MESSAGE
+                and self.invalidation_push_handler_func
+            ):
+                return self.invalidation_push_handler_func(response)
+
+            if msg_type == _MOVING_MESSAGE and self.node_moving_push_handler_func:
+                parser_function = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type][1]
+
+                notification = parser_function(response)
+                return self.node_moving_push_handler_func(notification)
+
+            if msg_type in _MAINTENANCE_MESSAGES and self.maintenance_push_handler_func:
+                parser_function = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type][1]
+                notification_type = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type][0]
+                notification = parser_function(response, notification_type)
+
+                if notification is not None:
+                    return self.maintenance_push_handler_func(notification)
+        except Exception as e:
+            logger.error(
+                "Error handling {} message ({}): {}".format(msg_type, response, e)
+            )
+
+        return None

     def set_pubsub_push_handler(self, pubsub_push_handler_func):
         self.pubsub_push_handler_func = pubsub_push_handler_func
@@ -183,12 +298,20 @@ class PushNotificationsParser(Protocol):
     def set_invalidation_push_handler(self, invalidation_push_handler_func):
         self.invalidation_push_handler_func = invalidation_push_handler_func

+    def set_node_moving_push_handler(self, node_moving_push_handler_func):
+        self.node_moving_push_handler_func = node_moving_push_handler_func
+
+    def set_maintenance_push_handler(self, maintenance_push_handler_func):
+        self.maintenance_push_handler_func = maintenance_push_handler_func
+

 class AsyncPushNotificationsParser(Protocol):
     """Protocol defining async RESP3-specific parsing functionality"""

     pubsub_push_handler_func: Callable
     invalidation_push_handler_func: Optional[Callable] = None
+    node_moving_push_handler_func: Optional[Callable[..., Awaitable[None]]] = None
+    maintenance_push_handler_func: Optional[Callable[..., Awaitable[None]]] = None

     async def handle_pubsub_push_response(self, response):
         """Handle pubsub push responses asynchronously"""
@@ -196,10 +319,46 @@ class AsyncPushNotificationsParser(Protocol):

     async def handle_push_response(self, response, **kwargs):
         """Handle push responses asynchronously"""
-        if response[0] not in _INVALIDATION_MESSAGE:
+
+        msg_type = response[0]
+        if isinstance(msg_type, bytes):
+            msg_type = msg_type.decode()
+
+        if msg_type not in (
+            _INVALIDATION_MESSAGE,
+            *_MAINTENANCE_MESSAGES,
+            _MOVING_MESSAGE,
+        ):
             return await self.pubsub_push_handler_func(response)
-        if self.invalidation_push_handler_func:
-            return await self.invalidation_push_handler_func(response)
+
+        try:
+            if (
+                msg_type == _INVALIDATION_MESSAGE
+                and self.invalidation_push_handler_func
+            ):
+                return await self.invalidation_push_handler_func(response)
+
+            if isinstance(msg_type, bytes):
+                msg_type = msg_type.decode()
+
+            if msg_type == _MOVING_MESSAGE and self.node_moving_push_handler_func:
+                parser_function = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type][1]
+                notification = parser_function(response)
+                return await self.node_moving_push_handler_func(notification)
+
+            if msg_type in _MAINTENANCE_MESSAGES and self.maintenance_push_handler_func:
+                parser_function = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type][1]
+                notification_type = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type][0]
+                notification = parser_function(response, notification_type)
+
+                if notification is not None:
+                    return await self.maintenance_push_handler_func(notification)
+        except Exception as e:
+            logger.error(
+                "Error handling {} message ({}): {}".format(msg_type, response, e)
+            )
+
+        return None

     def set_pubsub_push_handler(self, pubsub_push_handler_func):
         """Set the pubsub push handler function"""
@@ -209,6 +368,12 @@ class AsyncPushNotificationsParser(Protocol):
         """Set the invalidation push handler function"""
         self.invalidation_push_handler_func = invalidation_push_handler_func

+    def set_node_moving_push_handler(self, node_moving_push_handler_func):
+        self.node_moving_push_handler_func = node_moving_push_handler_func
+
+    def set_maintenance_push_handler(self, maintenance_push_handler_func):
+        self.maintenance_push_handler_func = maintenance_push_handler_func
+

 class _AsyncRESPBase(AsyncBaseParser):
     """Base class for async resp parsing"""
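A minimal sketch of how the new mapping in redis/_parsers/base.py ties a push message type to its event class and parser function. The raw frame below is an assumption inferred from parse_moving_msg (MOVING <seq_number> <ttl> <host:port>), not an official wire sample:

# Hypothetical frame, shape inferred from parse_moving_msg above.
from redis._parsers.base import MSG_TYPE_TO_EVENT_PARSER_MAPPING

raw_push = [b"MOVING", 12, 30, b"10.0.0.1:6379"]

msg_type = raw_push[0].decode()
event_cls, parser = MSG_TYPE_TO_EVENT_PARSER_MAPPING[msg_type]

# MOVING uses a single-argument parser; the other maintenance messages are
# parsed as parser(raw_push, event_cls), mirroring handle_push_response.
event = parser(raw_push)
print(type(event).__name__)  # NodeMovingEvent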
{redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/hiredis.py

@@ -47,6 +47,8 @@ class _HiredisParser(BaseParser, PushNotificationsParser):
         self.socket_read_size = socket_read_size
         self._buffer = bytearray(socket_read_size)
         self.pubsub_push_handler_func = self.handle_pubsub_push_response
+        self.node_moving_push_handler_func = None
+        self.maintenance_push_handler_func = None
         self.invalidation_push_handler_func = None
         self._hiredis_PushNotificationType = None

@@ -141,12 +143,15 @@ class _HiredisParser(BaseParser, PushNotificationsParser):
                response, self._hiredis_PushNotificationType
            ):
                response = self.handle_push_response(response)
-                if not push_request:
-                    return self.read_response(
-                        disable_decoding=disable_decoding, push_request=push_request
-                    )
-                else:
+
+                # if this is a push request return the push response
+                if push_request:
                     return response
+
+                return self.read_response(
+                    disable_decoding=disable_decoding,
+                    push_request=push_request,
+                )
            return response

        if disable_decoding:
@@ -169,12 +174,13 @@ class _HiredisParser(BaseParser, PushNotificationsParser):
            response, self._hiredis_PushNotificationType
        ):
            response = self.handle_push_response(response)
-            if not push_request:
-                return self.read_response(
-                    disable_decoding=disable_decoding, push_request=push_request
-                )
-            else:
+            if push_request:
                 return response
+            return self.read_response(
+                disable_decoding=disable_decoding,
+                push_request=push_request,
+            )
+
        elif (
            isinstance(response, list)
            and response
{redis-6.4.0 → redis-7.0.0b1}/redis/_parsers/resp3.py

@@ -18,6 +18,8 @@ class _RESP3Parser(_RESPBase, PushNotificationsParser):
     def __init__(self, socket_read_size):
         super().__init__(socket_read_size)
         self.pubsub_push_handler_func = self.handle_pubsub_push_response
+        self.node_moving_push_handler_func = None
+        self.maintenance_push_handler_func = None
         self.invalidation_push_handler_func = None

     def handle_pubsub_push_response(self, response):
@@ -117,17 +119,21 @@ class _RESP3Parser(_RESPBase, PushNotificationsParser):
                for _ in range(int(response))
            ]
            response = self.handle_push_response(response)
-            if not push_request:
-                return self._read_response(
-                    disable_decoding=disable_decoding, push_request=push_request
-                )
-            else:
+
+            # if this is a push request return the push response
+            if push_request:
                 return response
+
+            return self._read_response(
+                disable_decoding=disable_decoding,
+                push_request=push_request,
+            )
        else:
            raise InvalidResponse(f"Protocol Error: {raw!r}")

        if isinstance(response, bytes) and disable_decoding is False:
            response = self.encoder.decode(response)
+
        return response

{redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/client.py

@@ -387,6 +387,12 @@ class Redis(
         # on a set of redis commands
         self._single_conn_lock = asyncio.Lock()

+        # When used as an async context manager, we need to increment and decrement
+        # a usage counter so that we can close the connection pool when no one is
+        # using the client.
+        self._usage_counter = 0
+        self._usage_lock = asyncio.Lock()
+
     def __repr__(self):
         return (
             f"<{self.__class__.__module__}.{self.__class__.__name__}"
@@ -594,10 +600,47 @@ class Redis(
         )

     async def __aenter__(self: _RedisT) -> _RedisT:
-        return await self.initialize()
+        """
+        Async context manager entry. Increments a usage counter so that the
+        connection pool is only closed (via aclose()) when no context is using
+        the client.
+        """
+        await self._increment_usage()
+        try:
+            # Initialize the client (i.e. establish connection, etc.)
+            return await self.initialize()
+        except Exception:
+            # If initialization fails, decrement the counter to keep it in sync
+            await self._decrement_usage()
+            raise
+
+    async def _increment_usage(self) -> int:
+        """
+        Helper coroutine to increment the usage counter while holding the lock.
+        Returns the new value of the usage counter.
+        """
+        async with self._usage_lock:
+            self._usage_counter += 1
+            return self._usage_counter
+
+    async def _decrement_usage(self) -> int:
+        """
+        Helper coroutine to decrement the usage counter while holding the lock.
+        Returns the new value of the usage counter.
+        """
+        async with self._usage_lock:
+            self._usage_counter -= 1
+            return self._usage_counter

     async def __aexit__(self, exc_type, exc_value, traceback):
-        await self.aclose()
+        """
+        Async context manager exit. Decrements a usage counter. If this is the
+        last exit (counter becomes zero), the client closes its connection pool.
+        """
+        current_usage = await asyncio.shield(self._decrement_usage())
+        if current_usage == 0:
+            # This was the last active context, so disconnect the pool.
+            await asyncio.shield(self.aclose())

     _DEL_MESSAGE = "Unclosed Redis client"

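A usage sketch of the new reference-counted context management (my reading of the hunk above, not taken from the package docs): nested async with blocks over one client now only trigger aclose() on the outermost exit, because __aenter__/__aexit__ track _usage_counter.

import asyncio

import redis.asyncio as redis


async def main() -> None:
    client = redis.Redis(host="localhost", port=6379)

    async with client:          # usage counter -> 1
        async with client:      # usage counter -> 2
            await client.set("greeting", "hello")
        # Inner exit: counter drops back to 1, so the pool stays open.
        print(await client.get("greeting"))
    # Outer exit: counter reaches 0 and aclose() disconnects the pool.


asyncio.run(main())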
{redis-6.4.0 → redis-7.0.0b1}/redis/asyncio/cluster.py

@@ -431,6 +431,12 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands):
         self._initialize = True
         self._lock: Optional[asyncio.Lock] = None

+        # When used as an async context manager, we need to increment and decrement
+        # a usage counter so that we can close the connection pool when no one is
+        # using the client.
+        self._usage_counter = 0
+        self._usage_lock = asyncio.Lock()
+
     async def initialize(self) -> "RedisCluster":
         """Get all nodes from startup nodes & creates connections if not initialized."""
         if self._initialize:
@@ -467,10 +473,47 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands):
         await self.aclose()

     async def __aenter__(self) -> "RedisCluster":
-        return await self.initialize()
+        """
+        Async context manager entry. Increments a usage counter so that the
+        connection pool is only closed (via aclose()) when no context is using
+        the client.
+        """
+        await self._increment_usage()
+        try:
+            # Initialize the client (i.e. establish connection, etc.)
+            return await self.initialize()
+        except Exception:
+            # If initialization fails, decrement the counter to keep it in sync
+            await self._decrement_usage()
+            raise

-    async def __aexit__(self, exc_type, exc_value, traceback):
-        await self.aclose()
+    async def _increment_usage(self) -> int:
+        """
+        Helper coroutine to increment the usage counter while holding the lock.
+        Returns the new value of the usage counter.
+        """
+        async with self._usage_lock:
+            self._usage_counter += 1
+            return self._usage_counter
+
+    async def _decrement_usage(self) -> int:
+        """
+        Helper coroutine to decrement the usage counter while holding the lock.
+        Returns the new value of the usage counter.
+        """
+        async with self._usage_lock:
+            self._usage_counter -= 1
+            return self._usage_counter
+
+    async def __aexit__(self, exc_type, exc_value, traceback):
+        """
+        Async context manager exit. Decrements a usage counter. If this is the
+        last exit (counter becomes zero), the client closes its connection pool.
+        """
+        current_usage = await asyncio.shield(self._decrement_usage())
+        if current_usage == 0:
+            # This was the last active context, so disconnect the pool.
+            await asyncio.shield(self.aclose())

     def __await__(self) -> Generator[Any, None, "RedisCluster"]:
         return self.initialize().__await__()
{redis-6.4.0 → redis-7.0.0b1}/redis/client.py

@@ -56,6 +56,10 @@ from redis.exceptions import (
     WatchError,
 )
 from redis.lock import Lock
+from redis.maintenance_events import (
+    MaintenanceEventPoolHandler,
+    MaintenanceEventsConfig,
+)
 from redis.retry import Retry
 from redis.utils import (
     _set_info_logger,
@@ -244,6 +248,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         cache: Optional[CacheInterface] = None,
         cache_config: Optional[CacheConfig] = None,
         event_dispatcher: Optional[EventDispatcher] = None,
+        maintenance_events_config: Optional[MaintenanceEventsConfig] = None,
     ) -> None:
         """
         Initialize a new Redis client.
@@ -368,6 +373,23 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         ]:
             raise RedisError("Client caching is only supported with RESP version 3")

+        if maintenance_events_config and self.connection_pool.get_protocol() not in [
+            3,
+            "3",
+        ]:
+            raise RedisError(
+                "Push handlers on connection are only supported with RESP version 3"
+            )
+        if maintenance_events_config and maintenance_events_config.enabled:
+            self.maintenance_events_pool_handler = MaintenanceEventPoolHandler(
+                self.connection_pool, maintenance_events_config
+            )
+            self.connection_pool.set_maintenance_events_pool_handler(
+                self.maintenance_events_pool_handler
+            )
+        else:
+            self.maintenance_events_pool_handler = None
+
         self.single_connection_lock = threading.RLock()
         self.connection = None
         self._single_connection_client = single_connection_client
@@ -565,8 +587,15 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         return Monitor(self.connection_pool)

     def client(self):
+        maintenance_events_config = (
+            None
+            if self.maintenance_events_pool_handler is None
+            else self.maintenance_events_pool_handler.config
+        )
         return self.__class__(
-            connection_pool=self.connection_pool, single_connection_client=True
+            connection_pool=self.connection_pool,
+            single_connection_client=True,
+            maintenance_events_config=maintenance_events_config,
         )

     def __enter__(self):
@@ -635,7 +664,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
                 ),
                 lambda _: self._close_connection(conn),
             )
+
         finally:
+            if conn and conn.should_reconnect():
+                self._close_connection(conn)
+                conn.connect()
             if self._single_connection_client:
                 self.single_connection_lock.release()
             if not self.connection:
@@ -686,11 +719,7 @@ class Monitor:
         self.connection = self.connection_pool.get_connection()

     def __enter__(self):
-        self.connection.send_command("MONITOR")
-        # check that monitor returns 'OK', but don't return it to user
-        response = self.connection.read_response()
-        if not bool_ok(response):
-            raise RedisError(f"MONITOR failed: {response}")
+        self._start_monitor()
         return self

     def __exit__(self, *args):
@@ -700,8 +729,13 @@ class Monitor:
     def next_command(self):
         """Parse the response from a monitor command"""
         response = self.connection.read_response()
+
+        if response is None:
+            return None
+
         if isinstance(response, bytes):
             response = self.connection.encoder.decode(response, force=True)
+
         command_time, command_data = response.split(" ", 1)
         m = self.monitor_re.match(command_data)
         db_id, client_info, command = m.groups()
@@ -737,6 +771,14 @@ class Monitor:
         while True:
             yield self.next_command()

+    def _start_monitor(self):
+        self.connection.send_command("MONITOR")
+        # check that monitor returns 'OK', but don't return it to user
+        response = self.connection.read_response()
+
+        if not bool_ok(response):
+            raise RedisError(f"MONITOR failed: {response}")
+

 class PubSub:
     """
@@ -881,7 +923,7 @@ class PubSub:
         """
         ttl = 10
         conn = self.connection
-        while self.health_check_response_counter > 0 and ttl > 0:
+        while conn and self.health_check_response_counter > 0 and ttl > 0:
             if self._execute(conn, conn.can_read, timeout=conn.socket_timeout):
                 response = self._execute(conn, conn.read_response)
                 if self.is_health_check_response(response):
@@ -911,11 +953,17 @@ class PubSub:
        called by the # connection to resubscribe us to any channels and
        patterns we were previously listening to
        """
-        return conn.retry.call_with_retry(
+
+        if conn.should_reconnect():
+            self._reconnect(conn)
+
+        response = conn.retry.call_with_retry(
            lambda: command(*args, **kwargs),
            lambda _: self._reconnect(conn),
        )

+        return response
+
     def parse_response(self, block=True, timeout=0):
        """Parse the response from a publish/subscribe command"""
        conn = self.connection
@@ -1125,6 +1173,7 @@ class PubSub:
             return None

         response = self.parse_response(block=(timeout is None), timeout=timeout)
+
         if response:
             return self.handle_message(response, ignore_subscribe_messages)
         return None
@@ -1148,6 +1197,7 @@ class PubSub:
             return None
         if isinstance(response, bytes):
             response = [b"pong", response] if response != b"PONG" else [b"pong", b""]
+
         message_type = str_if_bytes(response[0])
         if message_type == "pmessage":
             message = {
@@ -1351,6 +1401,7 @@ class Pipeline(Redis):
         # clean up the other instance attributes
         self.watching = False
         self.explicit_transaction = False
+
         # we can safely return the connection to the pool here since we're
         # sure we're no longer WATCHing anything
         if self.connection:
@@ -1510,6 +1561,7 @@ class Pipeline(Redis):
             if command_name in self.response_callbacks:
                 r = self.response_callbacks[command_name](r, **options)
             data.append(r)
+
         return data

     def _execute_pipeline(self, connection, commands, raise_on_error):
@@ -1517,16 +1569,17 @@ class Pipeline(Redis):
         all_cmds = connection.pack_commands([args for args, _ in commands])
         connection.send_packed_command(all_cmds)

-        response = []
+        responses = []
         for args, options in commands:
             try:
-                response.append(self.parse_response(connection, args[0], **options))
+                responses.append(self.parse_response(connection, args[0], **options))
             except ResponseError as e:
-                response.append(e)
+                responses.append(e)

         if raise_on_error:
-            self.raise_first_error(commands, response)
-        return response
+            self.raise_first_error(commands, responses)
+
+        return responses

     def raise_first_error(self, commands, response):
         for i, r in enumerate(response):
@@ -1611,6 +1664,8 @@ class Pipeline(Redis):
                 lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
+            # in reset() the connection is disconnected before returned to the pool if
+            # it is marked for reconnect.
             self.reset()

     def discard(self):
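A hedged sketch of opting into the new maintenance-event handling on the synchronous client, based only on the constructor hunk above: protocol=3 is required by the new RedisError guard, and MaintenanceEventsConfig(enabled=True) is an assumed constructor signature inferred from the `.enabled` check, not documented API.

from redis import Redis
from redis.maintenance_events import MaintenanceEventsConfig

client = Redis(
    host="localhost",
    port=6379,
    protocol=3,  # maintenance push handlers require RESP3 per the new guard
    # Assumed constructor signature, inferred from the `.enabled` check above.
    maintenance_events_config=MaintenanceEventsConfig(enabled=True),
)

# With the config enabled, __init__ installs a MaintenanceEventPoolHandler on
# the connection pool so MOVING/MIGRATING/FAILING_OVER pushes are handled.
client.ping()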
|