valkey-glide 1.3.5rc2__pp39-pypy39_pp73-macosx_10_7_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of valkey-glide might be problematic. See the package registry's advisory for more details.
- glide/__init__.py +330 -0
- glide/async_commands/__init__.py +5 -0
- glide/async_commands/bitmap.py +311 -0
- glide/async_commands/cluster_commands.py +1294 -0
- glide/async_commands/command_args.py +102 -0
- glide/async_commands/core.py +7040 -0
- glide/async_commands/server_modules/ft.py +395 -0
- glide/async_commands/server_modules/ft_options/ft_aggregate_options.py +293 -0
- glide/async_commands/server_modules/ft_options/ft_constants.py +84 -0
- glide/async_commands/server_modules/ft_options/ft_create_options.py +409 -0
- glide/async_commands/server_modules/ft_options/ft_profile_options.py +108 -0
- glide/async_commands/server_modules/ft_options/ft_search_options.py +131 -0
- glide/async_commands/server_modules/glide_json.py +1255 -0
- glide/async_commands/server_modules/json_batch.py +790 -0
- glide/async_commands/sorted_set.py +402 -0
- glide/async_commands/standalone_commands.py +935 -0
- glide/async_commands/stream.py +442 -0
- glide/async_commands/transaction.py +5175 -0
- glide/config.py +590 -0
- glide/constants.py +120 -0
- glide/exceptions.py +62 -0
- glide/glide.pyi +36 -0
- glide/glide.pypy39-pp73-darwin.so +0 -0
- glide/glide_client.py +604 -0
- glide/logger.py +85 -0
- glide/protobuf/command_request_pb2.py +54 -0
- glide/protobuf/command_request_pb2.pyi +1164 -0
- glide/protobuf/connection_request_pb2.py +52 -0
- glide/protobuf/connection_request_pb2.pyi +292 -0
- glide/protobuf/response_pb2.py +32 -0
- glide/protobuf/response_pb2.pyi +101 -0
- glide/protobuf_codec.py +109 -0
- glide/py.typed +0 -0
- glide/routes.py +114 -0
- valkey_glide-1.3.5rc2.dist-info/METADATA +125 -0
- valkey_glide-1.3.5rc2.dist-info/RECORD +37 -0
- valkey_glide-1.3.5rc2.dist-info/WHEEL +4 -0
glide/glide_client.py
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
1
|
+
# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import sys
|
|
5
|
+
import threading
|
|
6
|
+
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
|
|
7
|
+
|
|
8
|
+
from glide.async_commands.cluster_commands import ClusterCommands
|
|
9
|
+
from glide.async_commands.command_args import ObjectType
|
|
10
|
+
from glide.async_commands.core import CoreCommands
|
|
11
|
+
from glide.async_commands.standalone_commands import StandaloneCommands
|
|
12
|
+
from glide.config import BaseClientConfiguration, ServerCredentials
|
|
13
|
+
from glide.constants import DEFAULT_READ_BYTES_SIZE, OK, TEncodable, TRequest, TResult
|
|
14
|
+
from glide.exceptions import (
|
|
15
|
+
ClosingError,
|
|
16
|
+
ConfigurationError,
|
|
17
|
+
ConnectionError,
|
|
18
|
+
ExecAbortError,
|
|
19
|
+
RequestError,
|
|
20
|
+
TimeoutError,
|
|
21
|
+
)
|
|
22
|
+
from glide.logger import Level as LogLevel
|
|
23
|
+
from glide.logger import Logger as ClientLogger
|
|
24
|
+
from glide.protobuf.command_request_pb2 import Command, CommandRequest, RequestType
|
|
25
|
+
from glide.protobuf.connection_request_pb2 import ConnectionRequest
|
|
26
|
+
from glide.protobuf.response_pb2 import RequestErrorType, Response
|
|
27
|
+
from glide.protobuf_codec import PartialMessageException, ProtobufCodec
|
|
28
|
+
from glide.routes import Route, set_protobuf_route
|
|
29
|
+
|
|
30
|
+
from .glide import (
|
|
31
|
+
DEFAULT_TIMEOUT_IN_MILLISECONDS,
|
|
32
|
+
MAX_REQUEST_ARGS_LEN,
|
|
33
|
+
ClusterScanCursor,
|
|
34
|
+
create_leaked_bytes_vec,
|
|
35
|
+
get_statistics,
|
|
36
|
+
start_socket_listener_external,
|
|
37
|
+
value_from_pointer,
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
if sys.version_info >= (3, 11):
|
|
41
|
+
import asyncio as async_timeout
|
|
42
|
+
from typing import Self
|
|
43
|
+
else:
|
|
44
|
+
import async_timeout
|
|
45
|
+
from typing_extensions import Self
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def get_request_error_class(
    error_type: Optional[RequestErrorType.ValueType],
) -> Type[RequestError]:
    """Map a protobuf ``RequestErrorType`` to the exception class to raise.

    Args:
        error_type (Optional[RequestErrorType.ValueType]): The error type
            reported by the core, or None.

    Returns:
        Type[RequestError]: The matching exception class. ``Unspecified``,
        unknown values, and None all map to the generic ``RequestError``.
    """
    if error_type == RequestErrorType.Disconnect:
        return ConnectionError
    if error_type == RequestErrorType.ExecAbort:
        return ExecAbortError
    if error_type == RequestErrorType.Timeout:
        return TimeoutError
    # Unspecified and any unknown/None value fall back to the generic error;
    # the previous explicit Unspecified branch was redundant with this return.
    return RequestError
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class BaseClient(CoreCommands):
|
|
63
|
+
def __init__(self, config: BaseClientConfiguration):
|
|
64
|
+
"""
|
|
65
|
+
To create a new client, use the `create` classmethod
|
|
66
|
+
"""
|
|
67
|
+
self.config: BaseClientConfiguration = config
|
|
68
|
+
self._available_futures: Dict[int, asyncio.Future] = {}
|
|
69
|
+
self._available_callback_indexes: List[int] = list()
|
|
70
|
+
self._buffered_requests: List[TRequest] = list()
|
|
71
|
+
self._writer_lock = threading.Lock()
|
|
72
|
+
self.socket_path: Optional[str] = None
|
|
73
|
+
self._reader_task: Optional[asyncio.Task] = None
|
|
74
|
+
self._is_closed: bool = False
|
|
75
|
+
self._pubsub_futures: List[asyncio.Future] = []
|
|
76
|
+
self._pubsub_lock = threading.Lock()
|
|
77
|
+
self._pending_push_notifications: List[Response] = list()
|
|
78
|
+
|
|
79
|
+
@classmethod
|
|
80
|
+
async def create(cls, config: BaseClientConfiguration) -> Self:
|
|
81
|
+
"""Creates a Glide client.
|
|
82
|
+
|
|
83
|
+
Args:
|
|
84
|
+
config (ClientConfiguration): The client configurations.
|
|
85
|
+
If no configuration is provided, a default client to "localhost":6379 will be created.
|
|
86
|
+
|
|
87
|
+
Returns:
|
|
88
|
+
Self: a Glide Client instance.
|
|
89
|
+
"""
|
|
90
|
+
config = config
|
|
91
|
+
self = cls(config)
|
|
92
|
+
init_future: asyncio.Future = asyncio.Future()
|
|
93
|
+
loop = asyncio.get_event_loop()
|
|
94
|
+
|
|
95
|
+
def init_callback(socket_path: Optional[str], err: Optional[str]):
|
|
96
|
+
if err is not None:
|
|
97
|
+
raise ClosingError(err)
|
|
98
|
+
elif socket_path is None:
|
|
99
|
+
raise ClosingError(
|
|
100
|
+
"Socket initialization error: Missing valid socket path."
|
|
101
|
+
)
|
|
102
|
+
else:
|
|
103
|
+
# Received socket path
|
|
104
|
+
self.socket_path = socket_path
|
|
105
|
+
loop.call_soon_threadsafe(init_future.set_result, True)
|
|
106
|
+
|
|
107
|
+
start_socket_listener_external(init_callback=init_callback)
|
|
108
|
+
|
|
109
|
+
# will log if the logger was created (wrapper or costumer) on info
|
|
110
|
+
# level or higher
|
|
111
|
+
ClientLogger.log(LogLevel.INFO, "connection info", "new connection established")
|
|
112
|
+
# Wait for the socket listener to complete its initialization
|
|
113
|
+
await init_future
|
|
114
|
+
# Create UDS connection
|
|
115
|
+
await self._create_uds_connection()
|
|
116
|
+
# Start the reader loop as a background task
|
|
117
|
+
self._reader_task = asyncio.create_task(self._reader_loop())
|
|
118
|
+
# Set the client configurations
|
|
119
|
+
await self._set_connection_configurations()
|
|
120
|
+
return self
|
|
121
|
+
|
|
122
|
+
    async def _create_uds_connection(self) -> None:
        """Open the Unix-domain-socket connection to the core's socket listener.

        Sets ``self._reader`` / ``self._writer`` on success; on any failure the
        client is closed and the original exception re-raised.
        """
        try:
            # Open an UDS connection
            # NOTE(review): asyncio timeouts are expressed in seconds, but this
            # constant's name says milliseconds - confirm the intended unit.
            async with async_timeout.timeout(DEFAULT_TIMEOUT_IN_MILLISECONDS):
                reader, writer = await asyncio.open_unix_connection(
                    path=self.socket_path
                )
            self._reader = reader
            self._writer = writer
        except Exception as e:
            await self.close(f"Failed to create UDS connection: {e}")
            raise
|
|
134
|
+
|
|
135
|
+
def __del__(self) -> None:
|
|
136
|
+
try:
|
|
137
|
+
if self._reader_task:
|
|
138
|
+
self._reader_task.cancel()
|
|
139
|
+
except RuntimeError as e:
|
|
140
|
+
if "no running event loop" in str(e):
|
|
141
|
+
# event loop already closed
|
|
142
|
+
pass
|
|
143
|
+
|
|
144
|
+
    async def close(self, err_message: Optional[str] = None) -> None:
        """
        Terminate the client by closing all associated resources, including the socket and any active futures.
        All open futures will be closed with an exception.

        Args:
            err_message (Optional[str]): If not None, this error message will be passed along with the exceptions when closing all open futures.
                Defaults to None.
        """
        # Mark closed first so no new requests are accepted during teardown.
        self._is_closed = True
        for response_future in self._available_futures.values():
            if not response_future.done():
                err_message = "" if err_message is None else err_message
                response_future.set_exception(ClosingError(err_message))
        try:
            self._pubsub_lock.acquire()
            for pubsub_future in self._pubsub_futures:
                # Skip futures that already completed or were cancelled to
                # avoid InvalidStateError from set_exception.
                if not pubsub_future.done() and not pubsub_future.cancelled():
                    pubsub_future.set_exception(ClosingError(""))
        finally:
            self._pubsub_lock.release()

        self._writer.close()
        await self._writer.wait_closed()
        # Explicit finalizer call cancels the background reader task.
        self.__del__()
|
|
169
|
+
|
|
170
|
+
def _get_future(self, callback_idx: int) -> asyncio.Future:
|
|
171
|
+
response_future: asyncio.Future = asyncio.Future()
|
|
172
|
+
self._available_futures.update({callback_idx: response_future})
|
|
173
|
+
return response_future
|
|
174
|
+
|
|
175
|
+
    def _get_protobuf_conn_request(self) -> ConnectionRequest:
        # Overridden by GlideClusterClient to request cluster mode.
        return self.config._create_a_protobuf_conn_request()
|
|
177
|
+
|
|
178
|
+
    async def _set_connection_configurations(self) -> None:
        """Send the connection request to the core and wait for acknowledgment.

        Raises:
            ClosingError: If the core replies with anything other than OK.
        """
        conn_request = self._get_protobuf_conn_request()
        # Callback index 0 is used for the connection handshake.
        response_future: asyncio.Future = self._get_future(0)
        await self._write_or_buffer_request(conn_request)
        await response_future
        # Identity comparison is intentional: _process_response resolves
        # successful futures with the shared OK constant object itself.
        if response_future.result() is not OK:
            raise ClosingError(response_future.result())
|
|
185
|
+
|
|
186
|
+
def _create_write_task(self, request: TRequest):
|
|
187
|
+
asyncio.create_task(self._write_or_buffer_request(request))
|
|
188
|
+
|
|
189
|
+
    async def _write_or_buffer_request(self, request: TRequest):
        # Always enqueue first: whichever coroutine currently holds the writer
        # lock drains the shared buffer, so a request is never lost even when
        # the lock is taken by another writer.
        self._buffered_requests.append(request)
        # Non-blocking acquire - if another writer is active, it will flush
        # the request we just buffered.
        if self._writer_lock.acquire(False):
            try:
                while len(self._buffered_requests) > 0:
                    await self._write_buffered_requests_to_socket()

            finally:
                self._writer_lock.release()
|
|
198
|
+
|
|
199
|
+
async def _write_buffered_requests_to_socket(self) -> None:
|
|
200
|
+
requests = self._buffered_requests
|
|
201
|
+
self._buffered_requests = list()
|
|
202
|
+
b_arr = bytearray()
|
|
203
|
+
for request in requests:
|
|
204
|
+
ProtobufCodec.encode_delimited(b_arr, request)
|
|
205
|
+
self._writer.write(b_arr)
|
|
206
|
+
await self._writer.drain()
|
|
207
|
+
|
|
208
|
+
def _encode_arg(self, arg: TEncodable) -> bytes:
|
|
209
|
+
"""
|
|
210
|
+
Converts a string argument to bytes.
|
|
211
|
+
|
|
212
|
+
Args:
|
|
213
|
+
arg (str): An encodable argument.
|
|
214
|
+
|
|
215
|
+
Returns:
|
|
216
|
+
bytes: The encoded argument as bytes.
|
|
217
|
+
"""
|
|
218
|
+
if isinstance(arg, str):
|
|
219
|
+
# TODO: Allow passing different encoding options
|
|
220
|
+
return bytes(arg, encoding="utf8")
|
|
221
|
+
return arg
|
|
222
|
+
|
|
223
|
+
def _encode_and_sum_size(
|
|
224
|
+
self,
|
|
225
|
+
args_list: Optional[List[TEncodable]],
|
|
226
|
+
) -> Tuple[List[bytes], int]:
|
|
227
|
+
"""
|
|
228
|
+
Encodes the list and calculates the total memory size.
|
|
229
|
+
|
|
230
|
+
Args:
|
|
231
|
+
args_list (Optional[List[TEncodable]]): A list of strings to be converted to bytes.
|
|
232
|
+
If None or empty, returns ([], 0).
|
|
233
|
+
|
|
234
|
+
Returns:
|
|
235
|
+
int: The total memory size of the encoded arguments in bytes.
|
|
236
|
+
"""
|
|
237
|
+
args_size = 0
|
|
238
|
+
encoded_args_list: List[bytes] = []
|
|
239
|
+
if not args_list:
|
|
240
|
+
return (encoded_args_list, args_size)
|
|
241
|
+
for arg in args_list:
|
|
242
|
+
encoded_arg = self._encode_arg(arg) if isinstance(arg, str) else arg
|
|
243
|
+
encoded_args_list.append(encoded_arg)
|
|
244
|
+
args_size += len(encoded_arg)
|
|
245
|
+
return (encoded_args_list, args_size)
|
|
246
|
+
|
|
247
|
+
async def _execute_command(
|
|
248
|
+
self,
|
|
249
|
+
request_type: RequestType.ValueType,
|
|
250
|
+
args: List[TEncodable],
|
|
251
|
+
route: Optional[Route] = None,
|
|
252
|
+
) -> TResult:
|
|
253
|
+
if self._is_closed:
|
|
254
|
+
raise ClosingError(
|
|
255
|
+
"Unable to execute requests; the client is closed. Please create a new client."
|
|
256
|
+
)
|
|
257
|
+
request = CommandRequest()
|
|
258
|
+
request.callback_idx = self._get_callback_index()
|
|
259
|
+
request.single_command.request_type = request_type
|
|
260
|
+
request.single_command.args_array.args[:] = [
|
|
261
|
+
bytes(elem, encoding="utf8") if isinstance(elem, str) else elem
|
|
262
|
+
for elem in args
|
|
263
|
+
]
|
|
264
|
+
(encoded_args, args_size) = self._encode_and_sum_size(args)
|
|
265
|
+
if args_size < MAX_REQUEST_ARGS_LEN:
|
|
266
|
+
request.single_command.args_array.args[:] = encoded_args
|
|
267
|
+
else:
|
|
268
|
+
request.single_command.args_vec_pointer = create_leaked_bytes_vec(
|
|
269
|
+
encoded_args
|
|
270
|
+
)
|
|
271
|
+
set_protobuf_route(request, route)
|
|
272
|
+
return await self._write_request_await_response(request)
|
|
273
|
+
|
|
274
|
+
async def _execute_transaction(
|
|
275
|
+
self,
|
|
276
|
+
commands: List[Tuple[RequestType.ValueType, List[TEncodable]]],
|
|
277
|
+
route: Optional[Route] = None,
|
|
278
|
+
) -> List[TResult]:
|
|
279
|
+
if self._is_closed:
|
|
280
|
+
raise ClosingError(
|
|
281
|
+
"Unable to execute requests; the client is closed. Please create a new client."
|
|
282
|
+
)
|
|
283
|
+
request = CommandRequest()
|
|
284
|
+
request.callback_idx = self._get_callback_index()
|
|
285
|
+
transaction_commands = []
|
|
286
|
+
for requst_type, args in commands:
|
|
287
|
+
command = Command()
|
|
288
|
+
command.request_type = requst_type
|
|
289
|
+
# For now, we allow the user to pass the command as array of strings
|
|
290
|
+
# we convert them here into bytes (the datatype that our rust core expects)
|
|
291
|
+
(encoded_args, args_size) = self._encode_and_sum_size(args)
|
|
292
|
+
if args_size < MAX_REQUEST_ARGS_LEN:
|
|
293
|
+
command.args_array.args[:] = encoded_args
|
|
294
|
+
else:
|
|
295
|
+
command.args_vec_pointer = create_leaked_bytes_vec(encoded_args)
|
|
296
|
+
transaction_commands.append(command)
|
|
297
|
+
request.transaction.commands.extend(transaction_commands)
|
|
298
|
+
set_protobuf_route(request, route)
|
|
299
|
+
return await self._write_request_await_response(request)
|
|
300
|
+
|
|
301
|
+
    async def _execute_script(
        self,
        hash: str,
        keys: Optional[List[Union[str, bytes]]] = None,
        args: Optional[List[Union[str, bytes]]] = None,
        route: Optional[Route] = None,
    ) -> TResult:
        """Invoke a server-side script by its SHA1 hash and await the result.

        Args:
            hash (str): The SHA1 digest identifying the cached script.
                (Parameter name shadows the builtin; kept for API compatibility.)
            keys (Optional[List[Union[str, bytes]]]): Keys accessed by the script.
            args (Optional[List[Union[str, bytes]]]): Additional script arguments.
            route (Optional[Route]): Routing information (cluster mode only).

        Raises:
            ClosingError: If the client has been closed.
        """
        if self._is_closed:
            raise ClosingError(
                "Unable to execute requests; the client is closed. Please create a new client."
            )
        request = CommandRequest()
        request.callback_idx = self._get_callback_index()
        (encoded_keys, keys_size) = self._encode_and_sum_size(keys)
        (encoded_args, args_size) = self._encode_and_sum_size(args)
        # Small payloads travel inside the protobuf message; oversized ones
        # are leaked to the Rust core and passed by pointer instead.
        if (keys_size + args_size) < MAX_REQUEST_ARGS_LEN:
            request.script_invocation.hash = hash
            request.script_invocation.keys[:] = encoded_keys
            request.script_invocation.args[:] = encoded_args

        else:
            request.script_invocation_pointers.hash = hash
            request.script_invocation_pointers.keys_pointer = create_leaked_bytes_vec(
                encoded_keys
            )
            request.script_invocation_pointers.args_pointer = create_leaked_bytes_vec(
                encoded_args
            )
        set_protobuf_route(request, route)
        return await self._write_request_await_response(request)
|
|
331
|
+
|
|
332
|
+
    async def get_pubsub_message(self) -> CoreCommands.PubSubMsg:
        """Wait for and return the next pubsub message.

        Raises:
            ClosingError: If the client has been closed.
            ConfigurationError: If no subscriptions were configured, or if a
                message callback is configured (messages then bypass this API).
        """
        if self._is_closed:
            raise ClosingError(
                "Unable to execute requests; the client is closed. Please create a new client."
            )

        if not self.config._is_pubsub_configured():
            raise ConfigurationError(
                "The operation will never complete since there was no pubsub subscriptions applied to the client."
            )

        if self.config._get_pubsub_callback_and_context()[0] is not None:
            raise ConfigurationError(
                "The operation will never complete since messages will be passed to the configured callback."
            )

        # locking might not be required
        response_future: asyncio.Future = asyncio.Future()
        try:
            self._pubsub_lock.acquire()
            self._pubsub_futures.append(response_future)
            # Resolve immediately if a notification is already pending.
            self._complete_pubsub_futures_safe()
        finally:
            self._pubsub_lock.release()
        return await response_future
|
|
357
|
+
|
|
358
|
+
def try_get_pubsub_message(self) -> Optional[CoreCommands.PubSubMsg]:
|
|
359
|
+
if self._is_closed:
|
|
360
|
+
raise ClosingError(
|
|
361
|
+
"Unable to execute requests; the client is closed. Please create a new client."
|
|
362
|
+
)
|
|
363
|
+
|
|
364
|
+
if not self.config._is_pubsub_configured():
|
|
365
|
+
raise ConfigurationError(
|
|
366
|
+
"The operation will never succeed since there was no pubsbub subscriptions applied to the client."
|
|
367
|
+
)
|
|
368
|
+
|
|
369
|
+
if self.config._get_pubsub_callback_and_context()[0] is not None:
|
|
370
|
+
raise ConfigurationError(
|
|
371
|
+
"The operation will never succeed since messages will be passed to the configured callback."
|
|
372
|
+
)
|
|
373
|
+
|
|
374
|
+
# locking might not be required
|
|
375
|
+
msg: Optional[CoreCommands.PubSubMsg] = None
|
|
376
|
+
try:
|
|
377
|
+
self._pubsub_lock.acquire()
|
|
378
|
+
self._complete_pubsub_futures_safe()
|
|
379
|
+
while len(self._pending_push_notifications) and not msg:
|
|
380
|
+
push_notification = self._pending_push_notifications.pop(0)
|
|
381
|
+
msg = self._notification_to_pubsub_message_safe(push_notification)
|
|
382
|
+
finally:
|
|
383
|
+
self._pubsub_lock.release()
|
|
384
|
+
return msg
|
|
385
|
+
|
|
386
|
+
def _cancel_pubsub_futures_with_exception_safe(self, exception: ConnectionError):
|
|
387
|
+
while len(self._pubsub_futures):
|
|
388
|
+
next_future = self._pubsub_futures.pop(0)
|
|
389
|
+
if not next_future.cancelled():
|
|
390
|
+
next_future.set_exception(exception)
|
|
391
|
+
|
|
392
|
+
def _notification_to_pubsub_message_safe(
|
|
393
|
+
self, response: Response
|
|
394
|
+
) -> Optional[CoreCommands.PubSubMsg]:
|
|
395
|
+
pubsub_message = None
|
|
396
|
+
push_notification = cast(
|
|
397
|
+
Dict[str, Any], value_from_pointer(response.resp_pointer)
|
|
398
|
+
)
|
|
399
|
+
message_kind = push_notification["kind"]
|
|
400
|
+
if message_kind == "Disconnection":
|
|
401
|
+
ClientLogger.log(
|
|
402
|
+
LogLevel.WARN,
|
|
403
|
+
"disconnect notification",
|
|
404
|
+
"Transport disconnected, messages might be lost",
|
|
405
|
+
)
|
|
406
|
+
elif (
|
|
407
|
+
message_kind == "Message"
|
|
408
|
+
or message_kind == "PMessage"
|
|
409
|
+
or message_kind == "SMessage"
|
|
410
|
+
):
|
|
411
|
+
values: List = push_notification["values"]
|
|
412
|
+
if message_kind == "PMessage":
|
|
413
|
+
pubsub_message = BaseClient.PubSubMsg(
|
|
414
|
+
message=values[2], channel=values[1], pattern=values[0]
|
|
415
|
+
)
|
|
416
|
+
else:
|
|
417
|
+
pubsub_message = BaseClient.PubSubMsg(
|
|
418
|
+
message=values[1], channel=values[0], pattern=None
|
|
419
|
+
)
|
|
420
|
+
elif (
|
|
421
|
+
message_kind == "PSubscribe"
|
|
422
|
+
or message_kind == "Subscribe"
|
|
423
|
+
or message_kind == "SSubscribe"
|
|
424
|
+
or message_kind == "Unsubscribe"
|
|
425
|
+
or message_kind == "PUnsubscribe"
|
|
426
|
+
or message_kind == "SUnsubscribe"
|
|
427
|
+
):
|
|
428
|
+
pass
|
|
429
|
+
else:
|
|
430
|
+
ClientLogger.log(
|
|
431
|
+
LogLevel.WARN,
|
|
432
|
+
"unknown notification",
|
|
433
|
+
f"Unknown notification message: '{message_kind}'",
|
|
434
|
+
)
|
|
435
|
+
|
|
436
|
+
return pubsub_message
|
|
437
|
+
|
|
438
|
+
    def _complete_pubsub_futures_safe(self):
        # Pair pending notifications with waiting futures. Notifications that
        # do not translate to a user-visible message (e.g. subscribe
        # confirmations) are consumed without resolving any future.
        # Must be called with `_pubsub_lock` held (hence the `_safe` suffix).
        while len(self._pending_push_notifications) and len(self._pubsub_futures):
            next_push_notification = self._pending_push_notifications.pop(0)
            pubsub_message = self._notification_to_pubsub_message_safe(
                next_push_notification
            )
            if pubsub_message:
                self._pubsub_futures.pop(0).set_result(pubsub_message)
|
|
446
|
+
|
|
447
|
+
async def _write_request_await_response(self, request: CommandRequest):
|
|
448
|
+
# Create a response future for this request and add it to the available
|
|
449
|
+
# futures map
|
|
450
|
+
response_future = self._get_future(request.callback_idx)
|
|
451
|
+
self._create_write_task(request)
|
|
452
|
+
await response_future
|
|
453
|
+
return response_future.result()
|
|
454
|
+
|
|
455
|
+
def _get_callback_index(self) -> int:
|
|
456
|
+
try:
|
|
457
|
+
return self._available_callback_indexes.pop()
|
|
458
|
+
except IndexError:
|
|
459
|
+
# The list is empty
|
|
460
|
+
return len(self._available_futures)
|
|
461
|
+
|
|
462
|
+
    async def _process_response(self, response: Response) -> None:
        """Resolve the future matching `response.callback_idx`.

        A missing future or a `closing_error` from the core tears the whole
        client down.
        """
        res_future = self._available_futures.pop(response.callback_idx, None)
        if not res_future or response.HasField("closing_error"):
            # Fatal path: either the core reported a closing error, or we got
            # a response for a request we don't know about.
            err_msg = (
                response.closing_error
                if response.HasField("closing_error")
                else f"Client Error - closing due to unknown error. callback index: {response.callback_idx}"
            )
            if res_future is not None:
                res_future.set_exception(ClosingError(err_msg))
            await self.close(err_msg)
            raise ClosingError(err_msg)
        else:
            # Recycle the callback index for later requests.
            self._available_callback_indexes.append(response.callback_idx)
            if response.HasField("request_error"):
                error_type = get_request_error_class(response.request_error.type)
                res_future.set_exception(error_type(response.request_error.message))
            elif response.HasField("resp_pointer"):
                # The value lives in Rust memory; materialize it into Python.
                res_future.set_result(value_from_pointer(response.resp_pointer))
            elif response.HasField("constant_response"):
                res_future.set_result(OK)
            else:
                res_future.set_result(None)
|
|
485
|
+
|
|
486
|
+
    async def _process_push(self, response: Response) -> None:
        """Handle a push (pubsub) notification from the core.

        Delivers the message to the configured callback if one exists;
        otherwise queues the notification and tries to satisfy waiting
        `get_pubsub_message` futures.
        """
        if response.HasField("closing_error") or not response.HasField("resp_pointer"):
            # A push without a payload pointer is a protocol violation.
            err_msg = (
                response.closing_error
                if response.HasField("closing_error")
                else "Client Error - push notification without resp_pointer"
            )
            await self.close(err_msg)
            raise ClosingError(err_msg)

        try:
            self._pubsub_lock.acquire()
            callback, context = self.config._get_pubsub_callback_and_context()
            if callback:
                pubsub_message = self._notification_to_pubsub_message_safe(response)
                if pubsub_message:
                    callback(pubsub_message, context)
            else:
                self._pending_push_notifications.append(response)
                self._complete_pubsub_futures_safe()
        finally:
            self._pubsub_lock.release()
|
|
508
|
+
|
|
509
|
+
    async def _reader_loop(self) -> None:
        # Socket reader loop
        """Background task: read length-delimited protobuf responses forever.

        Bytes left over from a partially-received message are carried into the
        next read; EOF closes the client.
        """
        remaining_read_bytes = bytearray()
        while True:
            read_bytes = await self._reader.read(DEFAULT_READ_BYTES_SIZE)
            if len(read_bytes) == 0:
                # EOF - the core side of the socket went away.
                err_msg = "The communication layer was unexpectedly closed."
                await self.close(err_msg)
                raise ClosingError(err_msg)
            # Prepend whatever was left over from the previous chunk.
            read_bytes = remaining_read_bytes + bytearray(read_bytes)
            read_bytes_view = memoryview(read_bytes)
            offset = 0
            # `<=` is deliberate: decoding at offset == len raises
            # PartialMessageException, which resets remaining_read_bytes.
            while offset <= len(read_bytes):
                try:
                    response, offset = ProtobufCodec.decode_delimited(
                        read_bytes, read_bytes_view, offset, Response
                    )
                except PartialMessageException:
                    # Received only partial response, break the inner loop
                    remaining_read_bytes = read_bytes[offset:]
                    break
                response = cast(Response, response)
                if response.is_push:
                    await self._process_push(response=response)
                else:
                    await self._process_response(response=response)
|
|
535
|
+
|
|
536
|
+
    async def get_statistics(self) -> dict:
        """Return internal statistics collected by the Rust core."""
        return get_statistics()
|
|
538
|
+
|
|
539
|
+
    async def _update_connection_password(
        self, password: Optional[str], immediate_auth: bool
    ) -> TResult:
        """Update the password used by the core's server connections.

        Args:
            password (Optional[str]): The new password; None clears it (the
                binding-side copy is then stored as the empty string).
            immediate_auth (bool): Whether to re-authenticate existing
                connections immediately.

        Returns:
            TResult: OK on success.
        """
        request = CommandRequest()
        request.callback_idx = self._get_callback_index()
        if password is not None:
            request.update_connection_password.password = password
        request.update_connection_password.immediate_auth = immediate_auth
        response = await self._write_request_await_response(request)
        # Update the client binding side password if managed to change core configuration password
        if response is OK:
            if self.config.credentials is None:
                self.config.credentials = ServerCredentials(password=password or "")
            self.config.credentials.password = password or ""
        return response
|
|
554
|
+
|
|
555
|
+
|
|
556
|
+
class GlideClusterClient(BaseClient, ClusterCommands):
    """
    Client used for connection to cluster servers.
    For full documentation, see
    https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#cluster
    """

    async def _cluster_scan(
        self,
        cursor: ClusterScanCursor,
        match: Optional[TEncodable] = None,
        count: Optional[int] = None,
        type: Optional[ObjectType] = None,
        allow_non_covered_slots: bool = False,
    ) -> List[Union[ClusterScanCursor, List[bytes]]]:
        """Run one cluster-wide SCAN step.

        Args:
            cursor (ClusterScanCursor): Opaque cursor from the previous step.
            match (Optional[TEncodable]): Glob pattern to filter key names.
            count (Optional[int]): Hint for the number of keys per step.
            type (Optional[ObjectType]): Restrict results to this object type.
                (Parameter name shadows the builtin; kept for API compatibility.)
            allow_non_covered_slots (bool): Continue even when some slots have
                no covering node.

        Returns:
            List[Union[ClusterScanCursor, List[bytes]]]: The next cursor
            followed by the batch of matching keys.

        Raises:
            ClosingError: If the client has been closed.
        """
        if self._is_closed:
            raise ClosingError(
                "Unable to execute requests; the client is closed. Please create a new client."
            )
        request = CommandRequest()
        request.callback_idx = self._get_callback_index()
        # Take out the id string from the wrapping object
        cursor_string = cursor.get_cursor()
        request.cluster_scan.cursor = cursor_string
        request.cluster_scan.allow_non_covered_slots = allow_non_covered_slots
        if match is not None:
            request.cluster_scan.match_pattern = (
                self._encode_arg(match) if isinstance(match, str) else match
            )
        if count is not None:
            request.cluster_scan.count = count
        if type is not None:
            request.cluster_scan.object_type = type.value
        response = await self._write_request_await_response(request)
        # response[0] is the raw cursor id; re-wrap it for the caller.
        return [ClusterScanCursor(bytes(response[0]).decode()), response[1]]

    def _get_protobuf_conn_request(self) -> ConnectionRequest:
        # Same as the base implementation, but requests cluster mode.
        return self.config._create_a_protobuf_conn_request(cluster_mode=True)
|
|
594
|
+
|
|
595
|
+
|
|
596
|
+
# Standalone variant: all behavior is inherited from BaseClient and
# StandaloneCommands; no overrides are needed here.
class GlideClient(BaseClient, StandaloneCommands):
    """
    Client used for connection to standalone servers.
    For full documentation, see
    https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#standalone
    """
|
|
602
|
+
|
|
603
|
+
|
|
604
|
+
# Union of the two concrete client types, for annotating code accepting either.
TGlideClient = Union[GlideClient, GlideClusterClient]
|
glide/logger.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from enum import Enum
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
from .glide import Level as internalLevel
|
|
9
|
+
from .glide import py_init, py_log
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Level(Enum):
    """Log levels exposed to users; each maps to the Rust core's internal level."""

    ERROR = internalLevel.Error
    WARN = internalLevel.Warn
    INFO = internalLevel.Info
    DEBUG = internalLevel.Debug
    TRACE = internalLevel.Trace
    OFF = internalLevel.Off
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Logger:
    """
    A singleton class that allows logging which is consistent with logs from the internal rust core.
    The logger can be set up in 2 ways -
        1. By calling Logger.init, which configures the logger only if it wasn't previously configured.
        2. By calling Logger.set_logger_config, which replaces the existing configuration, and means that new logs will not be
            saved with the logs that were sent before the call.
    If set_logger_config wasn't called, the first log attempt will initialize a new logger with default configuration decided
    by the Rust core.
    """

    _instance = None
    # The effective level, as returned by the Rust core at init time.
    logger_level: internalLevel

    def __init__(self, level: Optional[Level] = None, file_name: Optional[str] = None):
        # py_init returns the level actually applied by the core (it may pick
        # a default when `level` is None); it is stored on the class so every
        # caller shares one configuration.
        level_value = level.value if level else None
        Logger.logger_level = py_init(level_value, file_name)

    @classmethod
    def init(cls, level: Optional[Level] = None, file_name: Optional[str] = None):
        """
        Initialize a logger if it wasn't initialized before - this method is meant to be used when there is no intention to
        replace an existing logger.
        The logger will filter all logs with a level lower than the given level,
        If given a fileName argument, will write the logs to files postfixed with fileName. If fileName isn't provided,
        the logs will be written to the console.

        Args:
            level (Optional[Level]): Set the logger level to one of [ERROR, WARN, INFO, DEBUG, TRACE, OFF].
                If log level isn't provided, the logger will be configured with default configuration decided by the Rust core.
            file_name (Optional[str]): If provided the target of the logs will be the file mentioned.
                Otherwise, logs will be printed to the console.
                To turn off logging completely, set the level to Level.OFF.
        """
        if cls._instance is None:
            cls._instance = cls(level, file_name)

    @classmethod
    def log(cls, log_level: Level, log_identifier: str, message: str):
        """Logs the provided message if its level passes the configured logger level filter.

        Args:
            log_level (Level): The log level of the provided message
            log_identifier (str): The log identifier should give the log a context.
            message (str): The message to log.
        """
        if not cls._instance:
            cls._instance = cls(None)
        # Skip messages filtered out by the configured level. The exact
        # semantics of `is_lower` are defined by the Rust core - presumably
        # "is at least as severe as"; confirm against the core before relying
        # on the direction of this comparison.
        if not log_level.value.is_lower(Logger.logger_level):
            return
        py_log(log_level.value, log_identifier, message)

    @classmethod
    def set_logger_config(
        cls, level: Optional[Level] = None, file_name: Optional[str] = None
    ):
        """Creates a new logger instance and configure it with the provided log level and file name.

        Args:
            level (Optional[Level]): Set the logger level to one of [ERROR, WARN, INFO, DEBUG, TRACE, OFF].
                If log level isn't provided, the logger will be configured with default configuration decided by the Rust core.
            file_name (Optional[str]): If provided the target of the logs will be the file mentioned.
                Otherwise, logs will be printed to the console.
                To turn off logging completely, set the level to OFF.
        """
        Logger._instance = Logger(level, file_name)
|