valkey-glide 1.3.4rc1__cp313-cp313-macosx_11_0_arm64.whl → 2.0.0rc6__cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (32)
  1. glide/__init__.py +11 -7
  2. glide/async_commands/{transaction.py → batch.py} +1413 -987
  3. glide/async_commands/bitmap.py +94 -85
  4. glide/async_commands/cluster_commands.py +308 -123
  5. glide/async_commands/command_args.py +7 -6
  6. glide/async_commands/core.py +1304 -714
  7. glide/async_commands/server_modules/ft.py +83 -14
  8. glide/async_commands/server_modules/ft_options/ft_aggregate_options.py +15 -8
  9. glide/async_commands/server_modules/ft_options/ft_create_options.py +23 -11
  10. glide/async_commands/server_modules/ft_options/ft_profile_options.py +12 -7
  11. glide/async_commands/server_modules/ft_options/ft_search_options.py +12 -6
  12. glide/async_commands/server_modules/glide_json.py +134 -43
  13. glide/async_commands/server_modules/json_batch.py +157 -127
  14. glide/async_commands/sorted_set.py +39 -29
  15. glide/async_commands/standalone_commands.py +199 -95
  16. glide/async_commands/stream.py +94 -87
  17. glide/config.py +165 -105
  18. glide/constants.py +8 -4
  19. glide/glide.cpython-313-darwin.so +0 -0
  20. glide/glide_client.py +273 -94
  21. glide/logger.py +1 -1
  22. glide/protobuf/command_request_pb2.py +15 -15
  23. glide/protobuf/command_request_pb2.pyi +69 -46
  24. glide/protobuf/connection_request_pb2.py +15 -13
  25. glide/protobuf/connection_request_pb2.pyi +57 -29
  26. glide/protobuf/response_pb2.pyi +8 -9
  27. glide/protobuf_codec.py +7 -6
  28. glide/routes.py +41 -8
  29. {valkey_glide-1.3.4rc1.dist-info → valkey_glide-2.0.0rc6.dist-info}/METADATA +29 -8
  30. valkey_glide-2.0.0rc6.dist-info/RECORD +37 -0
  31. valkey_glide-1.3.4rc1.dist-info/RECORD +0 -37
  32. {valkey_glide-1.3.4rc1.dist-info → valkey_glide-2.0.0rc6.dist-info}/WHEEL +0 -0
glide/glide_client.py CHANGED
@@ -1,9 +1,24 @@
  # Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0

- import asyncio
  import sys
  import threading
- from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Awaitable,
+     Dict,
+     List,
+     Optional,
+     Set,
+     Tuple,
+     Type,
+     Union,
+     cast,
+ )
+
+ import anyio
+ import sniffio
+ from anyio import to_thread

  from glide.async_commands.cluster_commands import ClusterCommands
  from glide.async_commands.command_args import ObjectType
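The import block above swaps direct asyncio usage for anyio and sniffio, which let the same client code run under either asyncio or trio. A minimal, standalone sketch of the backend detection sniffio provides (not part of this package):

import anyio
import sniffio

async def report() -> None:
    # "asyncio" under the asyncio backend, "trio" under the trio backend
    print(sniffio.current_async_library())

anyio.run(report)                   # asyncio is anyio's default backend
anyio.run(report, backend="trio")   # requires trio to be installed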
@@ -38,12 +53,18 @@ from .glide import (
  )

  if sys.version_info >= (3, 11):
-     import asyncio as async_timeout
      from typing import Self
  else:
-     import async_timeout
      from typing_extensions import Self

+ if TYPE_CHECKING:
+     import asyncio
+
+     import trio
+
+     TTask = Union[asyncio.Task[None], trio.lowlevel.Task]
+     TFuture = Union[asyncio.Future[Any], "_CompatFuture"]
+

  def get_request_error_class(
      error_type: Optional[RequestErrorType.ValueType],
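The version gate above now imports only `Self` (from `typing` on 3.11+, otherwise from `typing_extensions`), while the asyncio/trio imports move behind `TYPE_CHECKING` so they are evaluated only by type checkers. A small standalone sketch of the same `Self` pattern (the `Builder` class is illustrative, not from this package):

import sys

if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self

class Builder:
    def with_name(self, name: str) -> Self:  # resolves to the subclass type for callers
        self.name = name
        return self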
@@ -59,38 +80,168 @@ def get_request_error_class(
      return RequestError


+ class _CompatFuture:
+     """anyio shim for asyncio.Future-like functionality"""
+
+     def __init__(self) -> None:
+         self._is_done = anyio.Event()
+         self._result: Any = None
+         self._exception: Optional[Exception] = None
+
+     def set_result(self, result: Any) -> None:
+         self._result = result
+         self._is_done.set()
+
+     def set_exception(self, exception: Exception) -> None:
+         self._exception = exception
+         self._is_done.set()
+
+     def done(self) -> bool:
+         return self._is_done.is_set()
+
+     def __await__(self):
+         return self._is_done.wait().__await__()
+
+     def result(self) -> Any:
+         if self._exception:
+             raise self._exception
+
+         return self._result
+
+
+ def _get_new_future_instance() -> "TFuture":
+     if sniffio.current_async_library() == "asyncio":
+         import asyncio
+
+         return asyncio.get_running_loop().create_future()
+
+     # _CompatFuture is also compatible with asyncio, but is not as closely integrated
+     # into the asyncio event loop and thus introduces a noticeable performance
+     # degradation. so we only use it for trio
+     return _CompatFuture()
+
+
  class BaseClient(CoreCommands):
      def __init__(self, config: BaseClientConfiguration):
          """
          To create a new client, use the `create` classmethod
          """
          self.config: BaseClientConfiguration = config
-         self._available_futures: Dict[int, asyncio.Future] = {}
+         self._available_futures: Dict[int, "TFuture"] = {}
          self._available_callback_indexes: List[int] = list()
          self._buffered_requests: List[TRequest] = list()
          self._writer_lock = threading.Lock()
          self.socket_path: Optional[str] = None
-         self._reader_task: Optional[asyncio.Task] = None
+         self._reader_task: Optional["TTask"] = None
          self._is_closed: bool = False
-         self._pubsub_futures: List[asyncio.Future] = []
+         self._pubsub_futures: List["TFuture"] = []
          self._pubsub_lock = threading.Lock()
          self._pending_push_notifications: List[Response] = list()

+         self._pending_tasks: Optional[Set[Awaitable[None]]] = None
+         """asyncio-only to avoid gc on pending write tasks"""
+
+     def _create_task(self, task, *args, **kwargs):
+         """framework agnostic free-floating task shim"""
+         framework = sniffio.current_async_library()
+         if framework == "trio":
+             from functools import partial
+
+             import trio
+
+             return trio.lowlevel.spawn_system_task(partial(task, **kwargs), *args)
+         elif framework == "asyncio":
+             import asyncio
+
+             # the asyncio event loop holds weak refs to tasks, so it's recommended to
+             # hold strong refs to them during their lifetime to prevent garbage
+             # collection
+             t = asyncio.create_task(task(*args, **kwargs))
+
+             if self._pending_tasks is None:
+                 self._pending_tasks = set()
+
+             self._pending_tasks.add(t)
+             t.add_done_callback(self._pending_tasks.discard)
+
+             return t
+
+         raise RuntimeError(f"Unsupported async framework {framework}")
+
      @classmethod
      async def create(cls, config: BaseClientConfiguration) -> Self:
          """Creates a Glide client.

          Args:
-             config (ClientConfiguration): The client configurations.
-                 If no configuration is provided, a default client to "localhost":6379 will be created.
+             config (ClientConfiguration): The configuration options for the client, including cluster addresses,
+                 authentication credentials, TLS settings, periodic checks, and Pub/Sub subscriptions.

          Returns:
-             Self: a Glide Client instance.
+             Self: A promise that resolves to a connected client instance.
+
+         Examples:
+             # Connecting to a Standalone Server
+             >>> from glide import GlideClientConfiguration, NodeAddress, GlideClient, ServerCredentials, BackoffStrategy
+             >>> config = GlideClientConfiguration(
+             ...     [
+             ...         NodeAddress('primary.example.com', 6379),
+             ...         NodeAddress('replica1.example.com', 6379),
+             ...     ],
+             ...     use_tls = True,
+             ...     database_id = 1,
+             ...     credentials = ServerCredentials(username = 'user1', password = 'passwordA'),
+             ...     reconnect_strategy = BackoffStrategy(num_of_retries = 5, factor = 1000, exponent_base = 2),
+             ...     pubsub_subscriptions = GlideClientConfiguration.PubSubSubscriptions(
+             ...         channels_and_patterns = {GlideClientConfiguration.PubSubChannelModes.Exact: {'updates'}},
+             ...         callback = lambda message,context : print(message),
+             ...     ),
+             ... )
+             >>> client = await GlideClient.create(config)
+
+             # Connecting to a Cluster
+             >>> from glide import GlideClusterClientConfiguration, NodeAddress, GlideClusterClient,
+             ...     PeriodicChecksManualInterval
+             >>> config = GlideClusterClientConfiguration(
+             ...     [
+             ...         NodeAddress('address1.example.com', 6379),
+             ...         NodeAddress('address2.example.com', 6379),
+             ...     ],
+             ...     use_tls = True,
+             ...     periodic_checks = PeriodicChecksManualInterval(duration_in_sec = 30),
+             ...     credentials = ServerCredentials(username = 'user1', password = 'passwordA'),
+             ...     reconnect_strategy = BackoffStrategy(num_of_retries = 5, factor = 1000, exponent_base = 2),
+             ...     pubsub_subscriptions = GlideClusterClientConfiguration.PubSubSubscriptions(
+             ...         channels_and_patterns = {
+             ...             GlideClusterClientConfiguration.PubSubChannelModes.Exact: {'updates'},
+             ...             GlideClusterClientConfiguration.PubSubChannelModes.Sharded: {'sharded_channel'},
+             ...         },
+             ...         callback = lambda message,context : print(message),
+             ...     ),
+             ... )
+             >>> client = await GlideClusterClient.create(config)
+
+         Remarks:
+             Use this static method to create and connect a client to a Valkey server.
+             The client will automatically handle connection establishment, including cluster topology discovery and
+             handling of authentication and TLS configurations.
+
+             - **Cluster Topology Discovery**: The client will automatically discover the cluster topology based
+               on the seed addresses provided.
+             - **Authentication**: If `ServerCredentials` are provided, the client will attempt to authenticate
+               using the specified username and password.
+             - **TLS**: If `use_tls` is set to `true`, the client will establish secure connections using TLS.
+             - **Periodic Checks**: The `periodic_checks` setting allows you to configure how often the client
+               checks for cluster topology changes.
+             - **Reconnection Strategy**: The `BackoffStrategy` settings define how the client will attempt to
+               reconnect in case of disconnections.
+             - **Pub/Sub Subscriptions**: Any channels or patterns specified in `PubSubSubscriptions` will be
+               subscribed to upon connection.
+
          """
          config = config
          self = cls(config)
-         init_future: asyncio.Future = asyncio.Future()
-         loop = asyncio.get_event_loop()
+
+         init_event: threading.Event = threading.Event()

          def init_callback(socket_path: Optional[str], err: Optional[str]):
              if err is not None:
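The new `_CompatFuture` emulates the small slice of `asyncio.Future` the client needs on top of `anyio.Event`: callers await the future for completion and then read `result()`, which re-raises any stored exception. A minimal usage sketch under an anyio task group, assuming the `_CompatFuture` class shown above (`produce`/`consume` are illustrative names):

import anyio

async def produce(fut: "_CompatFuture") -> None:
    await anyio.sleep(0.1)
    fut.set_result("OK")

async def consume(fut: "_CompatFuture") -> None:
    await fut                # waits on the underlying anyio.Event
    print(fut.result())      # "OK"; would re-raise if set_exception() had been used

async def main() -> None:
    fut = _CompatFuture()
    async with anyio.create_task_group() as tg:
        tg.start_soon(produce, fut)
        tg.start_soon(consume, fut)

anyio.run(main)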
@@ -102,7 +253,7 @@ class BaseClient(CoreCommands):
              else:
                  # Received socket path
                  self.socket_path = socket_path
-                 loop.call_soon_threadsafe(init_future.set_result, True)
+                 init_event.set()

          start_socket_listener_external(init_callback=init_callback)

@@ -110,36 +261,27 @@
          # level or higher
          ClientLogger.log(LogLevel.INFO, "connection info", "new connection established")
          # Wait for the socket listener to complete its initialization
-         await init_future
+         await to_thread.run_sync(init_event.wait)
          # Create UDS connection
          await self._create_uds_connection()
+
          # Start the reader loop as a background task
-         self._reader_task = asyncio.create_task(self._reader_loop())
+         self._reader_task = self._create_task(self._reader_loop)
+
          # Set the client configurations
          await self._set_connection_configurations()
+
          return self

      async def _create_uds_connection(self) -> None:
          try:
              # Open an UDS connection
-             async with async_timeout.timeout(DEFAULT_TIMEOUT_IN_MILLISECONDS):
-                 reader, writer = await asyncio.open_unix_connection(
-                     path=self.socket_path
+             with anyio.fail_after(DEFAULT_TIMEOUT_IN_MILLISECONDS):
+                 self._stream = await anyio.connect_unix(
+                     path=cast(str, self.socket_path)
                  )
-             self._reader = reader
-             self._writer = writer
          except Exception as e:
-             await self.close(f"Failed to create UDS connection: {e}")
-             raise
-
-     def __del__(self) -> None:
-         try:
-             if self._reader_task:
-                 self._reader_task.cancel()
-         except RuntimeError as e:
-             if "no running event loop" in str(e):
-                 # event loop already closed
-                 pass
+             raise ClosingError("Failed to create UDS connection") from e

      async def close(self, err_message: Optional[str] = None) -> None:
          """
@@ -147,28 +289,28 @@ class BaseClient(CoreCommands):
          All open futures will be closed with an exception.

          Args:
-             err_message (Optional[str]): If not None, this error message will be passed along with the exceptions when closing all open futures.
+             err_message (Optional[str]): If not None, this error message will be passed along with the exceptions when
+                 closing all open futures.
                  Defaults to None.
          """
-         self._is_closed = True
-         for response_future in self._available_futures.values():
-             if not response_future.done():
-                 err_message = "" if err_message is None else err_message
-                 response_future.set_exception(ClosingError(err_message))
-         try:
-             self._pubsub_lock.acquire()
-             for pubsub_future in self._pubsub_futures:
-                 if not pubsub_future.done() and not pubsub_future.cancelled():
-                     pubsub_future.set_exception(ClosingError(""))
-         finally:
-             self._pubsub_lock.release()
+         if not self._is_closed:
+             self._is_closed = True
+             err_message = "" if err_message is None else err_message
+             for response_future in self._available_futures.values():
+                 if not response_future.done():
+                     response_future.set_exception(ClosingError(err_message))
+             try:
+                 self._pubsub_lock.acquire()
+                 for pubsub_future in self._pubsub_futures:
+                     if not pubsub_future.done():
+                         pubsub_future.set_exception(ClosingError(err_message))
+             finally:
+                 self._pubsub_lock.release()

-         self._writer.close()
-         await self._writer.wait_closed()
-         self.__del__()
+             await self._stream.aclose()

-     def _get_future(self, callback_idx: int) -> asyncio.Future:
-         response_future: asyncio.Future = asyncio.Future()
+     def _get_future(self, callback_idx: int) -> "TFuture":
+         response_future: "TFuture" = _get_new_future_instance()
          self._available_futures.update({callback_idx: response_future})
          return response_future

@@ -177,14 +319,15 @@ class BaseClient(CoreCommands):

      async def _set_connection_configurations(self) -> None:
          conn_request = self._get_protobuf_conn_request()
-         response_future: asyncio.Future = self._get_future(0)
-         await self._write_or_buffer_request(conn_request)
+         response_future: "TFuture" = self._get_future(0)
+         self._create_write_task(conn_request)
          await response_future
-         if response_future.result() is not OK:
-             raise ClosingError(response_future.result())
+         res = response_future.result()
+         if res is not OK:
+             raise ClosingError(res)

      def _create_write_task(self, request: TRequest):
-         asyncio.create_task(self._write_or_buffer_request(request))
+         self._create_task(self._write_or_buffer_request, request)

      async def _write_or_buffer_request(self, request: TRequest):
          self._buffered_requests.append(request)
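`_create_write_task` now routes through the `_create_task` shim shown earlier. On asyncio, that shim keeps a strong reference to each fire-and-forget task, since the event loop itself only holds weak references; a standalone sketch of that bookkeeping pattern (names are illustrative):

import asyncio
from typing import Set

pending: Set[asyncio.Task] = set()

def spawn(coro) -> asyncio.Task:
    task = asyncio.create_task(coro)
    pending.add(task)                        # strong reference for the task's lifetime
    task.add_done_callback(pending.discard)  # drop it once the task finishes
    return task

async def main() -> None:
    spawn(asyncio.sleep(0.1))
    await asyncio.sleep(0.2)

asyncio.run(main())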
@@ -192,7 +335,21 @@ class BaseClient(CoreCommands):
          try:
              while len(self._buffered_requests) > 0:
                  await self._write_buffered_requests_to_socket()
-
+         except Exception as e:
+             # trio system tasks cannot raise exceptions, so gracefully propagate
+             # any error to the pending future instead
+             callback_idx = (
+                 request.callback_idx if isinstance(request, CommandRequest) else 0
+             )
+             res_future = self._available_futures.pop(callback_idx, None)
+             if res_future:
+                 res_future.set_exception(e)
+             else:
+                 ClientLogger.log(
+                     LogLevel.WARN,
+                     "unhandled response error",
+                     f"Unhandled response error for unknown request: {callback_idx}",
+                 )
          finally:
              self._writer_lock.release()

@@ -202,8 +359,7 @@ class BaseClient(CoreCommands):
          b_arr = bytearray()
          for request in requests:
              ProtobufCodec.encode_delimited(b_arr, request)
-         self._writer.write(b_arr)
-         await self._writer.drain()
+         await self._stream.send(b_arr)

      def _encode_arg(self, arg: TEncodable) -> bytes:
          """
@@ -271,10 +427,15 @@ class BaseClient(CoreCommands):
          set_protobuf_route(request, route)
          return await self._write_request_await_response(request)

-     async def _execute_transaction(
+     async def _execute_batch(
          self,
          commands: List[Tuple[RequestType.ValueType, List[TEncodable]]],
+         is_atomic: bool,
+         raise_on_error: bool = False,
+         retry_server_error: bool = False,
+         retry_connection_error: bool = False,
          route: Optional[Route] = None,
+         timeout: Optional[int] = None,
      ) -> List[TResult]:
          if self._is_closed:
              raise ClosingError(
@@ -282,7 +443,7 @@
              )
          request = CommandRequest()
          request.callback_idx = self._get_callback_index()
-         transaction_commands = []
+         batch_commands = []
          for requst_type, args in commands:
              command = Command()
              command.request_type = requst_type
@@ -293,8 +454,14 @@
                  command.args_array.args[:] = encoded_args
              else:
                  command.args_vec_pointer = create_leaked_bytes_vec(encoded_args)
-             transaction_commands.append(command)
-         request.transaction.commands.extend(transaction_commands)
+             batch_commands.append(command)
+         request.batch.commands.extend(batch_commands)
+         request.batch.is_atomic = is_atomic
+         request.batch.raise_on_error = raise_on_error
+         if timeout is not None:
+             request.batch.timeout = timeout
+         request.batch.retry_server_error = retry_server_error
+         request.batch.retry_connection_error = retry_connection_error
          set_protobuf_route(request, route)
          return await self._write_request_await_response(request)

@@ -346,14 +513,15 @@ class BaseClient(CoreCommands):
          )

          # locking might not be required
-         response_future: asyncio.Future = asyncio.Future()
+         response_future: "TFuture" = _get_new_future_instance()
          try:
              self._pubsub_lock.acquire()
              self._pubsub_futures.append(response_future)
              self._complete_pubsub_futures_safe()
          finally:
              self._pubsub_lock.release()
-         return await response_future
+         await response_future
+         return response_future.result()

      def try_get_pubsub_message(self) -> Optional[CoreCommands.PubSubMsg]:
          if self._is_closed:
@@ -386,8 +554,7 @@ class BaseClient(CoreCommands):
      def _cancel_pubsub_futures_with_exception_safe(self, exception: ConnectionError):
          while len(self._pubsub_futures):
              next_future = self._pubsub_futures.pop(0)
-             if not next_future.cancelled():
-                 next_future.set_exception(exception)
+             next_future.set_exception(exception)

      def _notification_to_pubsub_message_safe(
          self, response: Response
@@ -467,10 +634,16 @@ class BaseClient(CoreCommands):
                  if response.HasField("closing_error")
                  else f"Client Error - closing due to unknown error. callback index: {response.callback_idx}"
              )
+             exc = ClosingError(err_msg)
              if res_future is not None:
-                 res_future.set_exception(ClosingError(err_msg))
-             await self.close(err_msg)
-             raise ClosingError(err_msg)
+                 res_future.set_exception(exc)
+             else:
+                 ClientLogger.log(
+                     LogLevel.WARN,
+                     "unhandled response error",
+                     f"Unhandled response error for unknown request: {response.callback_idx}",
+                 )
+             raise exc
          else:
              self._available_callback_indexes.append(response.callback_idx)
              if response.HasField("request_error"):
@@ -490,9 +663,7 @@ class BaseClient(CoreCommands):
                  if response.HasField("closing_error")
                  else "Client Error - push notification without resp_pointer"
              )
-             await self.close(err_msg)
              raise ClosingError(err_msg)
-
          try:
              self._pubsub_lock.acquire()
              callback, context = self.config._get_pubsub_callback_and_context()
@@ -508,30 +679,36 @@ class BaseClient(CoreCommands):

      async def _reader_loop(self) -> None:
          # Socket reader loop
-         remaining_read_bytes = bytearray()
-         while True:
-             read_bytes = await self._reader.read(DEFAULT_READ_BYTES_SIZE)
-             if len(read_bytes) == 0:
-                 err_msg = "The communication layer was unexpectedly closed."
-                 await self.close(err_msg)
-                 raise ClosingError(err_msg)
-             read_bytes = remaining_read_bytes + bytearray(read_bytes)
-             read_bytes_view = memoryview(read_bytes)
-             offset = 0
-             while offset <= len(read_bytes):
+         try:
+             remaining_read_bytes = bytearray()
+             while True:
                  try:
-                     response, offset = ProtobufCodec.decode_delimited(
-                         read_bytes, read_bytes_view, offset, Response
+                     read_bytes = await self._stream.receive(DEFAULT_READ_BYTES_SIZE)
+                 except (anyio.ClosedResourceError, anyio.EndOfStream):
+                     raise ClosingError(
+                         "The communication layer was unexpectedly closed."
                      )
-                 except PartialMessageException:
-                     # Received only partial response, break the inner loop
-                     remaining_read_bytes = read_bytes[offset:]
-                     break
-                 response = cast(Response, response)
-                 if response.is_push:
-                     await self._process_push(response=response)
-                 else:
-                     await self._process_response(response=response)
+                 read_bytes = remaining_read_bytes + bytearray(read_bytes)
+                 read_bytes_view = memoryview(read_bytes)
+                 offset = 0
+                 while offset <= len(read_bytes):
+                     try:
+                         response, offset = ProtobufCodec.decode_delimited(
+                             read_bytes, read_bytes_view, offset, Response
+                         )
+                     except PartialMessageException:
+                         # Received only partial response, break the inner loop
+                         remaining_read_bytes = read_bytes[offset:]
+                         break
+                     response = cast(Response, response)
+                     if response.is_push:
+                         await self._process_push(response=response)
+                     else:
+                         await self._process_response(response=response)
+         except Exception as e:
+             # close and stop reading at terminal exceptions from incoming responses or
+             # stream closures
+             await self.close(str(e))

      async def get_statistics(self) -> dict:
          return get_statistics()
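The rewritten `_reader_loop` keeps the same framing strategy as before, carrying leftover bytes between reads, but now terminates on `anyio.EndOfStream`/`anyio.ClosedResourceError` rather than an empty read. A generic sketch of that buffering loop using newline framing (illustrative only; the real loop decodes delimited protobuf messages via ProtobufCodec):

import anyio
from anyio.abc import ByteReceiveStream

async def read_frames(stream: ByteReceiveStream) -> None:
    leftover = bytearray()
    while True:
        try:
            chunk = await stream.receive(4096)
        except (anyio.EndOfStream, anyio.ClosedResourceError):
            break                              # peer closed, or the stream was closed locally
        leftover += chunk
        while b"\n" in leftover:
            frame, _, rest = bytes(leftover).partition(b"\n")
            leftover = bytearray(rest)
            print("frame:", frame)             # process one complete message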
@@ -556,8 +733,9 @@ class BaseClient(CoreCommands):
  class GlideClusterClient(BaseClient, ClusterCommands):
      """
      Client used for connection to cluster servers.
+     Use :func:`~BaseClient.create` to request a client.
      For full documentation, see
-     https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#cluster
+     [Valkey GLIDE Wiki](https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#cluster)
      """

      async def _cluster_scan(
@@ -596,8 +774,9 @@ class GlideClusterClient(BaseClient, ClusterCommands):
  class GlideClient(BaseClient, StandaloneCommands):
      """
      Client used for connection to standalone servers.
+     Use :func:`~BaseClient.create` to request a client.
      For full documentation, see
-     https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#standalone
+     [Valkey GLIDE Wiki](https://github.com/valkey-io/valkey-glide/wiki/Python-wrapper#standalone)
      """


glide/logger.py CHANGED
@@ -24,7 +24,7 @@ class Logger:
      The logger can be set up in 2 ways -
      1. By calling Logger.init, which configures the logger only if it wasn't previously configured.
      2. By calling Logger.set_logger_config, which replaces the existing configuration, and means that new logs will not be
-     saved with the logs that were sent before the call.
+        saved with the logs that were sent before the call.
      If set_logger_config wasn't called, the first log attempt will initialize a new logger with default configuration decided
      by the Rust core.
      """