valkey-glide 2.0.0rc5__pp311-pypy311_pp73-macosx_11_0_arm64.whl → 2.0.1__pp311-pypy311_pp73-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

glide/__init__.py CHANGED
@@ -7,6 +7,11 @@ from glide.async_commands.batch import (
  TBatch,
  Transaction,
  )
+ from glide.async_commands.batch_options import (
+ BatchOptions,
+ BatchRetryStrategy,
+ ClusterBatchOptions,
+ )
  from glide.async_commands.bitmap import (
  BitEncoding,
  BitFieldGet,
@@ -125,6 +130,7 @@ from glide.config import (
  ProtocolVersion,
  ReadFrom,
  ServerCredentials,
+ TlsAdvancedConfiguration,
  )
  from glide.constants import (
  OK,
@@ -168,7 +174,13 @@ from glide.routes import (
  SlotType,
  )

- from .glide import ClusterScanCursor, Script
+ from .glide import (
+ ClusterScanCursor,
+ OpenTelemetryConfig,
+ OpenTelemetryMetricsConfig,
+ OpenTelemetryTracesConfig,
+ Script,
+ )

  PubSubMsg = CoreCommands.PubSubMsg

@@ -182,6 +194,10 @@ __all__ = [
  "Transaction",
  "TGlideClient",
  "TBatch",
+ # Batch Options
+ "BatchOptions",
+ "BatchRetryStrategy",
+ "ClusterBatchOptions",
  # Config
  "AdvancedGlideClientConfiguration",
  "AdvancedGlideClusterClientConfiguration",
@@ -191,6 +207,9 @@ __all__ = [
  "ReadFrom",
  "ServerCredentials",
  "NodeAddress",
+ "OpenTelemetryConfig",
+ "OpenTelemetryTracesConfig",
+ "OpenTelemetryMetricsConfig",
  "ProtocolVersion",
  "PeriodicChecksManualInterval",
  "PeriodicChecksStatus",
@@ -205,6 +224,7 @@ __all__ = [
  "TJsonUniversalResponse",
  "TOK",
  "TResult",
+ "TlsAdvancedConfiguration",
  "TXInfoStreamFullResponse",
  "TXInfoStreamResponse",
  "FtAggregateResponse",
glide/async_commands/batch.py CHANGED
@@ -1,9 +1,9 @@
  # Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0

+ import sys
  import threading
  from typing import List, Mapping, Optional, Tuple, TypeVar, Union

- from deprecated import deprecated
  from glide.async_commands.bitmap import (
  BitFieldGet,
  BitFieldSubCommands,
@@ -55,8 +55,14 @@ from glide.async_commands.stream import (
  _create_xpending_range_args,
  )
  from glide.constants import TEncodable
+ from glide.exceptions import RequestError
  from glide.protobuf.command_request_pb2 import RequestType

+ if sys.version_info >= (3, 13):
+ from warnings import deprecated
+ else:
+ from typing_extensions import deprecated
+
  TBatch = TypeVar("TBatch", bound="BaseBatch")

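
The import hunks above drop the third-party deprecated package in favor of the PEP 702 @deprecated decorator, taken from warnings on Python 3.13+ and from typing_extensions otherwise. A minimal standalone sketch of that fallback pattern and the warning it produces (not valkey-glide code; it assumes typing_extensions is installed on interpreters older than 3.13):

    import sys
    import warnings

    if sys.version_info >= (3, 13):
        from warnings import deprecated
    else:
        from typing_extensions import deprecated  # PEP 702 backport

    @deprecated("Use NewThing instead.")
    class OldThing:
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OldThing()  # instantiation emits a DeprecationWarning with the message above

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
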
@@ -109,7 +115,7 @@ class BaseBatch:

  def get(self: TBatch, key: TEncodable) -> TBatch:
  """
- Get the value associated with the given key, or null if no such value exists.
+ Get the value associated with the given key, or null if no such key exists.

  See [valkey.io](https://valkey.io/commands/get/) for details.

@@ -273,6 +279,10 @@ class BaseBatch:
  [custom command](https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#custom-command)
  for details on the restrictions and limitations of the custom command API.

+ This function should only be used for single-response commands. Commands that don't return complete response and awaits
+ (such as SUBSCRIBE), or that return potentially more than a single response (such as XREAD), or that change the
+ client's behavior (such as entering pub/sub mode on RESP2 connections) shouldn't be called using this function.
+
  Args:
  command_args (List[TEncodable]): List of command arguments.
  Every part of the command, including the command name and subcommands, should be added as a
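
The added caveat restricts custom_command to commands that produce exactly one complete reply. A hedged sketch of queueing such a command on a batch (it assumes Batch is defined in glide.async_commands.batch, which the Transaction subclass later in this diff implies):

    from glide.async_commands.batch import Batch

    batch = Batch(is_atomic=False)
    # A single-reply command is fine to route through custom_command.
    batch.custom_command(["OBJECT", "ENCODING", "my_key"])
    # Commands such as SUBSCRIBE or XREAD, which can produce partial or multiple
    # replies, should not be sent this way per the new docstring.
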
@@ -312,6 +322,8 @@ class BaseBatch:
  """
  Get information and statistics about the server.

+ Starting from server version 7, command supports multiple section arguments.
+
  See [valkey.io](https://valkey.io/commands/info/) for details.

  Args:
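
A hedged sketch of the multi-section form mentioned above; it assumes the batch info() method accepts a list of InfoSection values and that InfoSection is exported from the package root, neither of which is shown in this diff:

    from glide import InfoSection
    from glide.async_commands.batch import Batch

    batch = Batch(is_atomic=False)
    # On server version 7 and newer, several sections can be requested at once.
    batch.info([InfoSection.SERVER, InfoSection.REPLICATION])
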
@@ -1057,7 +1069,7 @@ class BaseBatch:
  Command response:
  Optional[Mapping[bytes, List[bytes]]]: A map of `key` name mapped to an array of popped elements.

- None if no elements could be popped.
+ `None` if no elements could be popped.

  Since: Valkey version 7.0.0.
  """
@@ -1683,8 +1695,8 @@ class BaseBatch:
  Commands response:
  TOK: A simple "OK" response.

- If `start` exceeds the end of the list, or if `start` is greater than `end`, the result will be an empty list
- (which causes `key` to be removed).
+ If `start` exceeds the end of the list, or if `start` is greater than `end`, the list is emptied
+ and the key is removed.

  If `end` exceeds the actual end of the list, it will be treated like the last element of the list.

@@ -1700,16 +1712,18 @@ class BaseBatch:
  ) -> TBatch:
  """
  Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`.
- If `count` is positive, it removes elements equal to `element` moving from head to tail.
- If `count` is negative, it removes elements equal to `element` moving from tail to head.
- If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
- equal to `element`.

  See [valkey.io](https://valkey.io/commands/lrem/) for more details.

  Args:
  key (TEncodable): The key of the list.
  count (int): The count of occurrences of elements equal to `element` to remove.
+
+ - If `count` is positive, it removes elements equal to `element` moving from head to tail.
+ - If `count` is negative, it removes elements equal to `element` moving from tail to head.
+ - If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
+ equal to `element`.
+
  element (TEncodable): The element to remove from the list.

  Commands response:
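
A hedged sketch of the count semantics now listed under Args (it assumes rpush is available on the batch alongside lrem):

    from glide.async_commands.batch import Batch

    batch = Batch(is_atomic=False)
    batch.rpush("mylist", ["a", "b", "a", "c", "a"])
    batch.lrem("mylist", 2, "a")   # positive count: remove two "a" values, head to tail
    batch.lrem("mylist", -1, "a")  # negative count: remove one "a", tail to head
    batch.lrem("mylist", 0, "a")   # zero: remove every remaining "a"
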
@@ -2306,6 +2320,10 @@ class BaseBatch:
  args.append("REPLACE")
  if absttl is True:
  args.append("ABSTTL")
+ if idletime is not None and frequency is not None:
+ raise RequestError(
+ "syntax error: IDLETIME and FREQ cannot be set at the same time."
+ )
  if idletime is not None:
  args.extend(["IDLETIME", str(idletime)])
  if frequency is not None:
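
This argument builder belongs to the RESTORE command, so 2.0.1 rejects the IDLETIME/FREQ combination on the client instead of forwarding it to the server. A hedged sketch (the restore() parameter names are inferred from the code above and not shown in full in this diff):

    from glide.async_commands.batch import Batch
    from glide.exceptions import RequestError

    batch = Batch(is_atomic=False)
    try:
        # Placeholder payload; a real call would pass bytes produced by DUMP.
        batch.restore("dst_key", 0, b"<dump-payload>", idletime=10, frequency=5)
    except RequestError as exc:
        print(exc)  # syntax error: IDLETIME and FREQ cannot be set at the same time.
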
@@ -2412,26 +2430,26 @@ class BaseBatch:

  Args:
  key (TEncodable): The key of the stream.
- start (StreamRangeBound): The starting stream ID bound for the range.
+ start (StreamRangeBound): The starting stream entry ID bound for the range.

- - Use `IdBound` to specify a stream ID.
- - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `IdBound` to specify a stream entry ID.
+ - Since Valkey 6.2.0, use `ExclusiveIdBound` to specify an exclusive bounded stream entry ID.
  - Use `MinId` to start with the minimum available ID.

- end (StreamRangeBound): The ending stream ID bound for the range.
+ end (StreamRangeBound): The ending stream entry ID bound for the range.

- - Use `IdBound` to specify a stream ID.
- - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `IdBound` to specify a stream entry ID.
+ - Since Valkey 6.2.0, use `ExclusiveIdBound` to specify an exclusive bounded stream entry ID.
  - Use `MaxId` to end with the maximum available ID.

  count (Optional[int]): An optional argument specifying the maximum count of stream entries to return.
  If `count` is not provided, all stream entries in the range will be returned.

  Command response:
- Optional[Mapping[bytes, List[List[bytes]]]]: A mapping of stream IDs to stream entry data, where entry data is a
+ Optional[Mapping[bytes, List[List[bytes]]]]: A mapping of stream entry IDs to stream entry data, where entry data is a
  list of pairings with format `[[field, entry], [field, entry], ...]`.

- Returns None if the range arguments are not applicable.
+ Returns None if the range arguments are not applicable. Or if count is non-positive.
  """
  args = [key, start.to_arg(), end.to_arg()]
  if count is not None:
@@ -2454,26 +2472,26 @@ class BaseBatch:

  Args:
  key (TEncodable): The key of the stream.
- end (StreamRangeBound): The ending stream ID bound for the range.
+ end (StreamRangeBound): The ending stream entry ID bound for the range.

- - Use `IdBound` to specify a stream ID.
- - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `IdBound` to specify a stream entry ID.
+ - Since Valkey 6.2.0, use `ExclusiveIdBound` to specify an exclusive bounded stream entry ID.
  - Use `MaxId` to end with the maximum available ID.

- start (StreamRangeBound): The starting stream ID bound for the range.
+ start (StreamRangeBound): The starting stream entry ID bound for the range.

- - Use `IdBound` to specify a stream ID.
- - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID.
+ - Use `IdBound` to specify a stream entry ID.
+ - Since Valkey 6.2.0, use `ExclusiveIdBound` to specify an exclusive bounded stream entry ID.
  - Use `MinId` to start with the minimum available ID.

  count (Optional[int]): An optional argument specifying the maximum count of stream entries to return.
  If `count` is not provided, all stream entries in the range will be returned.

  Command response:
- Optional[Mapping[bytes, List[List[bytes]]]]: A mapping of stream IDs to stream entry data, where entry data is a
+ Optional[Mapping[bytes, List[List[bytes]]]]: A mapping of stream entry IDs to stream entry data, where entry data is a
  list of pairings with format `[[field, entry], [field, entry], ...]`.

- Returns None if the range arguments are not applicable.
+ Returns None if the range arguments are not applicable. Or if count is non-positive.
  """
  args = [key, end.to_arg(), start.to_arg()]
  if count is not None:
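
A hedged sketch of the bound types referenced in the two hunks above (the import path and constructors for IdBound, ExclusiveIdBound, and MaxId are assumptions; only their names appear in this diff):

    from glide.async_commands.batch import Batch
    from glide.async_commands.stream import ExclusiveIdBound, IdBound, MaxId

    batch = Batch(is_atomic=False)
    # Entries strictly after 1526985054069-0 up to the newest entry, capped at 10.
    # ExclusiveIdBound requires Valkey 6.2.0 or newer.
    batch.xrange("mystream", ExclusiveIdBound("1526985054069-0"), MaxId(), count=10)
    # The same range walked newest-first; note the end bound comes before the start.
    batch.xrevrange("mystream", MaxId(), IdBound("1526985054069-0"))
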
@@ -2787,7 +2805,7 @@ class BaseBatch:
  min_idle_time_ms (int): Filters the claimed entries to those that have been idle for more than the specified
  value.
  start (TEncodable): Filters the claimed entries to those that have an ID equal or greater than the specified value.
- count (Optional[int]): Limits the number of claimed entries to the specified value.
+ count (Optional[int]): Limits the number of claimed entries to the specified value. Default value is 100.

  Command response:
  List[Union[str, Mapping[bytes, List[List[bytes]]], List[bytes]]]: A list containing the following elements:
@@ -2832,7 +2850,7 @@ class BaseBatch:
  min_idle_time_ms (int): Filters the claimed entries to those that have been idle for more than the specified
  value.
  start (TEncodable): Filters the claimed entries to those that have an ID equal or greater than the specified value.
- count (Optional[int]): Limits the number of claimed entries to the specified value.
+ count (Optional[int]): Limits the number of claimed entries to the specified value. Default value is 100.

  Command response:
  List[Union[bytes, List[bytes]]]: A list containing the following elements:
@@ -3263,7 +3281,7 @@ class BaseBatch:
  Commands response:
  Optional[float]: The score of the member.

- If there was a conflict with choosing the XX/NX/LT/GT options, the operation aborts and None is returned.
+ If there was a conflict with choosing the XX/NX/LT/GT options, the operation aborts and `None` is returned.
  """
  args = [key]
  if existing_options:
@@ -4264,10 +4282,10 @@ class BaseBatch:
  elements (List[TEncodable]): A list of members to add to the HyperLogLog stored at `key`.

  Commands response:
- int: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
- altered, then returns 1.
+ bool: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
+ altered, then returns `True`.

- Otherwise, returns 0.
+ Otherwise, returns `False`.
  """
  return self.append_command(RequestType.PfAdd, [key] + elements)

@@ -5573,13 +5591,13 @@ class ClusterBatch(BaseBatch):
  # TODO: add all CLUSTER commands


- @deprecated(reason="Use ClusterBatch(is_atomic=True) instead.")
+ @deprecated("Use Batch(is_atomic=True) instead.")
  class Transaction(Batch):
  def __init__(self):
  super().__init__(is_atomic=True)


- @deprecated(reason="Use ClusterBatch(is_atomic=True) instead.")
+ @deprecated("Use ClusterBatch(is_atomic=True) instead.")
  class ClusterTransaction(ClusterBatch):
  def __init__(self):
  super().__init__(is_atomic=True)
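
Besides switching decorators, this hunk corrects the message on Transaction, which previously pointed at ClusterBatch. A hedged migration sketch using only names shown in this diff:

    from glide.async_commands.batch import Batch, ClusterBatch, ClusterTransaction, Transaction

    tx = Transaction()                 # DeprecationWarning: Use Batch(is_atomic=True) instead.
    cluster_tx = ClusterTransaction()  # DeprecationWarning: Use ClusterBatch(is_atomic=True) instead.

    # Preferred equivalents going forward:
    atomic_batch = Batch(is_atomic=True)
    atomic_cluster_batch = ClusterBatch(is_atomic=True)
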
glide/async_commands/batch_options.py ADDED
@@ -0,0 +1,261 @@
+ # Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+
+ from typing import Optional
+
+ from glide.constants import TSingleNodeRoute
+
+
+ class BatchRetryStrategy:
+ """
+ Defines a retry strategy for cluster batch requests, allowing control over retries in case of
+ server or connection errors.
+
+ This strategy determines whether failed commands should be retried, impacting execution order
+ and potential side effects.
+
+ Behavior:
+ - If `retry_server_error` is `True`, failed commands with a retriable error (e.g.,
+ `TRYAGAIN`) will be retried.
+ - If `retry_connection_error` is `True`, batch requests will be retried on
+ connection failures.
+
+ Cautions:
+ - **Server Errors:** Retrying may cause commands targeting the same slot to be executed
+ out of order.
+ - **Connection Errors:** Retrying may lead to duplicate executions, since the server might
+ have already received and processed the request before the error occurred.
+
+ Example Scenario:
+ ```
+ MGET key {key}:1
+ SET key "value"
+ ```
+
+ Expected response when keys are empty:
+ ```
+ [None, None]
+ "OK"
+ ```
+
+ However, if the slot is migrating, both commands may return an `ASK` error and be
+ redirected. Upon `ASK` redirection, a multi-key command may return a `TRYAGAIN`
+ error (triggering a retry), while the `SET` command succeeds immediately. This
+ can result in an unintended reordering of commands if the first command is retried
+ after the slot stabilizes:
+ ```
+ ["value", None]
+ "OK"
+ ```
+
+ Note:
+ Currently, retry strategies are supported only for non-atomic batches.
+
+ Default:
+ Both `retry_server_error` and `retry_connection_error` are set to `False`.
+
+ Args:
+ retry_server_error (bool): If `True`, failed commands with a retriable error (e.g., `TRYAGAIN`)
+ will be automatically retried.
+
+ ⚠️ **Warning:** Enabling this flag may cause commands targeting the same slot to execute
+ out of order.
+
+ By default, this is set to `False`.
+
+ retry_connection_error (bool): If `True`, batch requests will be retried in case of connection errors.
+
+ ⚠️ **Warning:** Retrying after a connection error may lead to duplicate executions, since
+ the server might have already received and processed the request before the error occurred.
+
+ By default, this is set to `False`.
+
+ """
+
+ def __init__(
+ self,
+ retry_server_error: bool = False,
+ retry_connection_error: bool = False,
+ ):
+ """
+ Initialize a BatchRetryStrategy.
+
+ Args:
+ retry_server_error (bool): If `True`, failed commands with a retriable error (e.g., `TRYAGAIN`)
+ will be automatically retried.
+
+ ⚠️ **Warning:** Enabling this flag may cause commands targeting the same slot to execute
+ out of order.
+
+ By default, this is set to `False`.
+
+ retry_connection_error (bool): If `True`, batch requests will be retried in case of connection errors.
+
+ ⚠️ **Warning:** Retrying after a connection error may lead to duplicate executions, since
+ the server might have already received and processed the request before the error occurred.
+
+ By default, this is set to `False`.
+
+ """
+ self.retry_server_error = retry_server_error
+ self.retry_connection_error = retry_connection_error
+
+
+ class BaseBatchOptions:
+ """
+ Base options settings class for sending a batch request. Shared settings for standalone and
+ cluster batch requests.
+
+ Args:
+ timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+ to complete. This duration encompasses sending the request, awaiting a response from the server,
+ and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+ it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+ """
+
+ def __init__(
+ self,
+ timeout: Optional[int] = None,
+ ):
+ """
+ Initialize BaseBatchOptions.
+
+ Args:
+ timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+ to complete. This duration encompasses sending the request, awaiting a response from the server,
+ and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+ it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+ """
+ self.timeout = timeout
+
+
+ class BatchOptions(BaseBatchOptions):
+ """
+ Options for a batch request for a standalone client.
+
+ Args:
+ timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+ to complete. This duration encompasses sending the request, awaiting a response from the server,
+ and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+ it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+ """
+
+ def __init__(
+ self,
+ timeout: Optional[int] = None,
+ ):
+ """
+ Options for a batch request for a standalone client
+
+ Args:
+ timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+ to complete. This duration encompasses sending the request, awaiting a response from the server,
+ and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+ it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+ """
+ super().__init__(timeout)
+
+
+ class ClusterBatchOptions(BaseBatchOptions):
+ """
+ Options for cluster batch operations.
+
+ Args:
+ timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+ to complete. This duration encompasses sending the request, awaiting a response from the server,
+ and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+ it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+
+ route (Optional[TSingleNodeRoute]): Configures single-node routing for the batch request. The client
+ will send the batch to the specified node defined by `route`.
+
+ If a redirection error occurs:
+
+ - For Atomic Batches (Transactions), the entire transaction will be redirected.
+ - For Non-Atomic Batches (Pipelines), only the commands that encountered redirection errors
+ will be redirected.
+
+ retry_strategy (Optional[BatchRetryStrategy]): ⚠️ **Please see `BatchRetryStrategy` and read carefully before enabling these
+ options.**
+
+ Defines the retry strategy for handling cluster batch request failures.
+
+ This strategy determines whether failed commands should be retried, potentially impacting
+ execution order.
+
+ - If `retry_server_error` is `True`, retriable errors (e.g., TRYAGAIN) will
+ trigger a retry.
+ - If `retry_connection_error` is `True`, connection failures will trigger a
+ retry.
+
+ **Warnings:**
+
+ - Retrying server errors may cause commands targeting the same slot to execute out of
+ order.
+ - Retrying connection errors may lead to duplicate executions, as it is unclear which
+ commands have already been processed.
+
+ **Note:** Currently, retry strategies are supported only for non-atomic batches.
+
+ **Recommendation:** It is recommended to increase the timeout in `timeout`
+ when enabling these strategies.
+
+ **Default:** Both `retry_server_error` and `retry_connection_error` are set to
+ `False`.
+
+ """
+
+ def __init__(
+ self,
+ timeout: Optional[int] = None,
+ route: Optional[TSingleNodeRoute] = None,
+ retry_strategy: Optional[BatchRetryStrategy] = None,
+ ):
+ """
+ Initialize ClusterBatchOptions.
+
+ Args:
+ timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+ to complete. This duration encompasses sending the request, awaiting a response from the server,
+ and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+ it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+
+ route (Optional[TSingleNodeRoute]): Configures single-node routing for the batch request. The client
+ will send the batch to the specified node defined by `route`.
+
+ If a redirection error occurs:
+
+ - For Atomic Batches (Transactions), the entire transaction will be redirected.
+ - For Non-Atomic Batches (Pipelines), only the commands that encountered redirection errors
+ will be redirected.
+
+ retry_strategy (Optional[BatchRetryStrategy]): ⚠️ **Please see `BatchRetryStrategy` and read carefully before enabling these
+ options.**
+
+ Defines the retry strategy for handling cluster batch request failures.
+
+ This strategy determines whether failed commands should be retried, potentially impacting
+ execution order.
+
+ - If `retry_server_error` is `True`, retriable errors (e.g., TRYAGAIN) will
+ trigger a retry.
+ - If `retry_connection_error` is `True`, connection failures will trigger a
+ retry.
+
+ **Warnings:**
+
+ - Retrying server errors may cause commands targeting the same slot to execute out of
+ order.
+ - Retrying connection errors may lead to duplicate executions, as it is unclear which
+ commands have already been processed.
+
+ **Note:** Currently, retry strategies are supported only for non-atomic batches.
+
+ **Recommendation:** It is recommended to increase the timeout in `timeout`
+ when enabling these strategies.
+
+ **Default:** Both `retry_server_error` and `retry_connection_error` are set to
+ `False`.
+ """
+ super().__init__(timeout)
+ self.retry_strategy = retry_strategy
+ self.route = route
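
To close, a hedged end-to-end sketch that wires the new option classes into a cluster batch execution. The GlideClusterClient.exec(...) signature and the client setup are assumptions based on the wider 2.x API; this diff only defines the option classes themselves:

    import asyncio

    from glide import (
        BatchRetryStrategy,
        ClusterBatchOptions,
        GlideClusterClient,
        GlideClusterClientConfiguration,
        NodeAddress,
    )
    from glide.async_commands.batch import ClusterBatch


    async def main() -> None:
        config = GlideClusterClientConfiguration([NodeAddress("localhost", 7000)])
        client = await GlideClusterClient.create(config)

        batch = ClusterBatch(is_atomic=False)
        batch.set("key", "value")
        batch.get("key")

        options = ClusterBatchOptions(
            timeout=2000,  # milliseconds; worth raising when retries are enabled
            retry_strategy=BatchRetryStrategy(retry_server_error=True),
        )
        # Passing the options to exec(...) this way is assumed, not shown in this diff.
        results = await client.exec(batch, raise_on_error=True, options=options)
        print(results)

        await client.close()


    asyncio.run(main())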