valkey-glide 2.0.0rc6__cp312-cp312-macosx_10_7_x86_64.whl → 2.0.1__cp312-cp312-macosx_10_7_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of valkey-glide might be problematic.

glide/__init__.py CHANGED
@@ -7,6 +7,11 @@ from glide.async_commands.batch import (
     TBatch,
     Transaction,
 )
+from glide.async_commands.batch_options import (
+    BatchOptions,
+    BatchRetryStrategy,
+    ClusterBatchOptions,
+)
 from glide.async_commands.bitmap import (
     BitEncoding,
     BitFieldGet,
@@ -125,6 +130,7 @@ from glide.config import (
     ProtocolVersion,
     ReadFrom,
     ServerCredentials,
+    TlsAdvancedConfiguration,
 )
 from glide.constants import (
     OK,
@@ -168,7 +174,13 @@ from glide.routes import (
     SlotType,
 )

-from .glide import ClusterScanCursor, Script
+from .glide import (
+    ClusterScanCursor,
+    OpenTelemetryConfig,
+    OpenTelemetryMetricsConfig,
+    OpenTelemetryTracesConfig,
+    Script,
+)

 PubSubMsg = CoreCommands.PubSubMsg

@@ -182,6 +194,10 @@ __all__ = [
     "Transaction",
     "TGlideClient",
     "TBatch",
+    # Batch Options
+    "BatchOptions",
+    "BatchRetryStrategy",
+    "ClusterBatchOptions",
     # Config
     "AdvancedGlideClientConfiguration",
     "AdvancedGlideClusterClientConfiguration",
@@ -191,6 +207,9 @@ __all__ = [
     "ReadFrom",
     "ServerCredentials",
     "NodeAddress",
+    "OpenTelemetryConfig",
+    "OpenTelemetryTracesConfig",
+    "OpenTelemetryMetricsConfig",
     "ProtocolVersion",
     "PeriodicChecksManualInterval",
     "PeriodicChecksStatus",
@@ -205,6 +224,7 @@ __all__ = [
     "TJsonUniversalResponse",
     "TOK",
     "TResult",
+    "TlsAdvancedConfiguration",
     "TXInfoStreamFullResponse",
     "TXInfoStreamResponse",
     "FtAggregateResponse",
@@ -1695,8 +1695,8 @@ class BaseBatch:
         Commands response:
             TOK: A simple "OK" response.

-            If `start` exceeds the end of the list, or if `start` is greater than `end`, the result will be an empty list
-            (which causes `key` to be removed).
+            If `start` exceeds the end of the list, or if `start` is greater than `end`, the list is emptied
+            and the key is removed.

             If `end` exceeds the actual end of the list, it will be treated like the last element of the list.

@@ -1712,16 +1712,18 @@ class BaseBatch:
     ) -> TBatch:
         """
         Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`.
-        If `count` is positive, it removes elements equal to `element` moving from head to tail.
-        If `count` is negative, it removes elements equal to `element` moving from tail to head.
-        If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
-        equal to `element`.

         See [valkey.io](https://valkey.io/commands/lrem/) for more details.

         Args:
             key (TEncodable): The key of the list.
             count (int): The count of occurrences of elements equal to `element` to remove.
+
+                - If `count` is positive, it removes elements equal to `element` moving from head to tail.
+                - If `count` is negative, it removes elements equal to `element` moving from tail to head.
+                - If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
+                  equal to `element`.
+
             element (TEncodable): The element to remove from the list.

         Commands response:
@@ -4280,10 +4282,10 @@ class BaseBatch:
             elements (List[TEncodable]): A list of members to add to the HyperLogLog stored at `key`.

         Commands response:
-            int: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
-            altered, then returns 1.
+            bool: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
+            altered, then returns `True`.

-            Otherwise, returns 0.
+            Otherwise, returns `False`.
         """
         return self.append_command(RequestType.PfAdd, [key] + elements)

@@ -5589,7 +5591,7 @@ class ClusterBatch(BaseBatch):
     # TODO: add all CLUSTER commands


-@deprecated("Use ClusterBatch(is_atomic=True) instead.")
+@deprecated("Use Batch(is_atomic=True) instead.")
 class Transaction(Batch):
     def __init__(self):
         super().__init__(is_atomic=True)
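
The corrected deprecation message now names the class `Transaction` actually extends (`Batch`, not `ClusterBatch`). A hedged migration sketch; `client` is assumed to be an already-connected standalone client, and the `exec` call shape mirrors the cluster variant shown further down:

```python
from glide.async_commands.batch import Batch, Transaction


async def migrate_from_transaction(client):
    # Deprecated spelling: Transaction() is simply Batch(is_atomic=True) under the hood.
    legacy = Transaction()
    legacy.set("counter", "1")
    legacy.incr("counter")

    # 2.0.1 spelling recommended by the deprecation message.
    atomic = Batch(is_atomic=True)
    atomic.set("counter", "1")
    atomic.incr("counter")

    # Both are executed the same way (client assumed to be a connected GlideClient).
    return await client.exec(atomic, raise_on_error=True)
```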
@@ -0,0 +1,261 @@
+# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0
+
+from typing import Optional
+
+from glide.constants import TSingleNodeRoute
+
+
+class BatchRetryStrategy:
+    """
+    Defines a retry strategy for cluster batch requests, allowing control over retries in case of
+    server or connection errors.
+
+    This strategy determines whether failed commands should be retried, impacting execution order
+    and potential side effects.
+
+    Behavior:
+        - If `retry_server_error` is `True`, failed commands with a retriable error (e.g.,
+          `TRYAGAIN`) will be retried.
+        - If `retry_connection_error` is `True`, batch requests will be retried on
+          connection failures.
+
+    Cautions:
+        - **Server Errors:** Retrying may cause commands targeting the same slot to be executed
+          out of order.
+        - **Connection Errors:** Retrying may lead to duplicate executions, since the server might
+          have already received and processed the request before the error occurred.
+
+    Example Scenario:
+        ```
+        MGET key {key}:1
+        SET key "value"
+        ```
+
+        Expected response when keys are empty:
+        ```
+        [None, None]
+        "OK"
+        ```
+
+        However, if the slot is migrating, both commands may return an `ASK` error and be
+        redirected. Upon `ASK` redirection, a multi-key command may return a `TRYAGAIN`
+        error (triggering a retry), while the `SET` command succeeds immediately. This
+        can result in an unintended reordering of commands if the first command is retried
+        after the slot stabilizes:
+        ```
+        ["value", None]
+        "OK"
+        ```
+
+    Note:
+        Currently, retry strategies are supported only for non-atomic batches.
+
+    Default:
+        Both `retry_server_error` and `retry_connection_error` are set to `False`.
+
+    Args:
+        retry_server_error (bool): If `True`, failed commands with a retriable error (e.g., `TRYAGAIN`)
+            will be automatically retried.
+
+            ⚠️ **Warning:** Enabling this flag may cause commands targeting the same slot to execute
+            out of order.
+
+            By default, this is set to `False`.
+
+        retry_connection_error (bool): If `True`, batch requests will be retried in case of connection errors.
+
+            ⚠️ **Warning:** Retrying after a connection error may lead to duplicate executions, since
+            the server might have already received and processed the request before the error occurred.
+
+            By default, this is set to `False`.
+
+    """
+
+    def __init__(
+        self,
+        retry_server_error: bool = False,
+        retry_connection_error: bool = False,
+    ):
+        """
+        Initialize a BatchRetryStrategy.
+
+        Args:
+            retry_server_error (bool): If `True`, failed commands with a retriable error (e.g., `TRYAGAIN`)
+                will be automatically retried.
+
+                ⚠️ **Warning:** Enabling this flag may cause commands targeting the same slot to execute
+                out of order.
+
+                By default, this is set to `False`.
+
+            retry_connection_error (bool): If `True`, batch requests will be retried in case of connection errors.
+
+                ⚠️ **Warning:** Retrying after a connection error may lead to duplicate executions, since
+                the server might have already received and processed the request before the error occurred.
+
+                By default, this is set to `False`.
+
+        """
+        self.retry_server_error = retry_server_error
+        self.retry_connection_error = retry_connection_error
+
+
+class BaseBatchOptions:
+    """
+    Base options settings class for sending a batch request. Shared settings for standalone and
+    cluster batch requests.
+
+    Args:
+        timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+            to complete. This duration encompasses sending the request, awaiting a response from the server,
+            and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+            it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+    """
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+    ):
+        """
+        Initialize BaseBatchOptions.
+
+        Args:
+            timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+                to complete. This duration encompasses sending the request, awaiting a response from the server,
+                and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+                it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+        """
+        self.timeout = timeout
+
+
+class BatchOptions(BaseBatchOptions):
+    """
+    Options for a batch request for a standalone client.
+
+    Args:
+        timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+            to complete. This duration encompasses sending the request, awaiting a response from the server,
+            and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+            it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+    """
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+    ):
+        """
+        Options for a batch request for a standalone client
+
+        Args:
+            timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+                to complete. This duration encompasses sending the request, awaiting a response from the server,
+                and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+                it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+        """
+        super().__init__(timeout)
+
+
+class ClusterBatchOptions(BaseBatchOptions):
+    """
+    Options for cluster batch operations.
+
+    Args:
+        timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+            to complete. This duration encompasses sending the request, awaiting a response from the server,
+            and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+            it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+
+        route (Optional[TSingleNodeRoute]): Configures single-node routing for the batch request. The client
+            will send the batch to the specified node defined by `route`.
+
+            If a redirection error occurs:
+
+            - For Atomic Batches (Transactions), the entire transaction will be redirected.
+            - For Non-Atomic Batches (Pipelines), only the commands that encountered redirection errors
+              will be redirected.
+
+        retry_strategy (Optional[BatchRetryStrategy]): ⚠️ **Please see `BatchRetryStrategy` and read carefully before enabling these
+            options.**
+
+            Defines the retry strategy for handling cluster batch request failures.
+
+            This strategy determines whether failed commands should be retried, potentially impacting
+            execution order.
+
+            - If `retry_server_error` is `True`, retriable errors (e.g., TRYAGAIN) will
+              trigger a retry.
+            - If `retry_connection_error` is `True`, connection failures will trigger a
+              retry.
+
+            **Warnings:**
+
+            - Retrying server errors may cause commands targeting the same slot to execute out of
+              order.
+            - Retrying connection errors may lead to duplicate executions, as it is unclear which
+              commands have already been processed.
+
+            **Note:** Currently, retry strategies are supported only for non-atomic batches.
+
+            **Recommendation:** It is recommended to increase the timeout in `timeout`
+            when enabling these strategies.
+
+            **Default:** Both `retry_server_error` and `retry_connection_error` are set to
+            `False`.
+
+    """
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+        route: Optional[TSingleNodeRoute] = None,
+        retry_strategy: Optional[BatchRetryStrategy] = None,
+    ):
+        """
+        Initialize ClusterBatchOptions.
+
+        Args:
+            timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
+                to complete. This duration encompasses sending the request, awaiting a response from the server,
+                and any required reconnections or retries. If the specified timeout is exceeded for a pending request,
+                it will result in a timeout error. If not explicitly set, the client's default request timeout will be used.
+
+            route (Optional[TSingleNodeRoute]): Configures single-node routing for the batch request. The client
+                will send the batch to the specified node defined by `route`.
+
+                If a redirection error occurs:
+
+                - For Atomic Batches (Transactions), the entire transaction will be redirected.
+                - For Non-Atomic Batches (Pipelines), only the commands that encountered redirection errors
+                  will be redirected.
+
+            retry_strategy (Optional[BatchRetryStrategy]): ⚠️ **Please see `BatchRetryStrategy` and read carefully before enabling these
+                options.**
+
+                Defines the retry strategy for handling cluster batch request failures.
+
+                This strategy determines whether failed commands should be retried, potentially impacting
+                execution order.
+
+                - If `retry_server_error` is `True`, retriable errors (e.g., TRYAGAIN) will
+                  trigger a retry.
+                - If `retry_connection_error` is `True`, connection failures will trigger a
+                  retry.
+
+                **Warnings:**
+
+                - Retrying server errors may cause commands targeting the same slot to execute out of
+                  order.
+                - Retrying connection errors may lead to duplicate executions, as it is unclear which
+                  commands have already been processed.
+
+                **Note:** Currently, retry strategies are supported only for non-atomic batches.
+
+                **Recommendation:** It is recommended to increase the timeout in `timeout`
+                when enabling these strategies.
+
+                **Default:** Both `retry_server_error` and `retry_connection_error` are set to
+                `False`.
+        """
+        super().__init__(timeout)
+        self.retry_strategy = retry_strategy
+        self.route = route
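
The three option holders added above are plain data carriers; nothing talks to a server until they are handed to `exec`. A small construction sketch (values are illustrative):

```python
from glide import BatchOptions, BatchRetryStrategy, ClusterBatchOptions

# Standalone batches only carry a timeout (milliseconds).
standalone_opts = BatchOptions(timeout=2000)

# Cluster batches can additionally carry a retry strategy (non-atomic batches only).
retry = BatchRetryStrategy(
    retry_server_error=True,       # retry e.g. TRYAGAIN; same-slot commands may reorder
    retry_connection_error=False,  # left off to avoid possible duplicate execution
)
cluster_opts = ClusterBatchOptions(
    timeout=5000,          # a larger timeout is recommended when retries are enabled
    retry_strategy=retry,
)
```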
@@ -5,6 +5,7 @@ from __future__ import annotations
 from typing import Dict, List, Mapping, Optional, Union, cast

 from glide.async_commands.batch import ClusterBatch
+from glide.async_commands.batch_options import ClusterBatchOptions
 from glide.async_commands.command_args import ObjectType
 from glide.async_commands.core import (
     CoreCommands,
@@ -19,8 +20,8 @@ from glide.constants import (
     TFunctionListResponse,
     TFunctionStatsSingleNodeResponse,
     TResult,
-    TSingleNodeRoute,
 )
+from glide.exceptions import RequestError
 from glide.protobuf.command_request_pb2 import RequestType
 from glide.routes import Route

@@ -74,8 +75,8 @@ class ClusterCommands(CoreCommands):
         Args:
             sections (Optional[List[InfoSection]]): A list of InfoSection values specifying which sections of
                 information to retrieve. When no parameter is provided, the default option is assumed.
-            route (Optional[Route]): The command will be routed to all primaries, unless `route` is provided, in which
-                case the client will route the command to the nodes defined by `route`. Defaults to None.
+            route (Optional[Route]): The command will be routed to all primaries, unless `route` is provided, in
+                which case the client will route the command to the nodes defined by `route`. Defaults to None.

         Returns:
             TClusterResponse[bytes]: If a single node route is requested, returns a bytes string containing the information for
@@ -94,128 +95,109 @@ class ClusterCommands(CoreCommands):
         self,
         batch: ClusterBatch,
         raise_on_error: bool,
-        route: Optional[TSingleNodeRoute] = None,
-        timeout: Optional[int] = None,
-        retry_server_error: bool = False,
-        retry_connection_error: bool = False,
+        options: Optional[ClusterBatchOptions] = None,
     ) -> Optional[List[TResult]]:
         """
         Executes a batch by processing the queued commands.

-        See [Valkey Transactions (Atomic Batches)](https://valkey.io/docs/topics/transactions/) for details.
-        See [Valkey Pipelines (Non-Atomic Batches)](https://valkey.io/docs/topics/pipelining/) for details.
-
-        #### Routing Behavior:
-
-        - If a `route` is specified:
-            - The entire batch is sent to the specified node.
+        **Routing Behavior:**

+        - If a `route` is specified in `ClusterBatchOptions`, the entire batch is sent
+          to the specified node.
         - If no `route` is specified:
-            - Atomic batches (Transactions): Routed to the slot owner of the first key in the batch.
-              If no key is found, the request is sent to a random node.
-            - Non-atomic batches (Pipelines): Each command is routed to the node owning the corresponding
-              key's slot. If no key is present, routing follows the command's default request policy.
-              Multi-node commands are automatically split and dispatched to the appropriate nodes.
+            - **Atomic batches (Transactions):** Routed to the slot owner of the
+              first key in the batch. If no key is found, the request is sent to a random node.
+            - **Non-atomic batches (Pipelines):** Each command is routed to the node
+              owning the corresponding key's slot. If no key is present, routing follows the
+              command's request policy. Multi-node commands are automatically split and
+              dispatched to the appropriate nodes.

-        #### Behavior notes:
+        **Behavior notes:**

-        - Atomic Batches (Transactions): All key-based commands must map to the same hash slot.
-          If keys span different slots, the transaction will fail. If the transaction fails due to a
-          `WATCH` command, `exec` will return `None`.
+        - **Atomic Batches (Transactions):** All key-based commands must map to the
+          same hash slot. If keys span different slots, the transaction will fail. If the
+          transaction fails due to a `WATCH` command, `EXEC` will return `None`.

-        #### Retry and Redirection:
+        **Retry and Redirection:**

         - If a redirection error occurs:
-            - Atomic batches (Transactions): The entire transaction will be redirected.
-            - Non-atomic batches (Pipelines): Only commands that encountered redirection errors will be redirected.
-
-        - Retries for failures will be handled according to the `retry_server_error` and
-          `retry_connection_error` parameters.
+            - **Atomic batches (Transactions):** The entire transaction will be
+              redirected.
+            - **Non-atomic batches:** Only commands that encountered redirection
+              errors will be redirected.
+        - Retries for failures will be handled according to the configured `BatchRetryStrategy`.

         Args:
-            batch (ClusterBatch): A `ClusterBatch` object containing a list of commands to be executed.
-            raise_on_error (bool): Determines how errors are handled within the batch response. When set to
-                `True`, the first encountered error in the batch will be raised as a `RequestError`
-                exception after all retries and reconnections have been executed. When set to `False`,
-                errors will be included as part of the batch response array, allowing the caller to process both
-                successful and failed commands together. In this case, error details will be provided as
-                instances of `RequestError`.
-            route (Optional[TSingleNodeRoute]): Configures single-node routing for the batch request. The client
-                will send the batch to the specified node defined by `route`.
-
-                If a redirection error occurs:
-                - For Atomic Batches (Transactions), the entire transaction will be redirected.
-                - For Non-Atomic Batches (Pipelines), only the commands that encountered redirection errors
-                  will be redirected.
-            timeout (Optional[int]): The duration in milliseconds that the client should wait for the batch request
-                to complete. This duration encompasses sending the request, awaiting a response from the server,
-                and any required reconnections or retries.
-
-                If the specified timeout is exceeded, a timeout error will be raised. If not explicitly set,
-                the client's default request timeout will be used.
-            retry_server_error (bool): If `True`, retriable server errors (e.g., `TRYAGAIN`) will trigger a retry.
-                Warning: Retrying server errors may cause commands targeting the same slot to execute out of order.
-                Note: Currently supported only for non-atomic batches. Recommended to increase timeout when enabled.
-            retry_connection_error (bool): If `True`, connection failures will trigger a retry. Warning:
-                Retrying connection errors may lead to duplicate executions, as it is unclear which commands have
-                already been processed. Note: Currently supported only for non-atomic batches. Recommended to increase
-                timeout when enabled.
+            batch (ClusterBatch): A `ClusterBatch` containing the commands to execute.
+            raise_on_error (bool): Determines how errors are handled within the batch response.
+                When set to `True`, the first encountered error in the batch will be raised as an
+                exception of type `RequestError` after all retries and reconnections have been
+                executed.
+                When set to `False`, errors will be included as part of the batch response,
+                allowing the caller to process both successful and failed commands together. In this case,
+                error details will be provided as instances of `RequestError`.
+            options (Optional[ClusterBatchOptions]): A `ClusterBatchOptions` object containing execution options.

         Returns:
-            Optional[List[TResult]]: A list of results corresponding to the execution of each command in the batch.
-                If a command returns a value, it will be included in the list. If a command doesn't return a value,
-                the list entry will be `None`. If the batch failed due to a `WATCH` command, `exec` will return
-                `None`.
+            Optional[List[TResult]]: An array of results, where each entry
+            corresponds to a command's execution result.

-        Examples:
-            # Example 1: Atomic Batch (Transaction)
-            >>> atomic_batch = ClusterBatch(is_atomic=True)  # Atomic (Transaction)
-            >>> atomic_batch.set("key", "1")
-            >>> atomic_batch.incr("key")
-            >>> atomic_batch.get("key")
-            >>> atomic_result = await cluster_client.exec(atomic_batch, false)
-            >>> print(f"Atomic Batch Result: {atomic_result}")
-            # Expected Output: Atomic Batch Result: [OK, 2, 2]
+        See Also:
+            [Valkey Transactions (Atomic Batches)](https://valkey.io/docs/topics/transactions/)
+            [Valkey Pipelines (Non-Atomic Batches)](https://valkey.io/docs/topics/pipelining/)

-            # Example 2: Non-Atomic Batch (Pipeline)
-            >>> non_atomic_batch = ClusterBatch(is_atomic=False)  # Non-Atomic (Pipeline)
-            >>> non_atomic_batch.set("key1", "value1")
-            >>> non_atomic_batch.set("key2", "value2")
-            >>> non_atomic_batch.get("key1")
-            >>> non_atomic_batch.get("key2")
-            >>> non_atomic_result = await cluster_client.exec(non_atomic_batch, false)
-            >>> print(f"Non-Atomic Batch Result: {non_atomic_result}")
-            # Expected Output: Non-Atomic Batch Result: [OK, OK, value1, value2]
-
-            # Example 3: Atomic batch with options
+        Examples:
+            # Atomic batch (transaction): all keys must share the same hash slot
+            >>> options = ClusterBatchOptions(timeout=1000)  # Set a timeout of 1000 milliseconds
             >>> atomic_batch = ClusterBatch(is_atomic=True)
             >>> atomic_batch.set("key", "1")
             >>> atomic_batch.incr("key")
             >>> atomic_batch.get("key")
-            >>> atomic_result = await cluster_client.exec(
-            ...     atomic_batch,
-            ...     timeout=1000,  # Set a timeout of 1000 milliseconds
-            ...     raise_on_error=False  # Do not raise an error on failure
-            ... )
+            >>> atomic_result = await cluster_client.exec(atomic_batch, False, options)
             >>> print(f"Atomic Batch Result: {atomic_result}")
             # Output: Atomic Batch Result: [OK, 2, 2]

-            # Example 4: Non-atomic batch with retry options
+            # Non-atomic batch (pipeline): keys may span different hash slots
+            >>> retry_strategy = BatchRetryStrategy(retry_server_error=True, retry_connection_error=False)
+            >>> pipeline_options = ClusterBatchOptions(retry_strategy=retry_strategy)
             >>> non_atomic_batch = ClusterBatch(is_atomic=False)
             >>> non_atomic_batch.set("key1", "value1")
             >>> non_atomic_batch.set("key2", "value2")
             >>> non_atomic_batch.get("key1")
             >>> non_atomic_batch.get("key2")
-            >>> non_atomic_result = await cluster_client.exec(
-            ...     non_atomic_batch,
-            ...     raise_on_error=False,
-            ...     retry_server_error=True,
-            ...     retry_connection_error=False
-            ... )
+            >>> non_atomic_result = await cluster_client.exec(non_atomic_batch, False, pipeline_options)
             >>> print(f"Non-Atomic Batch Result: {non_atomic_result}")
             # Output: Non-Atomic Batch Result: [OK, OK, value1, value2]
         """
         commands = batch.commands[:]
+
+        if (
+            batch.is_atomic
+            and options
+            and options.retry_strategy
+            and (
+                options.retry_strategy.retry_server_error
+                or options.retry_strategy.retry_connection_error
+            )
+        ):
+            raise RequestError(
+                "Retry strategies are not supported for atomic batches (transactions). "
+            )
+
+        # Extract values to make the _execute_batch call cleaner
+        retry_server_error = (
+            options.retry_strategy.retry_server_error
+            if options and options.retry_strategy
+            else False
+        )
+        retry_connection_error = (
+            options.retry_strategy.retry_connection_error
+            if options and options.retry_strategy
+            else False
+        )
+        route = options.route if options else None
+        timeout = options.timeout if options else None
+
         return await self._execute_batch(
             commands,
             batch.is_atomic,
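
The net effect of this hunk: the four loose keyword arguments collapse into a single `ClusterBatchOptions`, and an atomic batch combined with a retry strategy is now rejected up front with a `RequestError`. A hedged before/after sketch, assuming an already-connected `GlideClusterClient` named `cluster_client`:

```python
from glide import BatchRetryStrategy, ClusterBatch, ClusterBatchOptions
from glide.exceptions import RequestError


async def exec_with_options(cluster_client):
    # 2.0.0rc6 (removed): exec(batch, False, timeout=1000, retry_server_error=True, ...)
    # 2.0.1: bundle everything into ClusterBatchOptions instead.
    options = ClusterBatchOptions(
        timeout=1000,
        retry_strategy=BatchRetryStrategy(retry_server_error=True),
    )

    pipeline = ClusterBatch(is_atomic=False)
    pipeline.set("key1", "value1")
    pipeline.get("key1")
    results = await cluster_client.exec(pipeline, raise_on_error=False, options=options)

    # Pairing a retry strategy with an atomic batch is rejected before anything is sent.
    atomic = ClusterBatch(is_atomic=True)
    atomic.incr("counter")
    try:
        await cluster_client.exec(atomic, raise_on_error=True, options=options)
    except RequestError:
        pass  # "Retry strategies are not supported for atomic batches (transactions)."

    return results
```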
@@ -2536,8 +2536,8 @@ class CoreCommands(Protocol):
         Returns:
             TOK: A simple "OK" response.

-            If `start` exceeds the end of the list, or if `start` is greater than `end`, the result will be an empty list
-            (which causes `key` to be removed).
+            If `start` exceeds the end of the list, or if `start` is greater than `end`, the list is emptied
+            and the key is removed.

             If `end` exceeds the actual end of the list, it will be treated like the last element of the list.
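
The reworded sentence describes the same server-side behavior: an out-of-range `LTRIM` leaves an empty list, and empty lists are deleted, so the key disappears. A quick sketch, assuming `client` is an already-connected client exposing the usual `rpush`/`ltrim`/`exists`/`lrange` commands:

```python
async def ltrim_edge_cases(client):
    await client.rpush("mylist", ["a", "b", "c"])

    # start > end: the list is emptied, which removes the key itself.
    await client.ltrim("mylist", 5, 1)            # -> "OK"
    assert await client.exists(["mylist"]) == 0

    # end past the last index is clamped to the end of the list.
    await client.rpush("mylist", ["a", "b", "c"])
    await client.ltrim("mylist", 1, 99)           # keeps "b" and "c"
    assert await client.lrange("mylist", 0, -1) == [b"b", b"c"]
```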
 
@@ -2555,9 +2555,6 @@ class CoreCommands(Protocol):
     async def lrem(self, key: TEncodable, count: int, element: TEncodable) -> int:
         """
         Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`.
-        If `count` is positive, it removes elements equal to `element` moving from head to tail.
-        If `count` is negative, it removes elements equal to `element` moving from tail to head.
-        If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
         equal to `element`.

         See [valkey.io](https://valkey.io/commands/lrem/) for more details.
@@ -2565,6 +2562,11 @@ class CoreCommands(Protocol):
         Args:
             key (TEncodable): The key of the list.
             count (int): The count of occurrences of elements equal to `element` to remove.
+
+                - If `count` is positive, it removes elements equal to `element` moving from head to tail.
+                - If `count` is negative, it removes elements equal to `element` moving from tail to head.
+                - If `count` is 0 or greater than the occurrences of elements equal to `element`, it removes all elements
+
             element (TEncodable): The element to remove from the list.

         Returns:
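
Moving the `count` semantics under Args does not change them: the sign of `count` still selects the scan direction. A small sketch of the three cases, assuming a connected client named `client`:

```python
async def lrem_count_semantics(client):
    await client.rpush("letters", ["x", "y", "x", "y", "x"])

    await client.lrem("letters", 1, "x")            # positive: head -> tail, removes the first "x"
    await client.lrem("letters", -1, "x")           # negative: tail -> head, removes the last "x"
    removed = await client.lrem("letters", 0, "y")  # zero: removes every remaining "y"
    return removed  # 2; the list is now [b"x"]
```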
@@ -6049,19 +6051,19 @@ class CoreCommands(Protocol):
             elements (List[TEncodable]): A list of members to add to the HyperLogLog stored at `key`.

         Returns:
-            int: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
-            altered, then returns 1.
+            bool: If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is
+            altered, then returns `True`.

-            Otherwise, returns 0.
+            Otherwise, returns `False`.

         Examples:
             >>> await client.pfadd("hll_1", ["a", "b", "c" ])
-                1 # A data structure was created or modified
+                True # A data structure was created or modified
             >>> await client.pfadd("hll_2", [])
-                1 # A new empty data structure was created
+                True # A new empty data structure was created
         """
         return cast(
-            int,
+            bool,
             await self._execute_command(RequestType.PfAdd, [key] + elements),
         )
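
The annotation and docstring now agree on a boolean return. Because `bool` is a subclass of `int` in Python, existing `== 1` checks keep passing, while new code can read the flag directly. A sketch following the updated docstring examples, with `client` assumed to be an already-connected client:

```python
async def pfadd_returns_bool(client):
    created = await client.pfadd("hll", ["a", "b", "c"])
    if created:  # documented as True when the HLL was created or its estimate changed
        print("HyperLogLog created or cardinality estimate changed")

    unchanged = await client.pfadd("hll", ["a"])  # re-adding an existing member
    print(unchanged)  # documented as False when the estimate is not altered

    assert created == 1  # True == 1, so pre-2.0.1 integer comparisons still hold
```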