redis 6.4.0__py3-none-any.whl → 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. redis/__init__.py +1 -1
  2. redis/_parsers/base.py +193 -8
  3. redis/_parsers/helpers.py +64 -6
  4. redis/_parsers/hiredis.py +16 -10
  5. redis/_parsers/resp3.py +11 -5
  6. redis/asyncio/client.py +65 -8
  7. redis/asyncio/cluster.py +57 -5
  8. redis/asyncio/connection.py +62 -2
  9. redis/asyncio/http/__init__.py +0 -0
  10. redis/asyncio/http/http_client.py +265 -0
  11. redis/asyncio/multidb/__init__.py +0 -0
  12. redis/asyncio/multidb/client.py +530 -0
  13. redis/asyncio/multidb/command_executor.py +339 -0
  14. redis/asyncio/multidb/config.py +210 -0
  15. redis/asyncio/multidb/database.py +69 -0
  16. redis/asyncio/multidb/event.py +84 -0
  17. redis/asyncio/multidb/failover.py +125 -0
  18. redis/asyncio/multidb/failure_detector.py +38 -0
  19. redis/asyncio/multidb/healthcheck.py +285 -0
  20. redis/background.py +204 -0
  21. redis/cache.py +1 -0
  22. redis/client.py +97 -16
  23. redis/cluster.py +14 -3
  24. redis/commands/core.py +348 -313
  25. redis/commands/helpers.py +0 -20
  26. redis/commands/json/commands.py +2 -2
  27. redis/commands/search/__init__.py +2 -2
  28. redis/commands/search/aggregation.py +24 -26
  29. redis/commands/search/commands.py +10 -10
  30. redis/commands/search/field.py +2 -2
  31. redis/commands/search/query.py +23 -23
  32. redis/commands/vectorset/__init__.py +1 -1
  33. redis/commands/vectorset/commands.py +43 -25
  34. redis/commands/vectorset/utils.py +40 -4
  35. redis/connection.py +1257 -83
  36. redis/data_structure.py +81 -0
  37. redis/event.py +84 -10
  38. redis/exceptions.py +8 -0
  39. redis/http/__init__.py +0 -0
  40. redis/http/http_client.py +425 -0
  41. redis/maint_notifications.py +810 -0
  42. redis/multidb/__init__.py +0 -0
  43. redis/multidb/circuit.py +144 -0
  44. redis/multidb/client.py +526 -0
  45. redis/multidb/command_executor.py +350 -0
  46. redis/multidb/config.py +207 -0
  47. redis/multidb/database.py +130 -0
  48. redis/multidb/event.py +89 -0
  49. redis/multidb/exception.py +17 -0
  50. redis/multidb/failover.py +125 -0
  51. redis/multidb/failure_detector.py +104 -0
  52. redis/multidb/healthcheck.py +282 -0
  53. redis/retry.py +14 -1
  54. redis/utils.py +34 -0
  55. {redis-6.4.0.dist-info → redis-7.0.0.dist-info}/METADATA +7 -4
  56. redis-7.0.0.dist-info/RECORD +105 -0
  57. redis-6.4.0.dist-info/RECORD +0 -78
  58. {redis-6.4.0.dist-info → redis-7.0.0.dist-info}/WHEEL +0 -0
  59. {redis-6.4.0.dist-info → redis-7.0.0.dist-info}/licenses/LICENSE +0 -0
redis/multidb/circuit.py
@@ -0,0 +1,144 @@
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import Callable
+
+import pybreaker
+
+DEFAULT_GRACE_PERIOD = 60
+
+
+class State(Enum):
+    CLOSED = "closed"
+    OPEN = "open"
+    HALF_OPEN = "half-open"
+
+
+class CircuitBreaker(ABC):
+    @property
+    @abstractmethod
+    def grace_period(self) -> float:
+        """The grace period in seconds during which the circuit should be kept open."""
+        pass
+
+    @grace_period.setter
+    @abstractmethod
+    def grace_period(self, grace_period: float):
+        """Set the grace period in seconds."""
+
+    @property
+    @abstractmethod
+    def state(self) -> State:
+        """The current state of the circuit."""
+        pass
+
+    @state.setter
+    @abstractmethod
+    def state(self, state: State):
+        """Set current state of the circuit."""
+        pass
+
+    @property
+    @abstractmethod
+    def database(self):
+        """Database associated with this circuit."""
+        pass
+
+    @database.setter
+    @abstractmethod
+    def database(self, database):
+        """Set database associated with this circuit."""
+        pass
+
+    @abstractmethod
+    def on_state_changed(self, cb: Callable[["CircuitBreaker", State, State], None]):
+        """Callback called when the state of the circuit changes."""
+        pass
+
+
+class BaseCircuitBreaker(CircuitBreaker):
+    """
+    Base implementation of Circuit Breaker interface.
+    """
+
+    def __init__(self, cb: pybreaker.CircuitBreaker):
+        self._cb = cb
+        self._state_pb_mapper = {
+            State.CLOSED: self._cb.close,
+            State.OPEN: self._cb.open,
+            State.HALF_OPEN: self._cb.half_open,
+        }
+        self._database = None
+
+    @property
+    def grace_period(self) -> float:
+        return self._cb.reset_timeout
+
+    @grace_period.setter
+    def grace_period(self, grace_period: float):
+        self._cb.reset_timeout = grace_period
+
+    @property
+    def state(self) -> State:
+        return State(value=self._cb.state.name)
+
+    @state.setter
+    def state(self, state: State):
+        self._state_pb_mapper[state]()
+
+    @property
+    def database(self):
+        return self._database
+
+    @database.setter
+    def database(self, database):
+        self._database = database
+
+    @abstractmethod
+    def on_state_changed(self, cb: Callable[["CircuitBreaker", State, State], None]):
+        """Callback called when the state of the circuit changes."""
+        pass
+
+
+class PBListener(pybreaker.CircuitBreakerListener):
+    """Wrapper for callback to be compatible with pybreaker implementation."""
+
+    def __init__(
+        self,
+        cb: Callable[[CircuitBreaker, State, State], None],
+        database,
+    ):
+        """
+        Initialize a PBListener instance.
+
+        Args:
+            cb: Callback function that will be called when the circuit breaker state changes.
+            database: Database instance associated with this circuit breaker.
+        """
+
+        self._cb = cb
+        self._database = database
+
+    def state_change(self, cb, old_state, new_state):
+        cb = PBCircuitBreakerAdapter(cb)
+        cb.database = self._database
+        old_state = State(value=old_state.name)
+        new_state = State(value=new_state.name)
+        self._cb(cb, old_state, new_state)
+
+
+class PBCircuitBreakerAdapter(BaseCircuitBreaker):
+    def __init__(self, cb: pybreaker.CircuitBreaker):
+        """
+        Initialize a PBCircuitBreakerAdapter instance.
+
+        This adapter wraps pybreaker's CircuitBreaker implementation to make it compatible
+        with our CircuitBreaker interface.
+
+        Args:
+            cb: A pybreaker CircuitBreaker instance to be adapted.
+        """
+        super().__init__(cb)
+
+    def on_state_changed(self, cb: Callable[["CircuitBreaker", State, State], None]):
+        listener = PBListener(cb, self.database)
+        self._cb.add_listener(listener)
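
For context, the adapter above is exercised roughly as in the minimal sketch below. This is not part of the diff: the pybreaker constructor argument and the log_transition callback are illustrative assumptions; only the redis.multidb.circuit names come from the hunk above.

import pybreaker

from redis.multidb.circuit import PBCircuitBreakerAdapter, State

# Wrap a pybreaker breaker in the adapter defined in the hunk above.
breaker = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=60))
breaker.grace_period = 30  # proxied to pybreaker's reset_timeout

def log_transition(circuit, old_state, new_state):
    # Signature matches Callable[[CircuitBreaker, State, State], None].
    print(f"circuit moved from {old_state.value} to {new_state.value}")

breaker.on_state_changed(log_transition)  # registers a PBListener on the pybreaker breaker
assert breaker.state is State.CLOSED
breaker.state = State.OPEN  # drives the underlying pybreaker state machine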
redis/multidb/client.py
@@ -0,0 +1,526 @@
+import logging
+import threading
+from concurrent.futures import as_completed
+from concurrent.futures.thread import ThreadPoolExecutor
+from typing import Any, Callable, List, Optional
+
+from redis.background import BackgroundScheduler
+from redis.client import PubSubWorkerThread
+from redis.commands import CoreCommands, RedisModuleCommands
+from redis.multidb.circuit import CircuitBreaker
+from redis.multidb.circuit import State as CBState
+from redis.multidb.command_executor import DefaultCommandExecutor
+from redis.multidb.config import DEFAULT_GRACE_PERIOD, MultiDbConfig
+from redis.multidb.database import Database, Databases, SyncDatabase
+from redis.multidb.exception import NoValidDatabaseException, UnhealthyDatabaseException
+from redis.multidb.failure_detector import FailureDetector
+from redis.multidb.healthcheck import HealthCheck, HealthCheckPolicy
+from redis.utils import experimental
+
+logger = logging.getLogger(__name__)
+
+
+@experimental
+class MultiDBClient(RedisModuleCommands, CoreCommands):
+    """
+    Client that operates on multiple logical Redis databases.
+    Should be used in Active-Active database setups.
+    """
+
+    def __init__(self, config: MultiDbConfig):
+        self._databases = config.databases()
+        self._health_checks = config.default_health_checks()
+
+        if config.health_checks is not None:
+            self._health_checks.extend(config.health_checks)
+
+        self._health_check_interval = config.health_check_interval
+        self._health_check_policy: HealthCheckPolicy = config.health_check_policy.value(
+            config.health_check_probes, config.health_check_probes_delay
+        )
+        self._failure_detectors = config.default_failure_detectors()
+
+        if config.failure_detectors is not None:
+            self._failure_detectors.extend(config.failure_detectors)
+
+        self._failover_strategy = (
+            config.default_failover_strategy()
+            if config.failover_strategy is None
+            else config.failover_strategy
+        )
+        self._failover_strategy.set_databases(self._databases)
+        self._auto_fallback_interval = config.auto_fallback_interval
+        self._event_dispatcher = config.event_dispatcher
+        self._command_retry = config.command_retry
+        self._command_retry.update_supported_errors((ConnectionRefusedError,))
+        self.command_executor = DefaultCommandExecutor(
+            failure_detectors=self._failure_detectors,
+            databases=self._databases,
+            command_retry=self._command_retry,
+            failover_strategy=self._failover_strategy,
+            failover_attempts=config.failover_attempts,
+            failover_delay=config.failover_delay,
+            event_dispatcher=self._event_dispatcher,
+            auto_fallback_interval=self._auto_fallback_interval,
+        )
+        self.initialized = False
+        self._hc_lock = threading.RLock()
+        self._bg_scheduler = BackgroundScheduler()
+        self._config = config
+
+    def initialize(self):
+        """
+        Perform initialization of databases to define their initial state.
+        """
+
+        def raise_exception_on_failed_hc(error):
+            raise error
+
+        # Initial databases check to define initial state
+        self._check_databases_health(on_error=raise_exception_on_failed_hc)
+
+        # Starts recurring health checks in the background.
+        self._bg_scheduler.run_recurring(
+            self._health_check_interval,
+            self._check_databases_health,
+        )
+
+        is_active_db_found = False
+
+        for database, weight in self._databases:
+            # Set on state changed callback for each circuit.
+            database.circuit.on_state_changed(self._on_circuit_state_change_callback)
+
+            # Set states according to weights and circuit state
+            if database.circuit.state == CBState.CLOSED and not is_active_db_found:
+                self.command_executor.active_database = database
+                is_active_db_found = True
+
+        if not is_active_db_found:
+            raise NoValidDatabaseException(
+                "Initial connection failed - no active database found"
+            )
+
+        self.initialized = True
+
+    def get_databases(self) -> Databases:
+        """
+        Returns a sorted (by weight) list of all databases.
+        """
+        return self._databases
+
+    def set_active_database(self, database: SyncDatabase) -> None:
+        """
+        Promote one of the existing databases to become the active one.
+        """
+        exists = None
+
+        for existing_db, _ in self._databases:
+            if existing_db == database:
+                exists = True
+                break
+
+        if not exists:
+            raise ValueError("Given database is not a member of database list")
+
+        self._check_db_health(database)
+
+        if database.circuit.state == CBState.CLOSED:
+            highest_weighted_db, _ = self._databases.get_top_n(1)[0]
+            self.command_executor.active_database = database
+            return
+
+        raise NoValidDatabaseException(
+            "Cannot set active database, database is unhealthy"
+        )
+
+    def add_database(self, database: SyncDatabase):
+        """
+        Adds a new database to the database list.
+        """
+        for existing_db, _ in self._databases:
+            if existing_db == database:
+                raise ValueError("Given database already exists")
+
+        self._check_db_health(database)
+
+        highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+        self._databases.add(database, database.weight)
+        self._change_active_database(database, highest_weighted_db)
+
+    def _change_active_database(
+        self, new_database: SyncDatabase, highest_weight_database: SyncDatabase
+    ):
+        if (
+            new_database.weight > highest_weight_database.weight
+            and new_database.circuit.state == CBState.CLOSED
+        ):
+            self.command_executor.active_database = new_database
+
+    def remove_database(self, database: Database):
+        """
+        Removes a database from the database list.
+        """
+        weight = self._databases.remove(database)
+        highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+
+        if (
+            highest_weight <= weight
+            and highest_weighted_db.circuit.state == CBState.CLOSED
+        ):
+            self.command_executor.active_database = highest_weighted_db
+
+    def update_database_weight(self, database: SyncDatabase, weight: float):
+        """
+        Updates the weight of a database in the database list.
+        """
+        exists = None
+
+        for existing_db, _ in self._databases:
+            if existing_db == database:
+                exists = True
+                break
+
+        if not exists:
+            raise ValueError("Given database is not a member of database list")
+
+        highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+        self._databases.update_weight(database, weight)
+        database.weight = weight
+        self._change_active_database(database, highest_weighted_db)
+
+    def add_failure_detector(self, failure_detector: FailureDetector):
+        """
+        Adds a new failure detector to the database.
+        """
+        self._failure_detectors.append(failure_detector)
+
+    def add_health_check(self, healthcheck: HealthCheck):
+        """
+        Adds a new health check to the database.
+        """
+        with self._hc_lock:
+            self._health_checks.append(healthcheck)
+
+    def execute_command(self, *args, **options):
+        """
+        Executes a single command and returns its result.
+        """
+        if not self.initialized:
+            self.initialize()
+
+        return self.command_executor.execute_command(*args, **options)
+
+    def pipeline(self):
+        """
+        Enters into pipeline mode of the client.
+        """
+        return Pipeline(self)
+
+    def transaction(self, func: Callable[["Pipeline"], None], *watches, **options):
+        """
+        Executes a callable as a transaction.
+        """
+        if not self.initialized:
+            self.initialize()
+
+        return self.command_executor.execute_transaction(func, *watches, **options)
+
+    def pubsub(self, **kwargs):
+        """
+        Return a Publish/Subscribe object. With this object, you can
+        subscribe to channels and listen for messages that get published to
+        them.
+        """
+        if not self.initialized:
+            self.initialize()
+
+        return PubSub(self, **kwargs)
+
+    def _check_db_health(self, database: SyncDatabase) -> bool:
+        """
+        Runs health checks on the given database until first failure.
+        """
+        # Health check will set up the circuit state
+        is_healthy = self._health_check_policy.execute(self._health_checks, database)
+
+        if not is_healthy:
+            if database.circuit.state != CBState.OPEN:
+                database.circuit.state = CBState.OPEN
+            return is_healthy
+        elif is_healthy and database.circuit.state != CBState.CLOSED:
+            database.circuit.state = CBState.CLOSED
+
+        return is_healthy
+
+    def _check_databases_health(self, on_error: Callable[[Exception], None] = None):
+        """
+        Runs health checks as a recurring task.
+        Runs health checks against all databases.
+        """
+        with ThreadPoolExecutor(max_workers=len(self._databases)) as executor:
+            # Submit all health checks
+            futures = {
+                executor.submit(self._check_db_health, database)
+                for database, _ in self._databases
+            }
+
+            try:
+                for future in as_completed(
+                    futures, timeout=self._health_check_interval
+                ):
+                    try:
+                        future.result()
+                    except UnhealthyDatabaseException as e:
+                        unhealthy_db = e.database
+                        unhealthy_db.circuit.state = CBState.OPEN
+
+                        logger.exception(
+                            "Health check failed, due to exception",
+                            exc_info=e.original_exception,
+                        )
+
+                        if on_error:
+                            on_error(e.original_exception)
+            except TimeoutError:
+                raise TimeoutError(
+                    "Health check execution exceeds health_check_interval"
+                )
+
+    def _on_circuit_state_change_callback(
+        self, circuit: CircuitBreaker, old_state: CBState, new_state: CBState
+    ):
+        if new_state == CBState.HALF_OPEN:
+            self._check_db_health(circuit.database)
+            return
+
+        if old_state == CBState.CLOSED and new_state == CBState.OPEN:
+            self._bg_scheduler.run_once(
+                DEFAULT_GRACE_PERIOD, _half_open_circuit, circuit
+            )
+
+    def close(self):
+        self.command_executor.active_database.client.close()
+
+
+def _half_open_circuit(circuit: CircuitBreaker):
+    circuit.state = CBState.HALF_OPEN
+
+
+class Pipeline(RedisModuleCommands, CoreCommands):
+    """
+    Pipeline implementation for multiple logical Redis databases.
+    """
+
+    def __init__(self, client: MultiDBClient):
+        self._command_stack = []
+        self._client = client
+
+    def __enter__(self) -> "Pipeline":
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.reset()
+
+    def __del__(self):
+        try:
+            self.reset()
+        except Exception:
+            pass
+
+    def __len__(self) -> int:
+        return len(self._command_stack)
+
+    def __bool__(self) -> bool:
+        """Pipeline instances should always evaluate to True"""
+        return True
+
+    def reset(self) -> None:
+        self._command_stack = []
+
+    def close(self) -> None:
+        """Close the pipeline"""
+        self.reset()
+
+    def pipeline_execute_command(self, *args, **options) -> "Pipeline":
+        """
+        Stage a command to be executed when execute() is next called
+
+        Returns the current Pipeline object back so commands can be
+        chained together, such as:
+
+        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
+
+        At some other point, you can then run: pipe.execute(),
+        which will execute all commands queued in the pipe.
+        """
+        self._command_stack.append((args, options))
+        return self
+
+    def execute_command(self, *args, **kwargs):
+        """Adds a command to the stack"""
+        return self.pipeline_execute_command(*args, **kwargs)
+
+    def execute(self) -> List[Any]:
+        """Execute all the commands in the current pipeline"""
+        if not self._client.initialized:
+            self._client.initialize()
+
+        try:
+            return self._client.command_executor.execute_pipeline(
+                tuple(self._command_stack)
+            )
+        finally:
+            self.reset()
+
+
+class PubSub:
+    """
+    PubSub object for multi database client.
+    """
+
+    def __init__(self, client: MultiDBClient, **kwargs):
+        """Initialize the PubSub object for a multi-database client.
+
+        Args:
+            client: MultiDBClient instance to use for pub/sub operations
+            **kwargs: Additional keyword arguments to pass to the underlying pubsub implementation
+        """
+
+        self._client = client
+        self._client.command_executor.pubsub(**kwargs)
+
+    def __enter__(self) -> "PubSub":
+        return self
+
+    def __del__(self) -> None:
+        try:
+            # if this object went out of scope prior to shutting down
+            # subscriptions, close the connection manually before
+            # returning it to the connection pool
+            self.reset()
+        except Exception:
+            pass
+
+    def reset(self) -> None:
+        return self._client.command_executor.execute_pubsub_method("reset")
+
+    def close(self) -> None:
+        self.reset()
+
+    @property
+    def subscribed(self) -> bool:
+        return self._client.command_executor.active_pubsub.subscribed
+
+    def execute_command(self, *args):
+        return self._client.command_executor.execute_pubsub_method(
+            "execute_command", *args
+        )
+
+    def psubscribe(self, *args, **kwargs):
+        """
+        Subscribe to channel patterns. Patterns supplied as keyword arguments
+        expect a pattern name as the key and a callable as the value. A
+        pattern's callable will be invoked automatically when a message is
+        received on that pattern rather than producing a message via
+        ``listen()``.
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "psubscribe", *args, **kwargs
+        )
+
+    def punsubscribe(self, *args):
+        """
+        Unsubscribe from the supplied patterns. If empty, unsubscribe from
+        all patterns.
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "punsubscribe", *args
+        )
+
+    def subscribe(self, *args, **kwargs):
+        """
+        Subscribe to channels. Channels supplied as keyword arguments expect
+        a channel name as the key and a callable as the value. A channel's
+        callable will be invoked automatically when a message is received on
+        that channel rather than producing a message via ``listen()`` or
+        ``get_message()``.
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "subscribe", *args, **kwargs
+        )
+
+    def unsubscribe(self, *args):
+        """
+        Unsubscribe from the supplied channels. If empty, unsubscribe from
+        all channels
+        """
+        return self._client.command_executor.execute_pubsub_method("unsubscribe", *args)
+
+    def ssubscribe(self, *args, **kwargs):
+        """
+        Subscribes the client to the specified shard channels.
+        Channels supplied as keyword arguments expect a channel name as the key
+        and a callable as the value. A channel's callable will be invoked automatically
+        when a message is received on that channel rather than producing a message via
+        ``listen()`` or ``get_sharded_message()``.
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "ssubscribe", *args, **kwargs
+        )
+
+    def sunsubscribe(self, *args):
+        """
+        Unsubscribe from the supplied shard_channels. If empty, unsubscribe from
+        all shard_channels
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "sunsubscribe", *args
+        )
+
+    def get_message(
+        self, ignore_subscribe_messages: bool = False, timeout: float = 0.0
+    ):
+        """
+        Get the next message if one is available, otherwise None.
+
+        If timeout is specified, the system will wait for `timeout` seconds
+        before returning. Timeout should be specified as a floating point
+        number, or None, to wait indefinitely.
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "get_message",
+            ignore_subscribe_messages=ignore_subscribe_messages,
+            timeout=timeout,
+        )
+
+    def get_sharded_message(
+        self, ignore_subscribe_messages: bool = False, timeout: float = 0.0
+    ):
+        """
+        Get the next message if one is available in a sharded channel, otherwise None.
+
+        If timeout is specified, the system will wait for `timeout` seconds
+        before returning. Timeout should be specified as a floating point
+        number, or None, to wait indefinitely.
+        """
+        return self._client.command_executor.execute_pubsub_method(
+            "get_sharded_message",
+            ignore_subscribe_messages=ignore_subscribe_messages,
+            timeout=timeout,
+        )
+
+    def run_in_thread(
+        self,
+        sleep_time: float = 0.0,
+        daemon: bool = False,
+        exception_handler: Optional[Callable] = None,
+        sharded_pubsub: bool = False,
+    ) -> "PubSubWorkerThread":
+        return self._client.command_executor.execute_pubsub_run(
+            sleep_time,
+            daemon=daemon,
+            exception_handler=exception_handler,
+            pubsub=self,
+            sharded_pubsub=sharded_pubsub,
+        )
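
Taken together, the new client is used roughly as sketched below. This sketch is not part of the diff: construction of a MultiDbConfig lives in redis/multidb/config.py (listed above but not shown in this section), so the build_config helper is a hypothetical stub; the client, pipeline, and pub/sub calls mirror the methods defined in the hunk above.

from redis.multidb.client import MultiDBClient
from redis.multidb.config import MultiDbConfig

def build_config() -> MultiDbConfig:
    # Hypothetical placeholder: the fields of MultiDbConfig are not shown in this section.
    raise NotImplementedError("construct MultiDbConfig per redis/multidb/config.py")

client = MultiDBClient(build_config())  # the class is marked @experimental above
client.initialize()  # runs initial health checks and selects the active database

# Single commands are routed through the command executor to the active database.
client.execute_command("SET", "foo", "bar")

# Pipeline: commands are staged locally and flushed on execute().
with client.pipeline() as pipe:
    pipe.set("foo", "bar")
    pipe.incr("counter")
    results = pipe.execute()

# Pub/Sub proxies method calls to the active database's pubsub object.
p = client.pubsub()
p.subscribe("notifications")
message = p.get_message(timeout=1.0)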