redis 7.0.0b2__py3-none-any.whl → 7.0.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. redis/__init__.py +1 -1
  2. redis/asyncio/client.py +14 -5
  3. redis/asyncio/cluster.py +5 -1
  4. redis/asyncio/connection.py +18 -0
  5. redis/asyncio/http/__init__.py +0 -0
  6. redis/asyncio/http/http_client.py +265 -0
  7. redis/asyncio/multidb/__init__.py +0 -0
  8. redis/asyncio/multidb/client.py +528 -0
  9. redis/asyncio/multidb/command_executor.py +339 -0
  10. redis/asyncio/multidb/config.py +210 -0
  11. redis/asyncio/multidb/database.py +69 -0
  12. redis/asyncio/multidb/event.py +84 -0
  13. redis/asyncio/multidb/failover.py +125 -0
  14. redis/asyncio/multidb/failure_detector.py +38 -0
  15. redis/asyncio/multidb/healthcheck.py +292 -0
  16. redis/background.py +204 -0
  17. redis/client.py +22 -3
  18. redis/cluster.py +3 -1
  19. redis/commands/core.py +10 -3
  20. redis/data_structure.py +81 -0
  21. redis/event.py +84 -10
  22. redis/http/__init__.py +0 -0
  23. redis/http/http_client.py +425 -0
  24. redis/multidb/__init__.py +0 -0
  25. redis/multidb/circuit.py +144 -0
  26. redis/multidb/client.py +524 -0
  27. redis/multidb/command_executor.py +350 -0
  28. redis/multidb/config.py +207 -0
  29. redis/multidb/database.py +130 -0
  30. redis/multidb/event.py +89 -0
  31. redis/multidb/exception.py +17 -0
  32. redis/multidb/failover.py +125 -0
  33. redis/multidb/failure_detector.py +104 -0
  34. redis/multidb/healthcheck.py +289 -0
  35. redis/retry.py +14 -1
  36. redis/utils.py +14 -0
  37. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/METADATA +3 -1
  38. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/RECORD +40 -14
  39. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/WHEEL +0 -0
  40. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/licenses/LICENSE +0 -0
redis/multidb/client.py
@@ -0,0 +1,524 @@
+ import logging
+ import threading
+ from concurrent.futures import as_completed
+ from concurrent.futures.thread import ThreadPoolExecutor
+ from typing import Any, Callable, List, Optional
+
+ from redis.background import BackgroundScheduler
+ from redis.client import PubSubWorkerThread
+ from redis.commands import CoreCommands, RedisModuleCommands
+ from redis.multidb.circuit import CircuitBreaker
+ from redis.multidb.circuit import State as CBState
+ from redis.multidb.command_executor import DefaultCommandExecutor
+ from redis.multidb.config import DEFAULT_GRACE_PERIOD, MultiDbConfig
+ from redis.multidb.database import Database, Databases, SyncDatabase
+ from redis.multidb.exception import NoValidDatabaseException, UnhealthyDatabaseException
+ from redis.multidb.failure_detector import FailureDetector
+ from redis.multidb.healthcheck import HealthCheck, HealthCheckPolicy
+
+ logger = logging.getLogger(__name__)
+
+
+ class MultiDBClient(RedisModuleCommands, CoreCommands):
+     """
+     Client that operates on multiple logical Redis databases.
+     Should be used in Active-Active database setups.
+     """
+
+     def __init__(self, config: MultiDbConfig):
+         self._databases = config.databases()
+         self._health_checks = config.default_health_checks()
+
+         if config.health_checks is not None:
+             self._health_checks.extend(config.health_checks)
+
+         self._health_check_interval = config.health_check_interval
+         self._health_check_policy: HealthCheckPolicy = config.health_check_policy.value(
+             config.health_check_probes, config.health_check_probes_delay
+         )
+         self._failure_detectors = config.default_failure_detectors()
+
+         if config.failure_detectors is not None:
+             self._failure_detectors.extend(config.failure_detectors)
+
+         self._failover_strategy = (
+             config.default_failover_strategy()
+             if config.failover_strategy is None
+             else config.failover_strategy
+         )
+         self._failover_strategy.set_databases(self._databases)
+         self._auto_fallback_interval = config.auto_fallback_interval
+         self._event_dispatcher = config.event_dispatcher
+         self._command_retry = config.command_retry
+         self._command_retry.update_supported_errors((ConnectionRefusedError,))
+         self.command_executor = DefaultCommandExecutor(
+             failure_detectors=self._failure_detectors,
+             databases=self._databases,
+             command_retry=self._command_retry,
+             failover_strategy=self._failover_strategy,
+             failover_attempts=config.failover_attempts,
+             failover_delay=config.failover_delay,
+             event_dispatcher=self._event_dispatcher,
+             auto_fallback_interval=self._auto_fallback_interval,
+         )
+         self.initialized = False
+         self._hc_lock = threading.RLock()
+         self._bg_scheduler = BackgroundScheduler()
+         self._config = config
+
+     def initialize(self):
+         """
+         Perform initialization of databases to define their initial state.
+         """
+
+         def raise_exception_on_failed_hc(error):
+             raise error
+
+         # Initial health check of all databases to define their initial state
+         self._check_databases_health(on_error=raise_exception_on_failed_hc)
+
+         # Start recurring health checks in the background.
+         self._bg_scheduler.run_recurring(
+             self._health_check_interval,
+             self._check_databases_health,
+         )
+
+         is_active_db_found = False
+
+         for database, weight in self._databases:
+             # Set an on-state-changed callback for each circuit.
+             database.circuit.on_state_changed(self._on_circuit_state_change_callback)
+
+             # Set states according to weights and circuit state
+             if database.circuit.state == CBState.CLOSED and not is_active_db_found:
+                 self.command_executor.active_database = database
+                 is_active_db_found = True
+
+         if not is_active_db_found:
+             raise NoValidDatabaseException(
+                 "Initial connection failed - no active database found"
+             )
+
+         self.initialized = True
+
+     def get_databases(self) -> Databases:
+         """
+         Returns a sorted (by weight) list of all databases.
+         """
+         return self._databases
+
+     def set_active_database(self, database: SyncDatabase) -> None:
+         """
+         Promote one of the existing databases to become the active one.
+         """
+         exists = None
+
+         for existing_db, _ in self._databases:
+             if existing_db == database:
+                 exists = True
+                 break
+
+         if not exists:
+             raise ValueError("Given database is not a member of database list")
+
+         self._check_db_health(database)
+
+         if database.circuit.state == CBState.CLOSED:
+             highest_weighted_db, _ = self._databases.get_top_n(1)[0]
+             self.command_executor.active_database = database
+             return
+
+         raise NoValidDatabaseException(
+             "Cannot set active database, database is unhealthy"
+         )
+
+     def add_database(self, database: SyncDatabase):
+         """
+         Adds a new database to the database list.
+         """
+         for existing_db, _ in self._databases:
+             if existing_db == database:
+                 raise ValueError("Given database already exists")
+
+         self._check_db_health(database)
+
+         highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+         self._databases.add(database, database.weight)
+         self._change_active_database(database, highest_weighted_db)
+
+     def _change_active_database(
+         self, new_database: SyncDatabase, highest_weight_database: SyncDatabase
+     ):
+         if (
+             new_database.weight > highest_weight_database.weight
+             and new_database.circuit.state == CBState.CLOSED
+         ):
+             self.command_executor.active_database = new_database
+
+     def remove_database(self, database: Database):
+         """
+         Removes a database from the database list.
+         """
+         weight = self._databases.remove(database)
+         highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+
+         if (
+             highest_weight <= weight
+             and highest_weighted_db.circuit.state == CBState.CLOSED
+         ):
+             self.command_executor.active_database = highest_weighted_db
+
+     def update_database_weight(self, database: SyncDatabase, weight: float):
+         """
+         Updates the weight of a database in the database list.
+         """
+         exists = None
+
+         for existing_db, _ in self._databases:
+             if existing_db == database:
+                 exists = True
+                 break
+
+         if not exists:
+             raise ValueError("Given database is not a member of database list")
+
+         highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+         self._databases.update_weight(database, weight)
+         database.weight = weight
+         self._change_active_database(database, highest_weighted_db)
+
+     def add_failure_detector(self, failure_detector: FailureDetector):
+         """
+         Adds a new failure detector to the client.
+         """
+         self._failure_detectors.append(failure_detector)
+
+     def add_health_check(self, healthcheck: HealthCheck):
+         """
+         Adds a new health check to the client.
+         """
+         with self._hc_lock:
+             self._health_checks.append(healthcheck)
+
+     def execute_command(self, *args, **options):
+         """
+         Executes a single command and returns its result.
+         """
+         if not self.initialized:
+             self.initialize()
+
+         return self.command_executor.execute_command(*args, **options)
+
+     def pipeline(self):
+         """
+         Enters pipeline mode of the client.
+         """
+         return Pipeline(self)
+
+     def transaction(self, func: Callable[["Pipeline"], None], *watches, **options):
+         """
+         Executes a callable as a transaction.
+         """
+         if not self.initialized:
+             self.initialize()
+
+         return self.command_executor.execute_transaction(func, *watches, **options)
+
+     def pubsub(self, **kwargs):
+         """
+         Return a Publish/Subscribe object. With this object, you can
+         subscribe to channels and listen for messages that get published to
+         them.
+         """
+         if not self.initialized:
+             self.initialize()
+
+         return PubSub(self, **kwargs)
+
+     def _check_db_health(self, database: SyncDatabase) -> bool:
+         """
+         Runs health checks on the given database until the first failure.
+         """
+         # The health check will set up the circuit state
+         is_healthy = self._health_check_policy.execute(self._health_checks, database)
+
+         if not is_healthy:
+             if database.circuit.state != CBState.OPEN:
+                 database.circuit.state = CBState.OPEN
+             return is_healthy
+         elif is_healthy and database.circuit.state != CBState.CLOSED:
+             database.circuit.state = CBState.CLOSED
+
+         return is_healthy
+
+     def _check_databases_health(
+         self, on_error: Optional[Callable[[Exception], None]] = None
+     ):
+         """
+         Runs health checks against all databases.
+         Also executed as a recurring background task.
+         """
+         with ThreadPoolExecutor(max_workers=len(self._databases)) as executor:
+             # Submit all health checks
+             futures = {
+                 executor.submit(self._check_db_health, database)
+                 for database, _ in self._databases
+             }
+
+             try:
+                 for future in as_completed(
+                     futures, timeout=self._health_check_interval
+                 ):
+                     try:
+                         future.result()
+                     except UnhealthyDatabaseException as e:
+                         unhealthy_db = e.database
+                         unhealthy_db.circuit.state = CBState.OPEN
+
+                         logger.exception(
+                             "Health check failed due to exception",
+                             exc_info=e.original_exception,
+                         )
+
+                         if on_error:
+                             on_error(e.original_exception)
+             except TimeoutError:
+                 raise TimeoutError(
+                     "Health check execution exceeds health_check_interval"
+                 )
+
+     def _on_circuit_state_change_callback(
+         self, circuit: CircuitBreaker, old_state: CBState, new_state: CBState
+     ):
+         if new_state == CBState.HALF_OPEN:
+             self._check_db_health(circuit.database)
+             return
+
+         if old_state == CBState.CLOSED and new_state == CBState.OPEN:
+             self._bg_scheduler.run_once(
+                 DEFAULT_GRACE_PERIOD, _half_open_circuit, circuit
+             )
+
+     def close(self):
+         self.command_executor.active_database.client.close()
+
+
+ def _half_open_circuit(circuit: CircuitBreaker):
+     circuit.state = CBState.HALF_OPEN
+
+
+ class Pipeline(RedisModuleCommands, CoreCommands):
+     """
+     Pipeline implementation for multiple logical Redis databases.
+     """
+
+     def __init__(self, client: MultiDBClient):
+         self._command_stack = []
+         self._client = client
+
+     def __enter__(self) -> "Pipeline":
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.reset()
+
+     def __del__(self):
+         try:
+             self.reset()
+         except Exception:
+             pass
+
+     def __len__(self) -> int:
+         return len(self._command_stack)
+
+     def __bool__(self) -> bool:
+         """Pipeline instances should always evaluate to True"""
+         return True
+
+     def reset(self) -> None:
+         self._command_stack = []
+
+     def close(self) -> None:
+         """Close the pipeline"""
+         self.reset()
+
+     def pipeline_execute_command(self, *args, **options) -> "Pipeline":
+         """
+         Stage a command to be executed when execute() is next called
+
+         Returns the current Pipeline object back so commands can be
+         chained together, such as:
+
+         pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
+
+         At some other point, you can then run: pipe.execute(),
+         which will execute all commands queued in the pipe.
+         """
+         self._command_stack.append((args, options))
+         return self
+
+     def execute_command(self, *args, **kwargs):
+         """Adds a command to the stack"""
+         return self.pipeline_execute_command(*args, **kwargs)
+
+     def execute(self) -> List[Any]:
+         """Execute all the commands in the current pipeline"""
+         if not self._client.initialized:
+             self._client.initialize()
+
+         try:
+             return self._client.command_executor.execute_pipeline(
+                 tuple(self._command_stack)
+             )
+         finally:
+             self.reset()
+
+
+ class PubSub:
+     """
+     PubSub object for multi database client.
+     """
+
+     def __init__(self, client: MultiDBClient, **kwargs):
+         """Initialize the PubSub object for a multi-database client.
+
+         Args:
+             client: MultiDBClient instance to use for pub/sub operations
+             **kwargs: Additional keyword arguments to pass to the underlying pubsub implementation
+         """
+
+         self._client = client
+         self._client.command_executor.pubsub(**kwargs)
+
+     def __enter__(self) -> "PubSub":
+         return self
+
+     def __del__(self) -> None:
+         try:
+             # if this object went out of scope prior to shutting down
+             # subscriptions, close the connection manually before
+             # returning it to the connection pool
+             self.reset()
+         except Exception:
+             pass
+
+     def reset(self) -> None:
+         return self._client.command_executor.execute_pubsub_method("reset")
+
+     def close(self) -> None:
+         self.reset()
+
+     @property
+     def subscribed(self) -> bool:
+         return self._client.command_executor.active_pubsub.subscribed
+
+     def execute_command(self, *args):
+         return self._client.command_executor.execute_pubsub_method(
+             "execute_command", *args
+         )
+
+     def psubscribe(self, *args, **kwargs):
+         """
+         Subscribe to channel patterns. Patterns supplied as keyword arguments
+         expect a pattern name as the key and a callable as the value. A
+         pattern's callable will be invoked automatically when a message is
+         received on that pattern rather than producing a message via
+         ``listen()``.
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "psubscribe", *args, **kwargs
+         )
+
+     def punsubscribe(self, *args):
+         """
+         Unsubscribe from the supplied patterns. If empty, unsubscribe from
+         all patterns.
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "punsubscribe", *args
+         )
+
+     def subscribe(self, *args, **kwargs):
+         """
+         Subscribe to channels. Channels supplied as keyword arguments expect
+         a channel name as the key and a callable as the value. A channel's
+         callable will be invoked automatically when a message is received on
+         that channel rather than producing a message via ``listen()`` or
+         ``get_message()``.
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "subscribe", *args, **kwargs
+         )
+
+     def unsubscribe(self, *args):
+         """
+         Unsubscribe from the supplied channels. If empty, unsubscribe from
+         all channels
+         """
+         return self._client.command_executor.execute_pubsub_method("unsubscribe", *args)
+
+     def ssubscribe(self, *args, **kwargs):
+         """
+         Subscribes the client to the specified shard channels.
+         Channels supplied as keyword arguments expect a channel name as the key
+         and a callable as the value. A channel's callable will be invoked automatically
+         when a message is received on that channel rather than producing a message via
+         ``listen()`` or ``get_sharded_message()``.
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "ssubscribe", *args, **kwargs
+         )
+
+     def sunsubscribe(self, *args):
+         """
+         Unsubscribe from the supplied shard_channels. If empty, unsubscribe from
+         all shard_channels
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "sunsubscribe", *args
+         )
+
+     def get_message(
+         self, ignore_subscribe_messages: bool = False, timeout: float = 0.0
+     ):
+         """
+         Get the next message if one is available, otherwise None.
+
+         If timeout is specified, the system will wait for `timeout` seconds
+         before returning. Timeout should be specified as a floating point
+         number, or None, to wait indefinitely.
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "get_message",
+             ignore_subscribe_messages=ignore_subscribe_messages,
+             timeout=timeout,
+         )
+
+     def get_sharded_message(
+         self, ignore_subscribe_messages: bool = False, timeout: float = 0.0
+     ):
+         """
+         Get the next message if one is available in a sharded channel, otherwise None.
+
+         If timeout is specified, the system will wait for `timeout` seconds
+         before returning. Timeout should be specified as a floating point
+         number, or None, to wait indefinitely.
+         """
+         return self._client.command_executor.execute_pubsub_method(
+             "get_sharded_message",
+             ignore_subscribe_messages=ignore_subscribe_messages,
+             timeout=timeout,
+         )
+
+     def run_in_thread(
+         self,
+         sleep_time: float = 0.0,
+         daemon: bool = False,
+         exception_handler: Optional[Callable] = None,
+         sharded_pubsub: bool = False,
+     ) -> "PubSubWorkerThread":
+         return self._client.command_executor.execute_pubsub_run(
+             sleep_time,
+             daemon=daemon,
+             exception_handler=exception_handler,
+             pubsub=self,
+             sharded_pubsub=sharded_pubsub,
+         )
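
For orientation, a minimal usage sketch of the new sync multi-database API added by this file follows. It is illustrative only: the MultiDbConfig constructor arguments are defined in redis/multidb/config.py (not reproduced in this diff) and are elided here, and the key and channel names are hypothetical.

    from redis.multidb.client import MultiDBClient
    from redis.multidb.config import MultiDbConfig

    # Hypothetical sketch: MultiDbConfig's constructor arguments are not shown
    # in this diff, so they are left elided.
    config = MultiDbConfig(...)

    client = MultiDBClient(config)
    client.set("greeting", "hello")      # the first command triggers initialize()
    print(client.get("greeting"))

    # Pipeline: commands are staged locally and sent on execute().
    with client.pipeline() as pipe:
        pipe.set("counter", 0).incr("counter")
        results = pipe.execute()

    # Pub/Sub is routed through the currently active database.
    pubsub = client.pubsub()
    pubsub.subscribe("notifications")
    message = pubsub.get_message(timeout=1.0)

    client.close()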