redis-7.0.0b2-py3-none-any.whl → redis-7.0.0b3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. redis/__init__.py +1 -1
  2. redis/asyncio/client.py +14 -5
  3. redis/asyncio/cluster.py +5 -1
  4. redis/asyncio/connection.py +18 -0
  5. redis/asyncio/http/__init__.py +0 -0
  6. redis/asyncio/http/http_client.py +265 -0
  7. redis/asyncio/multidb/__init__.py +0 -0
  8. redis/asyncio/multidb/client.py +528 -0
  9. redis/asyncio/multidb/command_executor.py +339 -0
  10. redis/asyncio/multidb/config.py +210 -0
  11. redis/asyncio/multidb/database.py +69 -0
  12. redis/asyncio/multidb/event.py +84 -0
  13. redis/asyncio/multidb/failover.py +125 -0
  14. redis/asyncio/multidb/failure_detector.py +38 -0
  15. redis/asyncio/multidb/healthcheck.py +292 -0
  16. redis/background.py +204 -0
  17. redis/client.py +22 -3
  18. redis/cluster.py +3 -1
  19. redis/commands/core.py +10 -3
  20. redis/data_structure.py +81 -0
  21. redis/event.py +84 -10
  22. redis/http/__init__.py +0 -0
  23. redis/http/http_client.py +425 -0
  24. redis/multidb/__init__.py +0 -0
  25. redis/multidb/circuit.py +144 -0
  26. redis/multidb/client.py +524 -0
  27. redis/multidb/command_executor.py +350 -0
  28. redis/multidb/config.py +207 -0
  29. redis/multidb/database.py +130 -0
  30. redis/multidb/event.py +89 -0
  31. redis/multidb/exception.py +17 -0
  32. redis/multidb/failover.py +125 -0
  33. redis/multidb/failure_detector.py +104 -0
  34. redis/multidb/healthcheck.py +289 -0
  35. redis/retry.py +14 -1
  36. redis/utils.py +14 -0
  37. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/METADATA +3 -1
  38. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/RECORD +40 -14
  39. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/WHEEL +0 -0
  40. {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/licenses/LICENSE +0 -0
redis/asyncio/multidb/client.py (new file)
@@ -0,0 +1,528 @@
+ import asyncio
+ import logging
+ from typing import Any, Awaitable, Callable, Coroutine, List, Optional, Union
+
+ from redis.asyncio.client import PubSubHandler
+ from redis.asyncio.multidb.command_executor import DefaultCommandExecutor
+ from redis.asyncio.multidb.config import DEFAULT_GRACE_PERIOD, MultiDbConfig
+ from redis.asyncio.multidb.database import AsyncDatabase, Databases
+ from redis.asyncio.multidb.failure_detector import AsyncFailureDetector
+ from redis.asyncio.multidb.healthcheck import HealthCheck, HealthCheckPolicy
+ from redis.background import BackgroundScheduler
+ from redis.commands import AsyncCoreCommands, AsyncRedisModuleCommands
+ from redis.multidb.circuit import CircuitBreaker
+ from redis.multidb.circuit import State as CBState
+ from redis.multidb.exception import NoValidDatabaseException, UnhealthyDatabaseException
+ from redis.typing import ChannelT, EncodableT, KeyT
+
+ logger = logging.getLogger(__name__)
+
+
+ class MultiDBClient(AsyncRedisModuleCommands, AsyncCoreCommands):
+     """
+     Client that operates on multiple logical Redis databases.
+     Should be used in Active-Active database setups.
+     """
+
+     def __init__(self, config: MultiDbConfig):
+         self._databases = config.databases()
+         self._health_checks = config.default_health_checks()
+
+         if config.health_checks is not None:
+             self._health_checks.extend(config.health_checks)
+
+         self._health_check_interval = config.health_check_interval
+         self._health_check_policy: HealthCheckPolicy = config.health_check_policy.value(
+             config.health_check_probes, config.health_check_delay
+         )
+         self._failure_detectors = config.default_failure_detectors()
+
+         if config.failure_detectors is not None:
+             self._failure_detectors.extend(config.failure_detectors)
+
+         self._failover_strategy = (
+             config.default_failover_strategy()
+             if config.failover_strategy is None
+             else config.failover_strategy
+         )
+         self._failover_strategy.set_databases(self._databases)
+         self._auto_fallback_interval = config.auto_fallback_interval
+         self._event_dispatcher = config.event_dispatcher
+         self._command_retry = config.command_retry
+         self._command_retry.update_supported_errors([ConnectionRefusedError])
+         self.command_executor = DefaultCommandExecutor(
+             failure_detectors=self._failure_detectors,
+             databases=self._databases,
+             command_retry=self._command_retry,
+             failover_strategy=self._failover_strategy,
+             failover_attempts=config.failover_attempts,
+             failover_delay=config.failover_delay,
+             event_dispatcher=self._event_dispatcher,
+             auto_fallback_interval=self._auto_fallback_interval,
+         )
+         self.initialized = False
+         self._hc_lock = asyncio.Lock()
+         self._bg_scheduler = BackgroundScheduler()
+         self._config = config
+         self._recurring_hc_task = None
+         self._hc_tasks = []
+         self._half_open_state_task = None
+
+     async def __aenter__(self: "MultiDBClient") -> "MultiDBClient":
+         if not self.initialized:
+             await self.initialize()
+         return self
+
+     async def __aexit__(self, exc_type, exc_value, traceback):
+         if self._recurring_hc_task:
+             self._recurring_hc_task.cancel()
+         if self._half_open_state_task:
+             self._half_open_state_task.cancel()
+         for hc_task in self._hc_tasks:
+             hc_task.cancel()
+
+     async def initialize(self):
+         """
+         Perform initialization of databases to define their initial state.
+         """
+
+         async def raise_exception_on_failed_hc(error):
+             raise error
+
+         # Initial health check of all databases to define their initial state.
+         await self._check_databases_health(on_error=raise_exception_on_failed_hc)
+
+         # Start recurring health checks in the background.
+         self._recurring_hc_task = asyncio.create_task(
+             self._bg_scheduler.run_recurring_async(
+                 self._health_check_interval,
+                 self._check_databases_health,
+             )
+         )
+
+         is_active_db_found = False
+
+         for database, weight in self._databases:
+             # Set the on-state-changed callback for each circuit.
+             database.circuit.on_state_changed(self._on_circuit_state_change_callback)
+
+             # Set the active database according to weights and circuit state.
+             if database.circuit.state == CBState.CLOSED and not is_active_db_found:
+                 await self.command_executor.set_active_database(database)
+                 is_active_db_found = True
+
+         if not is_active_db_found:
+             raise NoValidDatabaseException(
+                 "Initial connection failed - no active database found"
+             )
+
+         self.initialized = True
+
+     def get_databases(self) -> Databases:
+         """
+         Returns a sorted (by weight) list of all databases.
+         """
+         return self._databases
+
+     async def set_active_database(self, database: AsyncDatabase) -> None:
+         """
+         Promote one of the existing databases to become the active one.
+         """
+         exists = None
+
+         for existing_db, _ in self._databases:
+             if existing_db == database:
+                 exists = True
+                 break
+
+         if not exists:
+             raise ValueError("Given database is not a member of database list")
+
+         await self._check_db_health(database)
+
+         if database.circuit.state == CBState.CLOSED:
+             highest_weighted_db, _ = self._databases.get_top_n(1)[0]
+             await self.command_executor.set_active_database(database)
+             return
+
+         raise NoValidDatabaseException(
+             "Cannot set active database, database is unhealthy"
+         )
+
+     async def add_database(self, database: AsyncDatabase):
+         """
+         Adds a new database to the database list.
+         """
+         for existing_db, _ in self._databases:
+             if existing_db == database:
+                 raise ValueError("Given database already exists")
+
+         await self._check_db_health(database)
+
+         highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+         self._databases.add(database, database.weight)
+         await self._change_active_database(database, highest_weighted_db)
+
+     async def _change_active_database(
+         self, new_database: AsyncDatabase, highest_weight_database: AsyncDatabase
+     ):
+         if (
+             new_database.weight > highest_weight_database.weight
+             and new_database.circuit.state == CBState.CLOSED
+         ):
+             await self.command_executor.set_active_database(new_database)
+
+     async def remove_database(self, database: AsyncDatabase):
+         """
+         Removes a database from the database list.
+         """
+         weight = self._databases.remove(database)
+         highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+
+         if (
+             highest_weight <= weight
+             and highest_weighted_db.circuit.state == CBState.CLOSED
+         ):
+             await self.command_executor.set_active_database(highest_weighted_db)
+
+     async def update_database_weight(self, database: AsyncDatabase, weight: float):
+         """
+         Updates the weight of a database in the database list.
+         """
+         exists = None
+
+         for existing_db, _ in self._databases:
+             if existing_db == database:
+                 exists = True
+                 break
+
+         if not exists:
+             raise ValueError("Given database is not a member of database list")
+
+         highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
+         self._databases.update_weight(database, weight)
+         database.weight = weight
+         await self._change_active_database(database, highest_weighted_db)
+
+     def add_failure_detector(self, failure_detector: AsyncFailureDetector):
+         """
+         Adds a new failure detector to the client.
+         """
+         self._failure_detectors.append(failure_detector)
+
+     async def add_health_check(self, healthcheck: HealthCheck):
+         """
+         Adds a new health check to the client.
+         """
+         async with self._hc_lock:
+             self._health_checks.append(healthcheck)
+
+     async def execute_command(self, *args, **options):
+         """
+         Executes a single command and returns its result.
+         """
+         if not self.initialized:
+             await self.initialize()
+
+         return await self.command_executor.execute_command(*args, **options)
+
+     def pipeline(self):
+         """
+         Enters pipeline mode of the client.
+         """
+         return Pipeline(self)
+
+     async def transaction(
+         self,
+         func: Callable[["Pipeline"], Union[Any, Awaitable[Any]]],
+         *watches: KeyT,
+         shard_hint: Optional[str] = None,
+         value_from_callable: bool = False,
+         watch_delay: Optional[float] = None,
+     ):
+         """
+         Executes a callable as a transaction.
+         """
+         if not self.initialized:
+             await self.initialize()
+
+         return await self.command_executor.execute_transaction(
+             func,
+             *watches,
+             shard_hint=shard_hint,
+             value_from_callable=value_from_callable,
+             watch_delay=watch_delay,
+         )
+
+     async def pubsub(self, **kwargs):
+         """
+         Return a Publish/Subscribe object. With this object, you can
+         subscribe to channels and listen for messages that get published to
+         them.
+         """
+         if not self.initialized:
+             await self.initialize()
+
+         return PubSub(self, **kwargs)
+
+     async def _check_databases_health(
+         self,
+         on_error: Optional[Callable[[Exception], Coroutine[Any, Any, None]]] = None,
+     ):
+         """
+         Runs health checks against all databases.
+         Also scheduled as a recurring background task.
+         """
+         try:
+             self._hc_tasks = [
+                 asyncio.create_task(self._check_db_health(database))
+                 for database, _ in self._databases
+             ]
+             results = await asyncio.wait_for(
+                 asyncio.gather(
+                     *self._hc_tasks,
+                     return_exceptions=True,
+                 ),
+                 timeout=self._health_check_interval,
+             )
+         except asyncio.TimeoutError:
+             raise asyncio.TimeoutError(
+                 "Health check execution exceeds health_check_interval"
+             )
+
+         for result in results:
+             if isinstance(result, UnhealthyDatabaseException):
+                 unhealthy_db = result.database
+                 unhealthy_db.circuit.state = CBState.OPEN
+
+                 logger.exception(
+                     "Health check failed due to exception",
+                     exc_info=result.original_exception,
+                 )
+
+                 if on_error:
+                     await on_error(result.original_exception)
+
+     async def _check_db_health(self, database: AsyncDatabase) -> bool:
+         """
+         Runs health checks on the given database until the first failure.
+         """
+         # The health check policy will set up the circuit state.
+         is_healthy = await self._health_check_policy.execute(
+             self._health_checks, database
+         )
+
+         if not is_healthy:
+             if database.circuit.state != CBState.OPEN:
+                 database.circuit.state = CBState.OPEN
+             return is_healthy
+         elif is_healthy and database.circuit.state != CBState.CLOSED:
+             database.circuit.state = CBState.CLOSED
+
+         return is_healthy
+
+     def _on_circuit_state_change_callback(
+         self, circuit: CircuitBreaker, old_state: CBState, new_state: CBState
+     ):
+         loop = asyncio.get_running_loop()
+
+         if new_state == CBState.HALF_OPEN:
+             self._half_open_state_task = asyncio.create_task(
+                 self._check_db_health(circuit.database)
+             )
+             return
+
+         if old_state == CBState.CLOSED and new_state == CBState.OPEN:
+             loop.call_later(DEFAULT_GRACE_PERIOD, _half_open_circuit, circuit)
+
+     async def aclose(self):
+         if self.command_executor.active_database:
+             await self.command_executor.active_database.client.aclose()
+
+
+ def _half_open_circuit(circuit: CircuitBreaker):
+     circuit.state = CBState.HALF_OPEN
+
+
+ class Pipeline(AsyncRedisModuleCommands, AsyncCoreCommands):
+     """
+     Pipeline implementation for multiple logical Redis databases.
+     """
+
+     def __init__(self, client: MultiDBClient):
+         self._command_stack = []
+         self._client = client
+
+     async def __aenter__(self: "Pipeline") -> "Pipeline":
+         return self
+
+     async def __aexit__(self, exc_type, exc_value, traceback):
+         await self.reset()
+         await self._client.__aexit__(exc_type, exc_value, traceback)
+
+     def __await__(self):
+         return self._async_self().__await__()
+
+     async def _async_self(self):
+         return self
+
+     def __len__(self) -> int:
+         return len(self._command_stack)
+
+     def __bool__(self) -> bool:
+         """Pipeline instances should always evaluate to True"""
+         return True
+
+     async def reset(self) -> None:
+         self._command_stack = []
+
+     async def aclose(self) -> None:
+         """Close the pipeline"""
+         await self.reset()
+
+     def pipeline_execute_command(self, *args, **options) -> "Pipeline":
+         """
+         Stage a command to be executed when execute() is next called
+
+         Returns the current Pipeline object back so commands can be
+         chained together, such as:
+
+         pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
+
+         At some other point, you can then run: pipe.execute(),
+         which will execute all commands queued in the pipe.
+         """
+         self._command_stack.append((args, options))
+         return self
+
+     def execute_command(self, *args, **kwargs):
+         """Adds a command to the stack"""
+         return self.pipeline_execute_command(*args, **kwargs)
+
+     async def execute(self) -> List[Any]:
+         """Execute all the commands in the current pipeline"""
+         if not self._client.initialized:
+             await self._client.initialize()
+
+         try:
+             return await self._client.command_executor.execute_pipeline(
+                 tuple(self._command_stack)
+             )
+         finally:
+             await self.reset()
+
+
+ class PubSub:
+     """
+     PubSub object for multi database client.
+     """
+
+     def __init__(self, client: MultiDBClient, **kwargs):
+         """Initialize the PubSub object for a multi-database client.
+
+         Args:
+             client: MultiDBClient instance to use for pub/sub operations
+             **kwargs: Additional keyword arguments to pass to the underlying pubsub implementation
+         """
+
+         self._client = client
+         self._client.command_executor.pubsub(**kwargs)
+
+     async def __aenter__(self) -> "PubSub":
+         return self
+
+     async def __aexit__(self, exc_type, exc_value, traceback) -> None:
+         await self.aclose()
+
+     async def aclose(self):
+         return await self._client.command_executor.execute_pubsub_method("aclose")
+
+     @property
+     def subscribed(self) -> bool:
+         return self._client.command_executor.active_pubsub.subscribed
+
+     async def execute_command(self, *args: EncodableT):
+         return await self._client.command_executor.execute_pubsub_method(
+             "execute_command", *args
+         )
+
+     async def psubscribe(self, *args: ChannelT, **kwargs: PubSubHandler):
+         """
+         Subscribe to channel patterns. Patterns supplied as keyword arguments
+         expect a pattern name as the key and a callable as the value. A
+         pattern's callable will be invoked automatically when a message is
+         received on that pattern rather than producing a message via
+         ``listen()``.
+         """
+         return await self._client.command_executor.execute_pubsub_method(
+             "psubscribe", *args, **kwargs
+         )
+
+     async def punsubscribe(self, *args: ChannelT):
+         """
+         Unsubscribe from the supplied patterns. If empty, unsubscribe from
+         all patterns.
+         """
+         return await self._client.command_executor.execute_pubsub_method(
+             "punsubscribe", *args
+         )
+
+     async def subscribe(self, *args: ChannelT, **kwargs: Callable):
+         """
+         Subscribe to channels. Channels supplied as keyword arguments expect
+         a channel name as the key and a callable as the value. A channel's
+         callable will be invoked automatically when a message is received on
+         that channel rather than producing a message via ``listen()`` or
+         ``get_message()``.
+         """
+         return await self._client.command_executor.execute_pubsub_method(
+             "subscribe", *args, **kwargs
+         )
+
+     async def unsubscribe(self, *args):
+         """
+         Unsubscribe from the supplied channels. If empty, unsubscribe from
+         all channels.
+         """
+         return await self._client.command_executor.execute_pubsub_method(
+             "unsubscribe", *args
+         )
+
+     async def get_message(
+         self, ignore_subscribe_messages: bool = False, timeout: Optional[float] = 0.0
+     ):
+         """
+         Get the next message if one is available, otherwise None.
+
+         If timeout is specified, the system will wait for `timeout` seconds
+         before returning. Timeout should be specified as a floating point
+         number or None to wait indefinitely.
+         """
+         return await self._client.command_executor.execute_pubsub_method(
+             "get_message",
+             ignore_subscribe_messages=ignore_subscribe_messages,
+             timeout=timeout,
+         )
+
+     async def run(
+         self,
+         *,
+         exception_handler=None,
+         poll_timeout: float = 1.0,
+     ) -> None:
+         """Process pub/sub messages using registered callbacks.
+
+         This is the equivalent of :py:meth:`redis.PubSub.run_in_thread` in
+         redis-py, but it is a coroutine. To launch it as a separate task, use
+         ``asyncio.create_task``:
+
+         >>> task = asyncio.create_task(pubsub.run())
+
+         To shut it down, use asyncio cancellation:
+
+         >>> task.cancel()
+         >>> await task
+         """
+         return await self._client.command_executor.execute_pubsub_run(
+             sleep_time=poll_timeout, exception_handler=exception_handler, pubsub=self
+         )
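
For orientation, here is a minimal usage sketch of the new async MultiDBClient. It assumes an already-built MultiDbConfig (its fields are defined in redis/asyncio/multidb/config.py, which is not shown in this hunk); the key names below are placeholders, while the client calls (the async context manager, commands from AsyncCoreCommands, pipeline(), transaction(), aclose()) follow the code above.

    import asyncio

    from redis.asyncio.multidb.client import MultiDBClient, Pipeline
    from redis.asyncio.multidb.config import MultiDbConfig


    async def run_commands(config: MultiDbConfig) -> None:
        # Entering the context manager calls initialize(): all databases are
        # health-checked and the highest-weighted healthy one becomes active.
        async with MultiDBClient(config) as client:
            # Regular commands come from the AsyncCoreCommands mixin and are
            # routed to the active database by the command executor.
            await client.set("foo", "bar")
            print(await client.get("foo"))

            # Pipeline mode: commands are staged locally and sent on execute().
            pipe = client.pipeline()
            pipe.set("a", 1).incr("a")
            print(await pipe.execute())

            # Transactions take a callable that receives a Pipeline.
            async def txn(p: Pipeline):
                p.incr("counter")

            await client.transaction(txn, "counter")

            # aclose() closes the client of the currently active database;
            # leaving the context manager only cancels background health checks.
            await client.aclose()


    # asyncio.run(run_commands(my_config))  # my_config: a MultiDbConfig instance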
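
And a sketch of the multi-database PubSub wrapper, using the handler-style subscription described in subscribe() above; the channel name and handler are placeholders, and the config is again assumed to be built elsewhere.

    import asyncio

    from redis.asyncio.multidb.client import MultiDBClient
    from redis.asyncio.multidb.config import MultiDbConfig


    async def on_message(message) -> None:
        # Invoked by run() for every message received on the subscribed channel.
        print(message)


    async def listen(config: MultiDbConfig) -> None:
        async with MultiDBClient(config) as client:
            # pubsub() initializes the client if needed and returns a PubSub
            # object wired through the command executor.
            pubsub = await client.pubsub()
            await pubsub.subscribe(notifications=on_message)

            # run() is the coroutine counterpart of run_in_thread(); launch it
            # as a task, as the run() docstring suggests.
            task = asyncio.create_task(pubsub.run(poll_timeout=1.0))
            await asyncio.sleep(10)

            # Shut down via cancellation.
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass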