redis 7.0.0b1__py3-none-any.whl → 7.0.0b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +1 -1
- redis/_parsers/base.py +36 -22
- redis/asyncio/client.py +20 -6
- redis/asyncio/cluster.py +11 -2
- redis/asyncio/connection.py +61 -1
- redis/asyncio/http/__init__.py +0 -0
- redis/asyncio/http/http_client.py +265 -0
- redis/asyncio/multidb/__init__.py +0 -0
- redis/asyncio/multidb/client.py +528 -0
- redis/asyncio/multidb/command_executor.py +339 -0
- redis/asyncio/multidb/config.py +210 -0
- redis/asyncio/multidb/database.py +69 -0
- redis/asyncio/multidb/event.py +84 -0
- redis/asyncio/multidb/failover.py +125 -0
- redis/asyncio/multidb/failure_detector.py +38 -0
- redis/asyncio/multidb/healthcheck.py +292 -0
- redis/background.py +204 -0
- redis/client.py +41 -18
- redis/cluster.py +5 -1
- redis/commands/core.py +10 -3
- redis/connection.py +206 -151
- redis/data_structure.py +81 -0
- redis/event.py +84 -10
- redis/http/__init__.py +0 -0
- redis/http/http_client.py +425 -0
- redis/{maintenance_events.py → maint_notifications.py} +154 -140
- redis/multidb/__init__.py +0 -0
- redis/multidb/circuit.py +144 -0
- redis/multidb/client.py +524 -0
- redis/multidb/command_executor.py +350 -0
- redis/multidb/config.py +207 -0
- redis/multidb/database.py +130 -0
- redis/multidb/event.py +89 -0
- redis/multidb/exception.py +17 -0
- redis/multidb/failover.py +125 -0
- redis/multidb/failure_detector.py +104 -0
- redis/multidb/healthcheck.py +289 -0
- redis/retry.py +14 -1
- redis/utils.py +14 -0
- {redis-7.0.0b1.dist-info → redis-7.0.0b3.dist-info}/METADATA +3 -1
- {redis-7.0.0b1.dist-info → redis-7.0.0b3.dist-info}/RECORD +43 -17
- {redis-7.0.0b1.dist-info → redis-7.0.0b3.dist-info}/WHEEL +0 -0
- {redis-7.0.0b1.dist-info → redis-7.0.0b3.dist-info}/licenses/LICENSE +0 -0
redis/asyncio/multidb/failover.py
ADDED
@@ -0,0 +1,125 @@
+import time
+from abc import ABC, abstractmethod
+
+from redis.asyncio.multidb.database import AsyncDatabase, Databases
+from redis.data_structure import WeightedList
+from redis.multidb.circuit import State as CBState
+from redis.multidb.exception import (
+    NoValidDatabaseException,
+    TemporaryUnavailableException,
+)
+
+DEFAULT_FAILOVER_ATTEMPTS = 10
+DEFAULT_FAILOVER_DELAY = 12
+
+
+class AsyncFailoverStrategy(ABC):
+    @abstractmethod
+    async def database(self) -> AsyncDatabase:
+        """Select the database according to the strategy."""
+        pass
+
+    @abstractmethod
+    def set_databases(self, databases: Databases) -> None:
+        """Set the database strategy operates on."""
+        pass
+
+
+class FailoverStrategyExecutor(ABC):
+    @property
+    @abstractmethod
+    def failover_attempts(self) -> int:
+        """The number of failover attempts."""
+        pass
+
+    @property
+    @abstractmethod
+    def failover_delay(self) -> float:
+        """The delay between failover attempts."""
+        pass
+
+    @property
+    @abstractmethod
+    def strategy(self) -> AsyncFailoverStrategy:
+        """The strategy to execute."""
+        pass
+
+    @abstractmethod
+    async def execute(self) -> AsyncDatabase:
+        """Execute the failover strategy."""
+        pass
+
+
+class WeightBasedFailoverStrategy(AsyncFailoverStrategy):
+    """
+    Failover strategy based on database weights.
+    """
+
+    def __init__(self):
+        self._databases = WeightedList()
+
+    async def database(self) -> AsyncDatabase:
+        for database, _ in self._databases:
+            if database.circuit.state == CBState.CLOSED:
+                return database
+
+        raise NoValidDatabaseException("No valid database available for communication")
+
+    def set_databases(self, databases: Databases) -> None:
+        self._databases = databases
+
+
+class DefaultFailoverStrategyExecutor(FailoverStrategyExecutor):
+    """
+    Executes given failover strategy.
+    """
+
+    def __init__(
+        self,
+        strategy: AsyncFailoverStrategy,
+        failover_attempts: int = DEFAULT_FAILOVER_ATTEMPTS,
+        failover_delay: float = DEFAULT_FAILOVER_DELAY,
+    ):
+        self._strategy = strategy
+        self._failover_attempts = failover_attempts
+        self._failover_delay = failover_delay
+        self._next_attempt_ts: int = 0
+        self._failover_counter: int = 0
+
+    @property
+    def failover_attempts(self) -> int:
+        return self._failover_attempts
+
+    @property
+    def failover_delay(self) -> float:
+        return self._failover_delay
+
+    @property
+    def strategy(self) -> AsyncFailoverStrategy:
+        return self._strategy
+
+    async def execute(self) -> AsyncDatabase:
+        try:
+            database = await self._strategy.database()
+            self._reset()
+            return database
+        except NoValidDatabaseException as e:
+            if self._next_attempt_ts == 0:
+                self._next_attempt_ts = time.time() + self._failover_delay
+                self._failover_counter += 1
+            elif time.time() >= self._next_attempt_ts:
+                self._next_attempt_ts += self._failover_delay
+                self._failover_counter += 1
+
+            if self._failover_counter > self._failover_attempts:
+                self._reset()
+                raise e
+            else:
+                raise TemporaryUnavailableException(
+                    "No database connections currently available. "
+                    "This is a temporary condition - please retry the operation."
+                )
+
+    def _reset(self) -> None:
+        self._next_attempt_ts = 0
+        self._failover_counter = 0
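For orientation, a minimal usage sketch of the new async failover API. The module path follows the file list above, and the `databases` argument is hypothetical: in practice the multi-database client builds and manages that weighted list itself.

```python
import asyncio

from redis.asyncio.multidb.failover import (
    DefaultFailoverStrategyExecutor,
    WeightBasedFailoverStrategy,
)
from redis.multidb.exception import TemporaryUnavailableException


async def pick_database(databases):
    # `databases` is assumed to be a populated Databases (WeightedList) of
    # AsyncDatabase entries; normally the multi-db client manages it.
    strategy = WeightBasedFailoverStrategy()
    strategy.set_databases(databases)

    executor = DefaultFailoverStrategyExecutor(
        strategy, failover_attempts=3, failover_delay=1.0
    )

    while True:
        try:
            # Returns the first database whose circuit breaker is CLOSED.
            return await executor.execute()
        except TemporaryUnavailableException:
            # Nothing is reachable right now; back off and retry until the
            # executor exhausts its attempts and re-raises
            # NoValidDatabaseException.
            await asyncio.sleep(1.0)
```

Note the design split visible in the diff: the executor converts repeated `NoValidDatabaseException` results into `TemporaryUnavailableException` until `failover_attempts` is exceeded, which is why the retry loop above only catches the temporary error.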
redis/asyncio/multidb/failure_detector.py
ADDED
@@ -0,0 +1,38 @@
+from abc import ABC, abstractmethod
+
+from redis.multidb.failure_detector import FailureDetector
+
+
+class AsyncFailureDetector(ABC):
+    @abstractmethod
+    async def register_failure(self, exception: Exception, cmd: tuple) -> None:
+        """Register a failure that occurred during command execution."""
+        pass
+
+    @abstractmethod
+    async def register_command_execution(self, cmd: tuple) -> None:
+        """Register a command execution."""
+        pass
+
+    @abstractmethod
+    def set_command_executor(self, command_executor) -> None:
+        """Set the command executor for this failure."""
+        pass
+
+
+class FailureDetectorAsyncWrapper(AsyncFailureDetector):
+    """
+    Async wrapper for the failure detector.
+    """
+
+    def __init__(self, failure_detector: FailureDetector) -> None:
+        self._failure_detector = failure_detector
+
+    async def register_failure(self, exception: Exception, cmd: tuple) -> None:
+        self._failure_detector.register_failure(exception, cmd)
+
+    async def register_command_execution(self, cmd: tuple) -> None:
+        self._failure_detector.register_command_execution(cmd)
+
+    def set_command_executor(self, command_executor) -> None:
+        self._failure_detector.set_command_executor(command_executor)
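A short, hedged sketch of the wrapper's intended use: adapting a synchronous detector so the async command executor can await its callbacks. The `CountingFailureDetector` below is hypothetical and implements only the three methods the wrapper delegates to; the package's real detectors live in redis/multidb/failure_detector.py, whose contents are not shown in this excerpt, and the import path is taken from the file list above.

```python
import asyncio

from redis.asyncio.multidb.failure_detector import FailureDetectorAsyncWrapper


class CountingFailureDetector:
    """Hypothetical sync detector, for illustration only."""

    def __init__(self):
        self.failures = 0
        self.executions = 0

    def register_failure(self, exception, cmd):
        self.failures += 1

    def register_command_execution(self, cmd):
        self.executions += 1

    def set_command_executor(self, command_executor):
        self._command_executor = command_executor


async def main():
    detector = FailureDetectorAsyncWrapper(CountingFailureDetector())
    await detector.register_command_execution(("GET", "key"))
    await detector.register_failure(ConnectionError("connection dropped"), ("GET", "key"))


asyncio.run(main())
```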
redis/asyncio/multidb/healthcheck.py
ADDED
@@ -0,0 +1,292 @@
+import asyncio
+import logging
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import List, Optional, Tuple, Union
+
+from redis.asyncio import Redis
+from redis.asyncio.http.http_client import DEFAULT_TIMEOUT, AsyncHTTPClientWrapper
+from redis.backoff import NoBackoff
+from redis.http.http_client import HttpClient
+from redis.multidb.exception import UnhealthyDatabaseException
+from redis.retry import Retry
+
+DEFAULT_HEALTH_CHECK_PROBES = 3
+DEFAULT_HEALTH_CHECK_INTERVAL = 5
+DEFAULT_HEALTH_CHECK_DELAY = 0.5
+DEFAULT_LAG_AWARE_TOLERANCE = 5000
+
+logger = logging.getLogger(__name__)
+
+
+class HealthCheck(ABC):
+    @abstractmethod
+    async def check_health(self, database) -> bool:
+        """Function to determine the health status."""
+        pass
+
+
+class HealthCheckPolicy(ABC):
+    """
+    Health checks execution policy.
+    """
+
+    @property
+    @abstractmethod
+    def health_check_probes(self) -> int:
+        """Number of probes to execute health checks."""
+        pass
+
+    @property
+    @abstractmethod
+    def health_check_delay(self) -> float:
+        """Delay between health check probes."""
+        pass
+
+    @abstractmethod
+    async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+        """Execute health checks and return database health status."""
+        pass
+
+
+class AbstractHealthCheckPolicy(HealthCheckPolicy):
+    def __init__(self, health_check_probes: int, health_check_delay: float):
+        if health_check_probes < 1:
+            raise ValueError("health_check_probes must be greater than 0")
+        self._health_check_probes = health_check_probes
+        self._health_check_delay = health_check_delay
+
+    @property
+    def health_check_probes(self) -> int:
+        return self._health_check_probes
+
+    @property
+    def health_check_delay(self) -> float:
+        return self._health_check_delay
+
+    @abstractmethod
+    async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+        pass
+
+
+class HealthyAllPolicy(AbstractHealthCheckPolicy):
+    """
+    Policy that returns True if all health check probes are successful.
+    """
+
+    def __init__(self, health_check_probes: int, health_check_delay: float):
+        super().__init__(health_check_probes, health_check_delay)
+
+    async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+        for health_check in health_checks:
+            for attempt in range(self.health_check_probes):
+                try:
+                    if not await health_check.check_health(database):
+                        return False
+                except Exception as e:
+                    raise UnhealthyDatabaseException("Unhealthy database", database, e)
+
+                if attempt < self.health_check_probes - 1:
+                    await asyncio.sleep(self._health_check_delay)
+        return True
+
+
+class HealthyMajorityPolicy(AbstractHealthCheckPolicy):
+    """
+    Policy that returns True if a majority of health check probes are successful.
+    """
+
+    def __init__(self, health_check_probes: int, health_check_delay: float):
+        super().__init__(health_check_probes, health_check_delay)
+
+    async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+        for health_check in health_checks:
+            if self.health_check_probes % 2 == 0:
+                allowed_unsuccessful_probes = self.health_check_probes / 2
+            else:
+                allowed_unsuccessful_probes = (self.health_check_probes + 1) / 2
+
+            for attempt in range(self.health_check_probes):
+                try:
+                    if not await health_check.check_health(database):
+                        allowed_unsuccessful_probes -= 1
+                        if allowed_unsuccessful_probes <= 0:
+                            return False
+                except Exception as e:
+                    allowed_unsuccessful_probes -= 1
+                    if allowed_unsuccessful_probes <= 0:
+                        raise UnhealthyDatabaseException(
+                            "Unhealthy database", database, e
+                        )
+
+                if attempt < self.health_check_probes - 1:
+                    await asyncio.sleep(self._health_check_delay)
+        return True
+
+
+class HealthyAnyPolicy(AbstractHealthCheckPolicy):
+    """
+    Policy that returns True if at least one health check probe is successful.
+    """
+
+    def __init__(self, health_check_probes: int, health_check_delay: float):
+        super().__init__(health_check_probes, health_check_delay)
+
+    async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+        is_healthy = False
+
+        for health_check in health_checks:
+            exception = None
+
+            for attempt in range(self.health_check_probes):
+                try:
+                    if await health_check.check_health(database):
+                        is_healthy = True
+                        break
+                    else:
+                        is_healthy = False
+                except Exception as e:
+                    exception = UnhealthyDatabaseException(
+                        "Unhealthy database", database, e
+                    )
+
+                if attempt < self.health_check_probes - 1:
+                    await asyncio.sleep(self._health_check_delay)
+
+            if not is_healthy and not exception:
+                return is_healthy
+            elif not is_healthy and exception:
+                raise exception
+
+        return is_healthy
+
+
+class HealthCheckPolicies(Enum):
+    HEALTHY_ALL = HealthyAllPolicy
+    HEALTHY_MAJORITY = HealthyMajorityPolicy
+    HEALTHY_ANY = HealthyAnyPolicy
+
+
+DEFAULT_HEALTH_CHECK_POLICY: HealthCheckPolicies = HealthCheckPolicies.HEALTHY_ALL
+
+
+class EchoHealthCheck(HealthCheck):
+    """
+    Health check based on ECHO command.
+    """
+
+    async def check_health(self, database) -> bool:
+        expected_message = ["healthcheck", b"healthcheck"]
+
+        if isinstance(database.client, Redis):
+            actual_message = await database.client.execute_command(
+                "ECHO", "healthcheck"
+            )
+            return actual_message in expected_message
+        else:
+            # For a cluster checks if all nodes are healthy.
+            all_nodes = database.client.get_nodes()
+            for node in all_nodes:
+                actual_message = await node.execute_command("ECHO", "healthcheck")
+
+                if actual_message not in expected_message:
+                    return False
+
+            return True
+
+
+class LagAwareHealthCheck(HealthCheck):
+    """
+    Health check available for Redis Enterprise deployments.
+    Verify via REST API that the database is healthy based on different lags.
+    """
+
+    def __init__(
+        self,
+        rest_api_port: int = 9443,
+        lag_aware_tolerance: int = DEFAULT_LAG_AWARE_TOLERANCE,
+        timeout: float = DEFAULT_TIMEOUT,
+        auth_basic: Optional[Tuple[str, str]] = None,
+        verify_tls: bool = True,
+        # TLS verification (server) options
+        ca_file: Optional[str] = None,
+        ca_path: Optional[str] = None,
+        ca_data: Optional[Union[str, bytes]] = None,
+        # Mutual TLS (client cert) options
+        client_cert_file: Optional[str] = None,
+        client_key_file: Optional[str] = None,
+        client_key_password: Optional[str] = None,
+    ):
+        """
+        Initialize LagAwareHealthCheck with the specified parameters.
+
+        Args:
+            rest_api_port: Port number for Redis Enterprise REST API (default: 9443)
+            lag_aware_tolerance: Tolerance in lag between databases in MS (default: 100)
+            timeout: Request timeout in seconds (default: DEFAULT_TIMEOUT)
+            auth_basic: Tuple of (username, password) for basic authentication
+            verify_tls: Whether to verify TLS certificates (default: True)
+            ca_file: Path to CA certificate file for TLS verification
+            ca_path: Path to CA certificates directory for TLS verification
+            ca_data: CA certificate data as string or bytes
+            client_cert_file: Path to client certificate file for mutual TLS
+            client_key_file: Path to client private key file for mutual TLS
+            client_key_password: Password for encrypted client private key
+        """
+        self._http_client = AsyncHTTPClientWrapper(
+            HttpClient(
+                timeout=timeout,
+                auth_basic=auth_basic,
+                retry=Retry(NoBackoff(), retries=0),
+                verify_tls=verify_tls,
+                ca_file=ca_file,
+                ca_path=ca_path,
+                ca_data=ca_data,
+                client_cert_file=client_cert_file,
+                client_key_file=client_key_file,
+                client_key_password=client_key_password,
+            )
+        )
+        self._rest_api_port = rest_api_port
+        self._lag_aware_tolerance = lag_aware_tolerance
+
+    async def check_health(self, database) -> bool:
+        if database.health_check_url is None:
+            raise ValueError(
+                "Database health check url is not set. Please check DatabaseConfig for the current database."
+            )
+
+        if isinstance(database.client, Redis):
+            db_host = database.client.get_connection_kwargs()["host"]
+        else:
+            db_host = database.client.startup_nodes[0].host
+
+        base_url = f"{database.health_check_url}:{self._rest_api_port}"
+        self._http_client.client.base_url = base_url
+
+        # Find bdb matching to the current database host
+        matching_bdb = None
+        for bdb in await self._http_client.get("/v1/bdbs"):
+            for endpoint in bdb["endpoints"]:
+                if endpoint["dns_name"] == db_host:
+                    matching_bdb = bdb
+                    break
+
+                # In case if the host was set as public IP
+                for addr in endpoint["addr"]:
+                    if addr == db_host:
+                        matching_bdb = bdb
+                        break
+
+        if matching_bdb is None:
+            logger.warning("LagAwareHealthCheck failed: Couldn't find a matching bdb")
+            raise ValueError("Could not find a matching bdb")
+
+        url = (
+            f"/v1/bdbs/{matching_bdb['uid']}/availability"
+            f"?extend_check=lag&availability_lag_tolerance_ms={self._lag_aware_tolerance}"
+        )
+        await self._http_client.get(url, expect_json=False)
+
+        # Status checked in an http client, otherwise HttpError will be raised
+        return True
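A hedged sketch combining the pieces above: probing a database with an `EchoHealthCheck` and a `LagAwareHealthCheck` under `HealthyAllPolicy`. The `database` argument is assumed to be the multi-db database wrapper (exposing `.client` and `.health_check_url` from its `DatabaseConfig`), the credentials are placeholders, and the module path follows the file list above.

```python
from redis.asyncio.multidb.healthcheck import (
    DEFAULT_HEALTH_CHECK_DELAY,
    DEFAULT_HEALTH_CHECK_PROBES,
    EchoHealthCheck,
    HealthyAllPolicy,
    LagAwareHealthCheck,
)


async def probe(database) -> bool:
    policy = HealthyAllPolicy(DEFAULT_HEALTH_CHECK_PROBES, DEFAULT_HEALTH_CHECK_DELAY)
    checks = [
        EchoHealthCheck(),
        # Redis Enterprise only: checks replication lag via the REST API on
        # port 9443; requires database.health_check_url to be set.
        LagAwareHealthCheck(auth_basic=("user", "password")),
    ]
    # True only if every probe of every check succeeds; a probe that raises
    # is surfaced as UnhealthyDatabaseException.
    return await policy.execute(checks, database)
```

Swapping `HealthyAllPolicy` for `HealthyMajorityPolicy` or `HealthyAnyPolicy` changes only how probe results are aggregated, not how the individual checks run.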
redis/background.py
ADDED
@@ -0,0 +1,204 @@
+import asyncio
+import threading
+from typing import Any, Callable, Coroutine
+
+
+class BackgroundScheduler:
+    """
+    Schedules background tasks execution either in separate thread or in the running event loop.
+    """
+
+    def __init__(self):
+        self._next_timer = None
+        self._event_loops = []
+        self._lock = threading.Lock()
+        self._stopped = False
+
+    def __del__(self):
+        self.stop()
+
+    def stop(self):
+        """
+        Stop all scheduled tasks and clean up resources.
+        """
+        with self._lock:
+            if self._stopped:
+                return
+            self._stopped = True
+
+            if self._next_timer:
+                self._next_timer.cancel()
+                self._next_timer = None
+
+            # Stop all event loops
+            for loop in self._event_loops:
+                if loop.is_running():
+                    loop.call_soon_threadsafe(loop.stop)
+
+            self._event_loops.clear()
+
+    def run_once(self, delay: float, callback: Callable, *args):
+        """
+        Runs callable task once after certain delay in seconds.
+        """
+        with self._lock:
+            if self._stopped:
+                return
+
+        # Run loop in a separate thread to unblock main thread.
+        loop = asyncio.new_event_loop()
+
+        with self._lock:
+            self._event_loops.append(loop)
+
+        thread = threading.Thread(
+            target=_start_event_loop_in_thread,
+            args=(loop, self._call_later, delay, callback, *args),
+            daemon=True,
+        )
+        thread.start()
+
+    def run_recurring(self, interval: float, callback: Callable, *args):
+        """
+        Runs recurring callable task with given interval in seconds.
+        """
+        with self._lock:
+            if self._stopped:
+                return
+
+        # Run loop in a separate thread to unblock main thread.
+        loop = asyncio.new_event_loop()
+
+        with self._lock:
+            self._event_loops.append(loop)
+
+        thread = threading.Thread(
+            target=_start_event_loop_in_thread,
+            args=(loop, self._call_later_recurring, interval, callback, *args),
+            daemon=True,
+        )
+        thread.start()
+
+    async def run_recurring_async(
+        self, interval: float, coro: Callable[..., Coroutine[Any, Any, Any]], *args
+    ):
+        """
+        Runs recurring coroutine with given interval in seconds in the current event loop.
+        To be used only from an async context. No additional threads are created.
+        """
+        with self._lock:
+            if self._stopped:
+                return
+
+        loop = asyncio.get_running_loop()
+        wrapped = _async_to_sync_wrapper(loop, coro, *args)
+
+        def tick():
+            with self._lock:
+                if self._stopped:
+                    return
+                # Schedule the coroutine
+                wrapped()
+                # Schedule next tick
+                self._next_timer = loop.call_later(interval, tick)
+
+        # Schedule first tick
+        self._next_timer = loop.call_later(interval, tick)
+
+    def _call_later(
+        self, loop: asyncio.AbstractEventLoop, delay: float, callback: Callable, *args
+    ):
+        with self._lock:
+            if self._stopped:
+                return
+            self._next_timer = loop.call_later(delay, callback, *args)
+
+    def _call_later_recurring(
+        self,
+        loop: asyncio.AbstractEventLoop,
+        interval: float,
+        callback: Callable,
+        *args,
+    ):
+        with self._lock:
+            if self._stopped:
+                return
+        self._call_later(
+            loop, interval, self._execute_recurring, loop, interval, callback, *args
+        )
+
+    def _execute_recurring(
+        self,
+        loop: asyncio.AbstractEventLoop,
+        interval: float,
+        callback: Callable,
+        *args,
+    ):
+        """
+        Executes recurring callable task with given interval in seconds.
+        """
+        with self._lock:
+            if self._stopped:
+                return
+
+        try:
+            callback(*args)
+        except Exception:
+            # Silently ignore exceptions during shutdown
+            pass
+
+        with self._lock:
+            if self._stopped:
+                return
+
+        self._call_later(
+            loop, interval, self._execute_recurring, loop, interval, callback, *args
+        )
+
+
+def _start_event_loop_in_thread(
+    event_loop: asyncio.AbstractEventLoop, call_soon_cb: Callable, *args
+):
+    """
+    Starts event loop in a thread and schedule callback as soon as event loop is ready.
+    Used to be able to schedule tasks using loop.call_later.
+
+    :param event_loop:
+    :return:
+    """
+    asyncio.set_event_loop(event_loop)
+    event_loop.call_soon(call_soon_cb, event_loop, *args)
+    try:
+        event_loop.run_forever()
+    finally:
+        try:
+            # Clean up pending tasks
+            pending = asyncio.all_tasks(event_loop)
+            for task in pending:
+                task.cancel()
+            # Run loop once more to process cancellations
+            event_loop.run_until_complete(
+                asyncio.gather(*pending, return_exceptions=True)
+            )
+        except Exception:
+            pass
+        finally:
+            event_loop.close()
+
+
+def _async_to_sync_wrapper(loop, coro_func, *args, **kwargs):
+    """
+    Wraps an asynchronous function so it can be used with loop.call_later.
+
+    :param loop: The event loop in which the coroutine will be executed.
+    :param coro_func: The coroutine function to wrap.
+    :param args: Positional arguments to pass to the coroutine function.
+    :param kwargs: Keyword arguments to pass to the coroutine function.
+    :return: A regular function suitable for loop.call_later.
+    """
+
+    def wrapped():
+        # Schedule the coroutine in the event loop
+        asyncio.ensure_future(coro_func(*args, **kwargs), loop=loop)
+
+    return wrapped
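A hedged sketch of the two scheduling modes shown above: the thread-backed `run_recurring` for synchronous callbacks and `run_recurring_async` for coroutines on an already running loop. The tick callbacks and intervals below are illustrative only.

```python
import asyncio
import time

from redis.background import BackgroundScheduler


def log_tick():
    print("tick from a background thread")


async def async_tick():
    print("tick on the running event loop")


def sync_usage():
    scheduler = BackgroundScheduler()
    # Spins up a private event loop in a daemon thread and re-schedules
    # log_tick every 2 seconds until stop() is called.
    scheduler.run_recurring(2.0, log_tick)
    time.sleep(7)
    scheduler.stop()


async def async_usage():
    scheduler = BackgroundScheduler()
    # Schedules async_tick on the current loop via loop.call_later;
    # no extra threads are created.
    await scheduler.run_recurring_async(2.0, async_tick)
    await asyncio.sleep(7)
    scheduler.stop()


if __name__ == "__main__":
    sync_usage()
    asyncio.run(async_usage())
```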