redis-7.0.0b2-py3-none-any.whl → redis-7.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. redis/__init__.py +1 -1
  2. redis/_parsers/base.py +6 -0
  3. redis/_parsers/helpers.py +64 -6
  4. redis/asyncio/client.py +14 -5
  5. redis/asyncio/cluster.py +5 -1
  6. redis/asyncio/connection.py +19 -1
  7. redis/asyncio/http/__init__.py +0 -0
  8. redis/asyncio/http/http_client.py +265 -0
  9. redis/asyncio/multidb/__init__.py +0 -0
  10. redis/asyncio/multidb/client.py +530 -0
  11. redis/asyncio/multidb/command_executor.py +339 -0
  12. redis/asyncio/multidb/config.py +210 -0
  13. redis/asyncio/multidb/database.py +69 -0
  14. redis/asyncio/multidb/event.py +84 -0
  15. redis/asyncio/multidb/failover.py +125 -0
  16. redis/asyncio/multidb/failure_detector.py +38 -0
  17. redis/asyncio/multidb/healthcheck.py +285 -0
  18. redis/background.py +204 -0
  19. redis/client.py +49 -27
  20. redis/cluster.py +9 -1
  21. redis/commands/core.py +64 -29
  22. redis/commands/json/commands.py +2 -2
  23. redis/commands/search/__init__.py +2 -2
  24. redis/commands/search/aggregation.py +24 -26
  25. redis/commands/search/commands.py +10 -10
  26. redis/commands/search/field.py +2 -2
  27. redis/commands/search/query.py +12 -12
  28. redis/connection.py +1613 -1263
  29. redis/data_structure.py +81 -0
  30. redis/event.py +84 -10
  31. redis/exceptions.py +8 -0
  32. redis/http/__init__.py +0 -0
  33. redis/http/http_client.py +425 -0
  34. redis/maint_notifications.py +18 -7
  35. redis/multidb/__init__.py +0 -0
  36. redis/multidb/circuit.py +144 -0
  37. redis/multidb/client.py +526 -0
  38. redis/multidb/command_executor.py +350 -0
  39. redis/multidb/config.py +207 -0
  40. redis/multidb/database.py +130 -0
  41. redis/multidb/event.py +89 -0
  42. redis/multidb/exception.py +17 -0
  43. redis/multidb/failover.py +125 -0
  44. redis/multidb/failure_detector.py +104 -0
  45. redis/multidb/healthcheck.py +282 -0
  46. redis/retry.py +14 -1
  47. redis/utils.py +34 -0
  48. {redis-7.0.0b2.dist-info → redis-7.0.1.dist-info}/METADATA +17 -4
  49. {redis-7.0.0b2.dist-info → redis-7.0.1.dist-info}/RECORD +51 -25
  50. {redis-7.0.0b2.dist-info → redis-7.0.1.dist-info}/WHEEL +0 -0
  51. {redis-7.0.0b2.dist-info → redis-7.0.1.dist-info}/licenses/LICENSE +0 -0
redis/asyncio/multidb/failover.py ADDED
@@ -0,0 +1,125 @@
+ import time
+ from abc import ABC, abstractmethod
+
+ from redis.asyncio.multidb.database import AsyncDatabase, Databases
+ from redis.data_structure import WeightedList
+ from redis.multidb.circuit import State as CBState
+ from redis.multidb.exception import (
+     NoValidDatabaseException,
+     TemporaryUnavailableException,
+ )
+
+ DEFAULT_FAILOVER_ATTEMPTS = 10
+ DEFAULT_FAILOVER_DELAY = 12
+
+
+ class AsyncFailoverStrategy(ABC):
+     @abstractmethod
+     async def database(self) -> AsyncDatabase:
+         """Select the database according to the strategy."""
+         pass
+
+     @abstractmethod
+     def set_databases(self, databases: Databases) -> None:
+         """Set the databases the strategy operates on."""
+         pass
+
+
+ class FailoverStrategyExecutor(ABC):
+     @property
+     @abstractmethod
+     def failover_attempts(self) -> int:
+         """The number of failover attempts."""
+         pass
+
+     @property
+     @abstractmethod
+     def failover_delay(self) -> float:
+         """The delay between failover attempts."""
+         pass
+
+     @property
+     @abstractmethod
+     def strategy(self) -> AsyncFailoverStrategy:
+         """The strategy to execute."""
+         pass
+
+     @abstractmethod
+     async def execute(self) -> AsyncDatabase:
+         """Execute the failover strategy."""
+         pass
+
+
+ class WeightBasedFailoverStrategy(AsyncFailoverStrategy):
+     """
+     Failover strategy based on database weights.
+     """
+
+     def __init__(self):
+         self._databases = WeightedList()
+
+     async def database(self) -> AsyncDatabase:
+         for database, _ in self._databases:
+             if database.circuit.state == CBState.CLOSED:
+                 return database
+
+         raise NoValidDatabaseException("No valid database available for communication")
+
+     def set_databases(self, databases: Databases) -> None:
+         self._databases = databases
+
+
+ class DefaultFailoverStrategyExecutor(FailoverStrategyExecutor):
+     """
+     Executes the given failover strategy.
+     """
+
+     def __init__(
+         self,
+         strategy: AsyncFailoverStrategy,
+         failover_attempts: int = DEFAULT_FAILOVER_ATTEMPTS,
+         failover_delay: float = DEFAULT_FAILOVER_DELAY,
+     ):
+         self._strategy = strategy
+         self._failover_attempts = failover_attempts
+         self._failover_delay = failover_delay
+         self._next_attempt_ts: float = 0
+         self._failover_counter: int = 0
+
+     @property
+     def failover_attempts(self) -> int:
+         return self._failover_attempts
+
+     @property
+     def failover_delay(self) -> float:
+         return self._failover_delay
+
+     @property
+     def strategy(self) -> AsyncFailoverStrategy:
+         return self._strategy
+
+     async def execute(self) -> AsyncDatabase:
+         try:
+             database = await self._strategy.database()
+             self._reset()
+             return database
+         except NoValidDatabaseException as e:
+             if self._next_attempt_ts == 0:
+                 self._next_attempt_ts = time.time() + self._failover_delay
+                 self._failover_counter += 1
+             elif time.time() >= self._next_attempt_ts:
+                 self._next_attempt_ts += self._failover_delay
+                 self._failover_counter += 1
+
+             if self._failover_counter > self._failover_attempts:
+                 self._reset()
+                 raise e
+             else:
+                 raise TemporaryUnavailableException(
+                     "No database connections currently available. "
+                     "This is a temporary condition - please retry the operation."
+                 )
+
+     def _reset(self) -> None:
+         self._next_attempt_ts = 0
+         self._failover_counter = 0
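Usage note: the executor turns the strategy's immediate "no healthy database" answer into a bounded retry window, so callers are expected to catch TemporaryUnavailableException and retry until either a database becomes available or the attempt budget is exhausted. A minimal sketch of that calling pattern, assuming `databases` is an already-populated Databases weighted list (its construction lives in the multidb config modules, not in this file):

import asyncio

from redis.asyncio.multidb.failover import (
    DefaultFailoverStrategyExecutor,
    WeightBasedFailoverStrategy,
)
from redis.multidb.exception import TemporaryUnavailableException


async def pick_database(databases):
    # `databases` is assumed to be a populated Databases / WeightedList instance.
    strategy = WeightBasedFailoverStrategy()
    strategy.set_databases(databases)
    executor = DefaultFailoverStrategyExecutor(
        strategy, failover_attempts=3, failover_delay=1.0
    )

    while True:
        try:
            return await executor.execute()
        except TemporaryUnavailableException:
            # No database is currently healthy; wait and retry. Once the attempt
            # budget is exhausted, NoValidDatabaseException propagates instead.
            await asyncio.sleep(1.0)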
redis/asyncio/multidb/failure_detector.py ADDED
@@ -0,0 +1,38 @@
+ from abc import ABC, abstractmethod
+
+ from redis.multidb.failure_detector import FailureDetector
+
+
+ class AsyncFailureDetector(ABC):
+     @abstractmethod
+     async def register_failure(self, exception: Exception, cmd: tuple) -> None:
+         """Register a failure that occurred during command execution."""
+         pass
+
+     @abstractmethod
+     async def register_command_execution(self, cmd: tuple) -> None:
+         """Register a command execution."""
+         pass
+
+     @abstractmethod
+     def set_command_executor(self, command_executor) -> None:
+         """Set the command executor for this failure detector."""
+         pass
+
+
+ class FailureDetectorAsyncWrapper(AsyncFailureDetector):
+     """
+     Async wrapper for the failure detector.
+     """
+
+     def __init__(self, failure_detector: FailureDetector) -> None:
+         self._failure_detector = failure_detector
+
+     async def register_failure(self, exception: Exception, cmd: tuple) -> None:
+         self._failure_detector.register_failure(exception, cmd)
+
+     async def register_command_execution(self, cmd: tuple) -> None:
+         self._failure_detector.register_command_execution(cmd)
+
+     def set_command_executor(self, command_executor) -> None:
+         self._failure_detector.set_command_executor(command_executor)
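Usage note: the wrapper simply forwards each async call to the underlying synchronous detector, so the same detector implementation can back both the sync and async multi-database clients. A minimal sketch of the delegation, using a hypothetical stand-in detector rather than a real FailureDetector subclass (whose constructor is not shown in this diff):

import asyncio

from redis.asyncio.multidb.failure_detector import FailureDetectorAsyncWrapper


class _CountingDetector:
    """Hypothetical stand-in exposing the sync FailureDetector interface."""

    def __init__(self):
        self.failures = 0
        self.executions = 0

    def register_failure(self, exception, cmd):
        self.failures += 1

    def register_command_execution(self, cmd):
        self.executions += 1

    def set_command_executor(self, command_executor):
        self._executor = command_executor


async def main():
    detector = FailureDetectorAsyncWrapper(_CountingDetector())
    # Each awaited call delegates synchronously to the wrapped detector.
    await detector.register_command_execution(("GET", "key"))
    await detector.register_failure(ConnectionError("boom"), ("GET", "key"))


asyncio.run(main())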
redis/asyncio/multidb/healthcheck.py ADDED
@@ -0,0 +1,285 @@
+ import asyncio
+ import logging
+ from abc import ABC, abstractmethod
+ from enum import Enum
+ from typing import List, Optional, Tuple, Union
+
+ from redis.asyncio import Redis
+ from redis.asyncio.http.http_client import DEFAULT_TIMEOUT, AsyncHTTPClientWrapper
+ from redis.backoff import NoBackoff
+ from redis.http.http_client import HttpClient
+ from redis.multidb.exception import UnhealthyDatabaseException
+ from redis.retry import Retry
+
+ DEFAULT_HEALTH_CHECK_PROBES = 3
+ DEFAULT_HEALTH_CHECK_INTERVAL = 5
+ DEFAULT_HEALTH_CHECK_DELAY = 0.5
+ DEFAULT_LAG_AWARE_TOLERANCE = 5000
+
+ logger = logging.getLogger(__name__)
+
+
+ class HealthCheck(ABC):
+     @abstractmethod
+     async def check_health(self, database) -> bool:
+         """Function to determine the health status."""
+         pass
+
+
+ class HealthCheckPolicy(ABC):
+     """
+     Health checks execution policy.
+     """
+
+     @property
+     @abstractmethod
+     def health_check_probes(self) -> int:
+         """Number of probes to execute health checks."""
+         pass
+
+     @property
+     @abstractmethod
+     def health_check_delay(self) -> float:
+         """Delay between health check probes."""
+         pass
+
+     @abstractmethod
+     async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+         """Execute health checks and return database health status."""
+         pass
+
+
+ class AbstractHealthCheckPolicy(HealthCheckPolicy):
+     def __init__(self, health_check_probes: int, health_check_delay: float):
+         if health_check_probes < 1:
+             raise ValueError("health_check_probes must be greater than 0")
+         self._health_check_probes = health_check_probes
+         self._health_check_delay = health_check_delay
+
+     @property
+     def health_check_probes(self) -> int:
+         return self._health_check_probes
+
+     @property
+     def health_check_delay(self) -> float:
+         return self._health_check_delay
+
+     @abstractmethod
+     async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+         pass
+
+
+ class HealthyAllPolicy(AbstractHealthCheckPolicy):
+     """
+     Policy that returns True if all health check probes are successful.
+     """
+
+     def __init__(self, health_check_probes: int, health_check_delay: float):
+         super().__init__(health_check_probes, health_check_delay)
+
+     async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+         for health_check in health_checks:
+             for attempt in range(self.health_check_probes):
+                 try:
+                     if not await health_check.check_health(database):
+                         return False
+                 except Exception as e:
+                     raise UnhealthyDatabaseException("Unhealthy database", database, e)
+
+                 if attempt < self.health_check_probes - 1:
+                     await asyncio.sleep(self._health_check_delay)
+         return True
+
+
+ class HealthyMajorityPolicy(AbstractHealthCheckPolicy):
+     """
+     Policy that returns True if a majority of health check probes are successful.
+     """
+
+     def __init__(self, health_check_probes: int, health_check_delay: float):
+         super().__init__(health_check_probes, health_check_delay)
+
+     async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+         for health_check in health_checks:
+             if self.health_check_probes % 2 == 0:
+                 allowed_unsuccessful_probes = self.health_check_probes / 2
+             else:
+                 allowed_unsuccessful_probes = (self.health_check_probes + 1) / 2
+
+             for attempt in range(self.health_check_probes):
+                 try:
+                     if not await health_check.check_health(database):
+                         allowed_unsuccessful_probes -= 1
+                         if allowed_unsuccessful_probes <= 0:
+                             return False
+                 except Exception as e:
+                     allowed_unsuccessful_probes -= 1
+                     if allowed_unsuccessful_probes <= 0:
+                         raise UnhealthyDatabaseException(
+                             "Unhealthy database", database, e
+                         )
+
+                 if attempt < self.health_check_probes - 1:
+                     await asyncio.sleep(self._health_check_delay)
+         return True
+
+
+ class HealthyAnyPolicy(AbstractHealthCheckPolicy):
+     """
+     Policy that returns True if at least one health check probe is successful.
+     """
+
+     def __init__(self, health_check_probes: int, health_check_delay: float):
+         super().__init__(health_check_probes, health_check_delay)
+
+     async def execute(self, health_checks: List[HealthCheck], database) -> bool:
+         is_healthy = False
+
+         for health_check in health_checks:
+             exception = None
+
+             for attempt in range(self.health_check_probes):
+                 try:
+                     if await health_check.check_health(database):
+                         is_healthy = True
+                         break
+                     else:
+                         is_healthy = False
+                 except Exception as e:
+                     exception = UnhealthyDatabaseException(
+                         "Unhealthy database", database, e
+                     )
+
+                 if attempt < self.health_check_probes - 1:
+                     await asyncio.sleep(self._health_check_delay)
+
+             if not is_healthy and not exception:
+                 return is_healthy
+             elif not is_healthy and exception:
+                 raise exception
+
+         return is_healthy
+
+
+ class HealthCheckPolicies(Enum):
+     HEALTHY_ALL = HealthyAllPolicy
+     HEALTHY_MAJORITY = HealthyMajorityPolicy
+     HEALTHY_ANY = HealthyAnyPolicy
+
+
+ DEFAULT_HEALTH_CHECK_POLICY: HealthCheckPolicies = HealthCheckPolicies.HEALTHY_ALL
+
+
+ class PingHealthCheck(HealthCheck):
+     """
+     Health check based on the PING command.
+     """
+
+     async def check_health(self, database) -> bool:
+         if isinstance(database.client, Redis):
+             return await database.client.execute_command("PING")
+         else:
+             # For a cluster, check that all nodes are healthy.
+             all_nodes = database.client.get_nodes()
+             for node in all_nodes:
+                 if not await node.redis_connection.execute_command("PING"):
+                     return False
+
+             return True
+
+
+ class LagAwareHealthCheck(HealthCheck):
+     """
+     Health check available for Redis Enterprise deployments.
+     Verifies via the REST API that the database is healthy within the configured lag tolerance.
+     """
+
+     def __init__(
+         self,
+         rest_api_port: int = 9443,
+         lag_aware_tolerance: int = DEFAULT_LAG_AWARE_TOLERANCE,
+         timeout: float = DEFAULT_TIMEOUT,
+         auth_basic: Optional[Tuple[str, str]] = None,
+         verify_tls: bool = True,
+         # TLS verification (server) options
+         ca_file: Optional[str] = None,
+         ca_path: Optional[str] = None,
+         ca_data: Optional[Union[str, bytes]] = None,
+         # Mutual TLS (client cert) options
+         client_cert_file: Optional[str] = None,
+         client_key_file: Optional[str] = None,
+         client_key_password: Optional[str] = None,
+     ):
+         """
+         Initialize LagAwareHealthCheck with the specified parameters.
+
+         Args:
+             rest_api_port: Port number for the Redis Enterprise REST API (default: 9443)
+             lag_aware_tolerance: Tolerated lag between databases in ms (default: 5000)
+             timeout: Request timeout in seconds (default: DEFAULT_TIMEOUT)
+             auth_basic: Tuple of (username, password) for basic authentication
+             verify_tls: Whether to verify TLS certificates (default: True)
+             ca_file: Path to CA certificate file for TLS verification
+             ca_path: Path to CA certificates directory for TLS verification
+             ca_data: CA certificate data as string or bytes
+             client_cert_file: Path to client certificate file for mutual TLS
+             client_key_file: Path to client private key file for mutual TLS
+             client_key_password: Password for encrypted client private key
+         """
+         self._http_client = AsyncHTTPClientWrapper(
+             HttpClient(
+                 timeout=timeout,
+                 auth_basic=auth_basic,
+                 retry=Retry(NoBackoff(), retries=0),
+                 verify_tls=verify_tls,
+                 ca_file=ca_file,
+                 ca_path=ca_path,
+                 ca_data=ca_data,
+                 client_cert_file=client_cert_file,
+                 client_key_file=client_key_file,
+                 client_key_password=client_key_password,
+             )
+         )
+         self._rest_api_port = rest_api_port
+         self._lag_aware_tolerance = lag_aware_tolerance
+
+     async def check_health(self, database) -> bool:
+         if database.health_check_url is None:
+             raise ValueError(
+                 "Database health check url is not set. Please check DatabaseConfig for the current database."
+             )
+
+         if isinstance(database.client, Redis):
+             db_host = database.client.get_connection_kwargs()["host"]
+         else:
+             db_host = database.client.startup_nodes[0].host
+
+         base_url = f"{database.health_check_url}:{self._rest_api_port}"
+         self._http_client.client.base_url = base_url
+
+         # Find the bdb matching the current database host
+         matching_bdb = None
+         for bdb in await self._http_client.get("/v1/bdbs"):
+             for endpoint in bdb["endpoints"]:
+                 if endpoint["dns_name"] == db_host:
+                     matching_bdb = bdb
+                     break
+
+                 # In case the host was set as a public IP
+                 for addr in endpoint["addr"]:
+                     if addr == db_host:
+                         matching_bdb = bdb
+                         break
+
+         if matching_bdb is None:
+             logger.warning("LagAwareHealthCheck failed: Couldn't find a matching bdb")
+             raise ValueError("Could not find a matching bdb")
+
+         url = (
+             f"/v1/bdbs/{matching_bdb['uid']}/availability"
+             f"?extend_check=lag&availability_lag_tolerance_ms={self._lag_aware_tolerance}"
+         )
+         await self._http_client.get(url, expect_json=False)
+
+         # Status is checked inside the HTTP client; otherwise an HttpError is raised
+         return True
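Usage note: the policy classes drive the probing loop. Each configured health check is probed up to health_check_probes times with health_check_delay seconds between probes, and the policy decides how many probes must succeed. A minimal sketch of that mechanism, using a hypothetical always-healthy check and a bare placeholder object in place of the real database wrapper (both are stand-ins, not part of this diff):

import asyncio

from redis.asyncio.multidb.healthcheck import HealthCheck, HealthyAllPolicy


class _AlwaysHealthy(HealthCheck):
    """Hypothetical check used only to demonstrate the probing loop."""

    async def check_health(self, database) -> bool:
        return True


async def main():
    policy = HealthyAllPolicy(health_check_probes=3, health_check_delay=0.1)
    # `object()` stands in for the database wrapper built by the multidb config;
    # PingHealthCheck and LagAwareHealthCheck expect that real object instead.
    healthy = await policy.execute([_AlwaysHealthy()], object())
    print(healthy)  # True after three successful probes


asyncio.run(main())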
redis/background.py ADDED
@@ -0,0 +1,204 @@
+ import asyncio
+ import threading
+ from typing import Any, Callable, Coroutine
+
+
+ class BackgroundScheduler:
+     """
+     Schedules background task execution either in a separate thread or in the running event loop.
+     """
+
+     def __init__(self):
+         self._next_timer = None
+         self._event_loops = []
+         self._lock = threading.Lock()
+         self._stopped = False
+
+     def __del__(self):
+         self.stop()
+
+     def stop(self):
+         """
+         Stop all scheduled tasks and clean up resources.
+         """
+         with self._lock:
+             if self._stopped:
+                 return
+             self._stopped = True
+
+             if self._next_timer:
+                 self._next_timer.cancel()
+                 self._next_timer = None
+
+             # Stop all event loops
+             for loop in self._event_loops:
+                 if loop.is_running():
+                     loop.call_soon_threadsafe(loop.stop)
+
+             self._event_loops.clear()
+
+     def run_once(self, delay: float, callback: Callable, *args):
+         """
+         Runs a callable task once after a certain delay in seconds.
+         """
+         with self._lock:
+             if self._stopped:
+                 return
+
+         # Run loop in a separate thread to unblock the main thread.
+         loop = asyncio.new_event_loop()
+
+         with self._lock:
+             self._event_loops.append(loop)
+
+         thread = threading.Thread(
+             target=_start_event_loop_in_thread,
+             args=(loop, self._call_later, delay, callback, *args),
+             daemon=True,
+         )
+         thread.start()
+
+     def run_recurring(self, interval: float, callback: Callable, *args):
+         """
+         Runs a recurring callable task with a given interval in seconds.
+         """
+         with self._lock:
+             if self._stopped:
+                 return
+
+         # Run loop in a separate thread to unblock the main thread.
+         loop = asyncio.new_event_loop()
+
+         with self._lock:
+             self._event_loops.append(loop)
+
+         thread = threading.Thread(
+             target=_start_event_loop_in_thread,
+             args=(loop, self._call_later_recurring, interval, callback, *args),
+             daemon=True,
+         )
+         thread.start()
+
+     async def run_recurring_async(
+         self, interval: float, coro: Callable[..., Coroutine[Any, Any, Any]], *args
+     ):
+         """
+         Runs a recurring coroutine with a given interval in seconds in the current event loop.
+         To be used only from an async context. No additional threads are created.
+         """
+         with self._lock:
+             if self._stopped:
+                 return
+
+         loop = asyncio.get_running_loop()
+         wrapped = _async_to_sync_wrapper(loop, coro, *args)
+
+         def tick():
+             with self._lock:
+                 if self._stopped:
+                     return
+                 # Schedule the coroutine
+                 wrapped()
+                 # Schedule next tick
+                 self._next_timer = loop.call_later(interval, tick)
+
+         # Schedule first tick
+         self._next_timer = loop.call_later(interval, tick)
+
+     def _call_later(
+         self, loop: asyncio.AbstractEventLoop, delay: float, callback: Callable, *args
+     ):
+         with self._lock:
+             if self._stopped:
+                 return
+             self._next_timer = loop.call_later(delay, callback, *args)
+
+     def _call_later_recurring(
+         self,
+         loop: asyncio.AbstractEventLoop,
+         interval: float,
+         callback: Callable,
+         *args,
+     ):
+         with self._lock:
+             if self._stopped:
+                 return
+         self._call_later(
+             loop, interval, self._execute_recurring, loop, interval, callback, *args
+         )
+
+     def _execute_recurring(
+         self,
+         loop: asyncio.AbstractEventLoop,
+         interval: float,
+         callback: Callable,
+         *args,
+     ):
+         """
+         Executes a recurring callable task with the given interval in seconds.
+         """
+         with self._lock:
+             if self._stopped:
+                 return
+
+         try:
+             callback(*args)
+         except Exception:
+             # Silently ignore exceptions during shutdown
+             pass
+
+         with self._lock:
+             if self._stopped:
+                 return
+
+         self._call_later(
+             loop, interval, self._execute_recurring, loop, interval, callback, *args
+         )
+
+
+ def _start_event_loop_in_thread(
+     event_loop: asyncio.AbstractEventLoop, call_soon_cb: Callable, *args
+ ):
+     """
+     Starts the event loop in a thread and schedules the callback as soon as the event loop is ready.
+     Used to be able to schedule tasks using loop.call_later.
+
+     :param event_loop:
+     :return:
+     """
+     asyncio.set_event_loop(event_loop)
+     event_loop.call_soon(call_soon_cb, event_loop, *args)
+     try:
+         event_loop.run_forever()
+     finally:
+         try:
+             # Clean up pending tasks
+             pending = asyncio.all_tasks(event_loop)
+             for task in pending:
+                 task.cancel()
+             # Run the loop once more to process cancellations
+             event_loop.run_until_complete(
+                 asyncio.gather(*pending, return_exceptions=True)
+             )
+         except Exception:
+             pass
+         finally:
+             event_loop.close()
+
+
+ def _async_to_sync_wrapper(loop, coro_func, *args, **kwargs):
+     """
+     Wraps an asynchronous function so it can be used with loop.call_later.
+
+     :param loop: The event loop in which the coroutine will be executed.
+     :param coro_func: The coroutine function to wrap.
+     :param args: Positional arguments to pass to the coroutine function.
+     :param kwargs: Keyword arguments to pass to the coroutine function.
+     :return: A regular function suitable for loop.call_later.
+     """
+
+     def wrapped():
+         # Schedule the coroutine in the event loop
+         asyncio.ensure_future(coro_func(*args, **kwargs), loop=loop)
+
+     return wrapped
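Usage note: the scheduler drives recurring work such as periodic health checks without blocking the caller. The thread-based variants spin up a dedicated event loop per task in a daemon thread, while run_recurring_async piggybacks on the already-running loop. A minimal usage sketch, assuming nothing beyond the module shown above:

import asyncio
import time

from redis.background import BackgroundScheduler

scheduler = BackgroundScheduler()

# Thread-based: runs in its own daemon thread with its own event loop.
scheduler.run_recurring(0.5, lambda: print("tick", time.time()))


async def main():
    # Loop-based: schedules the coroutine on the current event loop.
    async def beat():
        print("async tick")

    await scheduler.run_recurring_async(0.5, beat)
    await asyncio.sleep(2)
    scheduler.stop()


asyncio.run(main())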