jararaca 0.2.37a12__py3-none-any.whl → 0.4.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- README.md +121 -0
- jararaca/__init__.py +267 -15
- jararaca/__main__.py +4 -0
- jararaca/broker_backend/__init__.py +106 -0
- jararaca/broker_backend/mapper.py +25 -0
- jararaca/broker_backend/redis_broker_backend.py +168 -0
- jararaca/cli.py +840 -103
- jararaca/common/__init__.py +3 -0
- jararaca/core/__init__.py +3 -0
- jararaca/core/providers.py +4 -0
- jararaca/core/uow.py +55 -16
- jararaca/di.py +4 -0
- jararaca/files/entity.py.mako +4 -0
- jararaca/lifecycle.py +6 -2
- jararaca/messagebus/__init__.py +5 -1
- jararaca/messagebus/bus_message_controller.py +4 -0
- jararaca/messagebus/consumers/__init__.py +3 -0
- jararaca/messagebus/decorators.py +90 -85
- jararaca/messagebus/implicit_headers.py +49 -0
- jararaca/messagebus/interceptors/__init__.py +3 -0
- jararaca/messagebus/interceptors/aiopika_publisher_interceptor.py +95 -37
- jararaca/messagebus/interceptors/publisher_interceptor.py +42 -0
- jararaca/messagebus/message.py +31 -0
- jararaca/messagebus/publisher.py +47 -4
- jararaca/messagebus/worker.py +1615 -135
- jararaca/microservice.py +248 -36
- jararaca/observability/constants.py +7 -0
- jararaca/observability/decorators.py +177 -16
- jararaca/observability/fastapi_exception_handler.py +37 -0
- jararaca/observability/hooks.py +109 -0
- jararaca/observability/interceptor.py +8 -2
- jararaca/observability/providers/__init__.py +3 -0
- jararaca/observability/providers/otel.py +213 -18
- jararaca/persistence/base.py +40 -3
- jararaca/persistence/exports.py +4 -0
- jararaca/persistence/interceptors/__init__.py +3 -0
- jararaca/persistence/interceptors/aiosqa_interceptor.py +187 -23
- jararaca/persistence/interceptors/constants.py +5 -0
- jararaca/persistence/interceptors/decorators.py +50 -0
- jararaca/persistence/session.py +3 -0
- jararaca/persistence/sort_filter.py +4 -0
- jararaca/persistence/utilities.py +74 -32
- jararaca/presentation/__init__.py +3 -0
- jararaca/presentation/decorators.py +170 -82
- jararaca/presentation/exceptions.py +23 -0
- jararaca/presentation/hooks.py +4 -0
- jararaca/presentation/http_microservice.py +4 -0
- jararaca/presentation/server.py +120 -41
- jararaca/presentation/websocket/__init__.py +3 -0
- jararaca/presentation/websocket/base_types.py +4 -0
- jararaca/presentation/websocket/context.py +34 -4
- jararaca/presentation/websocket/decorators.py +8 -41
- jararaca/presentation/websocket/redis.py +280 -53
- jararaca/presentation/websocket/types.py +6 -2
- jararaca/presentation/websocket/websocket_interceptor.py +74 -23
- jararaca/reflect/__init__.py +3 -0
- jararaca/reflect/controller_inspect.py +81 -0
- jararaca/reflect/decorators.py +238 -0
- jararaca/reflect/metadata.py +76 -0
- jararaca/rpc/__init__.py +3 -0
- jararaca/rpc/http/__init__.py +101 -0
- jararaca/rpc/http/backends/__init__.py +14 -0
- jararaca/rpc/http/backends/httpx.py +43 -9
- jararaca/rpc/http/backends/otel.py +4 -0
- jararaca/rpc/http/decorators.py +378 -113
- jararaca/rpc/http/httpx.py +3 -0
- jararaca/scheduler/__init__.py +3 -0
- jararaca/scheduler/beat_worker.py +758 -0
- jararaca/scheduler/decorators.py +89 -28
- jararaca/scheduler/types.py +11 -0
- jararaca/tools/app_config/__init__.py +3 -0
- jararaca/tools/app_config/decorators.py +7 -19
- jararaca/tools/app_config/interceptor.py +10 -4
- jararaca/tools/typescript/__init__.py +3 -0
- jararaca/tools/typescript/decorators.py +120 -0
- jararaca/tools/typescript/interface_parser.py +1126 -189
- jararaca/utils/__init__.py +3 -0
- jararaca/utils/rabbitmq_utils.py +372 -0
- jararaca/utils/retry.py +148 -0
- jararaca-0.4.0a5.dist-info/LICENSE +674 -0
- jararaca-0.4.0a5.dist-info/LICENSES/GPL-3.0-or-later.txt +232 -0
- {jararaca-0.2.37a12.dist-info → jararaca-0.4.0a5.dist-info}/METADATA +14 -7
- jararaca-0.4.0a5.dist-info/RECORD +88 -0
- {jararaca-0.2.37a12.dist-info → jararaca-0.4.0a5.dist-info}/WHEEL +1 -1
- pyproject.toml +131 -0
- jararaca/messagebus/types.py +0 -30
- jararaca/scheduler/scheduler.py +0 -154
- jararaca/tools/metadata.py +0 -47
- jararaca-0.2.37a12.dist-info/RECORD +0 -63
- /jararaca-0.2.37a12.dist-info/LICENSE → /LICENSE +0 -0
- {jararaca-0.2.37a12.dist-info → jararaca-0.4.0a5.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,758 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Lucas S
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: GPL-3.0-or-later
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import contextlib
|
|
7
|
+
import logging
|
|
8
|
+
import signal
|
|
9
|
+
import time
|
|
10
|
+
from abc import ABC, abstractmethod
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from datetime import UTC, datetime
|
|
13
|
+
from typing import Any
|
|
14
|
+
from urllib.parse import parse_qs
|
|
15
|
+
|
|
16
|
+
import aio_pika
|
|
17
|
+
import croniter
|
|
18
|
+
import urllib3
|
|
19
|
+
import urllib3.util
|
|
20
|
+
import uvloop
|
|
21
|
+
from aio_pika import connect
|
|
22
|
+
from aio_pika.abc import AbstractChannel, AbstractConnection
|
|
23
|
+
from aio_pika.exceptions import (
|
|
24
|
+
AMQPChannelError,
|
|
25
|
+
AMQPConnectionError,
|
|
26
|
+
AMQPError,
|
|
27
|
+
ChannelClosed,
|
|
28
|
+
ConnectionClosed,
|
|
29
|
+
)
|
|
30
|
+
from aio_pika.pool import Pool
|
|
31
|
+
from aiormq.exceptions import ChannelInvalidStateError
|
|
32
|
+
|
|
33
|
+
from jararaca.broker_backend import MessageBrokerBackend
|
|
34
|
+
from jararaca.broker_backend.mapper import get_message_broker_backend_from_url
|
|
35
|
+
from jararaca.core.uow import UnitOfWorkContextProvider
|
|
36
|
+
from jararaca.di import Container
|
|
37
|
+
from jararaca.lifecycle import AppLifecycle
|
|
38
|
+
from jararaca.microservice import Microservice, providing_app_type
|
|
39
|
+
from jararaca.scheduler.decorators import (
|
|
40
|
+
ScheduledAction,
|
|
41
|
+
ScheduledActionData,
|
|
42
|
+
get_type_scheduled_actions,
|
|
43
|
+
)
|
|
44
|
+
from jararaca.scheduler.types import DelayedMessageData
|
|
45
|
+
from jararaca.utils.rabbitmq_utils import RabbitmqUtils
|
|
46
|
+
from jararaca.utils.retry import RetryConfig, retry_with_backoff
|
|
47
|
+
|
|
48
|
+
logger = logging.getLogger(__name__)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _extract_scheduled_actions(
    app: Microservice, container: Container, scheduler_names: set[str] | None = None
) -> list[ScheduledActionData]:
    """Collect the scheduled actions declared on the app's controllers.

    When *scheduler_names* is provided, only actions whose spec carries a
    name contained in that set are kept; actions without a name are
    dropped while filtering is active.
    """
    collected: list[ScheduledActionData] = []

    for controller_type in app.controllers:
        instance: Any = container.get_by_type(controller_type)
        actions = get_type_scheduled_actions(instance)

        if scheduler_names is not None:
            # Filtering is active: keep only named actions whose name was
            # requested. Unnamed actions are skipped entirely.
            actions = [
                action
                for action in actions
                if action.spec.name and action.spec.name in scheduler_names
            ]

        collected.extend(actions)

    return collected
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# region Message Broker Dispatcher
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class _MessageBrokerDispatcher(ABC):
    """Abstract interface for publishing beat-worker messages to a broker.

    Concrete implementations (currently only RabbitMQ) are created via
    ``_get_message_broker_dispatcher_from_url``.
    """

    @abstractmethod
    async def dispatch_scheduled_action(
        self,
        action_id: str,
        timestamp: int,
    ) -> None:
        """
        Dispatch a message to the message broker.
        This is used to send a message to the message broker
        to trigger the scheduled action.
        """
        raise NotImplementedError("dispatch() is not implemented yet.")

    @abstractmethod
    async def dispatch_delayed_message(
        self,
        delayed_message: DelayedMessageData,
    ) -> None:
        """
        Dispatch a delayed message to the message broker.
        This is used to send a message to the message broker
        to trigger the scheduled action.
        """

        raise NotImplementedError("dispatch_delayed_message() is not implemented yet.")

    @abstractmethod
    async def initialize(self, scheduled_actions: list[ScheduledActionData]) -> None:
        """Prepare broker-side resources (exchanges/queues) for the given actions."""
        raise NotImplementedError("initialize() is not implemented yet.")

    async def dispose(self) -> None:
        # Optional cleanup hook; the base implementation is a no-op so
        # subclasses without resources don't have to override it.
        pass
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
class _RabbitMQBrokerDispatcher(_MessageBrokerDispatcher):
    """RabbitMQ-backed dispatcher with pooled connections/channels.

    Connections and channels are managed through ``aio_pika.pool.Pool``
    and (re)created with exponential-backoff retries.  A background task
    periodically probes connection health and sets ``shutdown_event``
    when the broker becomes unreachable, so the owning worker can stop.

    The URL must be an ``amqp://``/``amqps://`` URL carrying an
    ``exchange`` query parameter naming the main exchange to publish to.
    """

    def __init__(
        self,
        url: str,
        config: "BeatWorkerConfig | None" = None,
        shutdown_event: asyncio.Event | None = None,
    ) -> None:
        self.url = url
        self.config = config or BeatWorkerConfig()
        # Flipped to True only after initialize() verifies the connection.
        self.connection_healthy = False
        self.shutdown_event = shutdown_event or asyncio.Event()
        # Created by initialize(); cancelled in dispose().
        self.health_check_task: asyncio.Task[Any] | None = None

        # Lazy pools: factories are only invoked on first acquire().
        self.conn_pool: "Pool[AbstractConnection]" = Pool(
            self._create_connection,
            max_size=self.config.max_pool_size,
        )

        self.channel_pool: "Pool[AbstractChannel]" = Pool(
            self._create_channel,
            max_size=self.config.max_pool_size,
        )

        # Validate the URL eagerly so misconfiguration fails at construction
        # time rather than on the first dispatch.
        # NOTE(review): these asserts are stripped under `python -O` — confirm
        # whether explicit ValueError raises are wanted instead.
        splitted = urllib3.util.parse_url(url)

        assert splitted.scheme in ["amqp", "amqps"], "Invalid URL scheme"

        assert splitted.host, "Invalid URL host"

        assert splitted.query, "Invalid URL query"

        query_params: dict[str, list[str]] = parse_qs(splitted.query)

        assert "exchange" in query_params, "Missing exchange parameter"

        assert query_params["exchange"], "Empty exchange parameter"

        # Only the first value of the "exchange" query parameter is used.
        self.exchange = str(query_params["exchange"][0])

    async def _create_connection(self) -> AbstractConnection:
        """
        Create a connection to the RabbitMQ server with retry logic.

        Used as the connection pool's factory; retries transient
        network-level failures with the configured backoff policy.
        """

        async def _establish_connection() -> AbstractConnection:
            logger.debug("Establishing connection to RabbitMQ")
            connection = await connect(
                self.url,
                heartbeat=self.config.connection_heartbeat_interval,
            )
            logger.debug("Connected to RabbitMQ successfully")
            return connection

        return await retry_with_backoff(
            _establish_connection,
            retry_config=self.config.connection_retry_config,
            retry_exceptions=(
                AMQPConnectionError,
                ConnectionError,
                OSError,
                TimeoutError,
            ),
        )

    async def _create_channel(self) -> AbstractChannel:
        """
        Create a channel to the RabbitMQ server with retry logic.

        Used as the channel pool's factory; borrows a connection from
        the connection pool for the duration of channel creation.
        """

        async def _establish_channel() -> AbstractChannel:
            async with self.conn_pool.acquire() as connection:
                channel = await connection.channel()
                return channel

        return await retry_with_backoff(
            _establish_channel,
            retry_config=self.config.connection_retry_config,
            retry_exceptions=(
                AMQPConnectionError,
                AMQPChannelError,
                ChannelClosed,
                ConnectionError,
            ),
        )

    async def dispatch_scheduled_action(self, action_id: str, timestamp: int) -> None:
        """
        Dispatch a message to the RabbitMQ server with retry logic.

        Publishes the dispatch timestamp (UTF-8 encoded) to the main
        exchange with the action id as the routing key.  Any failure
        after retries sets ``shutdown_event`` and re-raises.
        """
        # Block until initialize() has marked the connection healthy
        # (bounded wait; raises ConnectionError on timeout/shutdown).
        if not self.connection_healthy:
            await self._wait_for_connection()

        async def _dispatch() -> None:
            logger.debug("Dispatching message to %s at %s", action_id, timestamp)
            async with self.channel_pool.acquire() as channel:
                exchange = await RabbitmqUtils.get_main_exchange(channel, self.exchange)

                await exchange.publish(
                    aio_pika.Message(body=str(timestamp).encode()),
                    routing_key=action_id,
                )
            logger.debug("Dispatched message to %s at %s", action_id, timestamp)

        try:
            await retry_with_backoff(
                _dispatch,
                retry_config=self.config.dispatch_retry_config,
                retry_exceptions=(
                    AMQPConnectionError,
                    AMQPChannelError,
                    ChannelClosed,
                    ConnectionClosed,
                    AMQPError,
                ),
            )

        # ChannelInvalidStateError is not in retry_exceptions above, so it
        # reaches here directly without being retried.
        except ChannelInvalidStateError as e:
            logger.error(
                "Channel invalid state error when dispatching to %s: %s", action_id, e
            )
            # Trigger shutdown if dispatch fails
            self.shutdown_event.set()
            raise

        except Exception as e:
            logger.error(
                "Failed to dispatch message to %s after retries: %s", action_id, e
            )
            # Trigger shutdown if dispatch fails
            self.shutdown_event.set()
            raise

    async def dispatch_delayed_message(
        self, delayed_message: DelayedMessageData
    ) -> None:
        """
        Dispatch a delayed message to the RabbitMQ server with retry logic.

        Publishes the message payload to the main exchange using the
        message topic (with a trailing ``.``) as routing key.
        """
        if not self.connection_healthy:
            await self._wait_for_connection()

        async def _dispatch() -> None:
            async with self.channel_pool.acquire() as channel:
                exchange = await RabbitmqUtils.get_main_exchange(channel, self.exchange)
                await exchange.publish(
                    aio_pika.Message(
                        body=delayed_message.payload,
                    ),
                    # Trailing dot matches the worker-side topic binding
                    # convention — presumably; verify against the consumer.
                    routing_key=f"{delayed_message.message_topic}.",
                )

        try:
            await retry_with_backoff(
                _dispatch,
                retry_config=self.config.dispatch_retry_config,
                retry_exceptions=(
                    AMQPConnectionError,
                    AMQPChannelError,
                    ChannelClosed,
                    ConnectionClosed,
                    AMQPError,
                ),
            )
        except Exception as e:
            logger.error("Failed to dispatch delayed message after retries: %s", e)
            # Trigger shutdown if dispatch fails
            self.shutdown_event.set()
            raise

    async def initialize(self, scheduled_actions: list[ScheduledActionData]) -> None:
        """
        Initialize the RabbitMQ server with retry logic.

        Declares the main exchange and one queue per scheduled action,
        verifies connection health, and starts background health
        monitoring.  Raises on unrecoverable failure.
        """

        async def _initialize() -> None:
            async with self.channel_pool.acquire() as channel:
                await RabbitmqUtils.get_main_exchange(channel, self.exchange)

                for sched_act_data in scheduled_actions:
                    # Queue name is derived from the action's function id,
                    # matching the routing key used by dispatch_scheduled_action.
                    queue_name = ScheduledAction.get_function_id(
                        sched_act_data.callable
                    )

                    # Try to get existing queue
                    await RabbitmqUtils.get_scheduled_action_queue(
                        channel=channel,
                        queue_name=queue_name,
                    )

        try:
            logger.debug("Initializing RabbitMQ connection...")
            await retry_with_backoff(
                _initialize,
                retry_config=self.config.connection_retry_config,
                retry_exceptions=(
                    AMQPConnectionError,
                    AMQPChannelError,
                    ChannelClosed,
                    ConnectionClosed,
                    AMQPError,
                ),
            )

            # Verify connection is actually healthy after initialization
            if not await self._is_connection_healthy():
                logger.warning(
                    "Connection health check failed after initialization, retrying..."
                )
                # Wait a bit and try again
                await asyncio.sleep(2.0)
                if not await self._is_connection_healthy():
                    raise ConnectionError("Connection not healthy after initialization")

            self.connection_healthy = True
            logger.debug("RabbitMQ connection initialized successfully")

            # Start health monitoring
            self.health_check_task = asyncio.create_task(
                self._monitor_connection_health()
            )

        except Exception as e:
            logger.error("Failed to initialize RabbitMQ after retries: %s", e)
            raise

    async def dispose(self) -> None:
        """Clean up resources"""
        logger.debug("Disposing RabbitMQ broker dispatcher")
        # Signal everyone (health monitor, waiters) before tearing down.
        self.shutdown_event.set()

        # Cancel health monitoring
        if self.health_check_task:
            self.health_check_task.cancel()
            try:
                await self.health_check_task
            except asyncio.CancelledError:
                pass

        # Clean up pools
        await self._cleanup_pools()

    async def _monitor_connection_health(self) -> None:
        """Monitor connection health and trigger shutdown if needed"""
        # Runs until shutdown; probes the broker every
        # config.health_check_interval seconds.
        while not self.shutdown_event.is_set():
            try:
                await asyncio.sleep(self.config.health_check_interval)

                # Re-check after sleeping: shutdown may have been requested
                # while we were waiting.
                if self.shutdown_event.is_set():
                    break

                # Check connection health
                if not await self._is_connection_healthy():
                    logger.error("Connection health check failed, triggering shutdown")
                    self.shutdown_event.set()
                    break

            except asyncio.CancelledError:
                logger.debug("Connection health monitoring cancelled")
                break
            except Exception as e:
                logger.error("Error in connection health monitoring: %s", e)
                await asyncio.sleep(5)  # Wait before retrying

    async def _is_connection_healthy(self) -> bool:
        """Check if the connection is healthy.

        Returns True only if a pooled connection is open AND a throwaway
        channel can be created on it; any exception counts as unhealthy.
        """
        try:
            # Try to acquire a connection from the pool
            async with self.conn_pool.acquire() as connection:
                if connection.is_closed:
                    return False

                # Try to create a channel to test connection
                channel = await connection.channel()
                await channel.close()
                return True

        except Exception as e:
            logger.debug("Connection health check failed: %s", e)
            return False

    async def _cleanup_pools(self) -> None:
        """Clean up existing connection pools"""
        # Close channels before connections; failures are logged, never raised,
        # so dispose() always completes.
        try:
            if hasattr(self, "channel_pool"):
                await self.channel_pool.close()
        except Exception as e:
            logger.warning("Error closing channel pool: %s", e)

        try:
            if hasattr(self, "conn_pool"):
                await self.conn_pool.close()
        except Exception as e:
            logger.warning("Error closing connection pool: %s", e)

    async def _wait_for_connection(self) -> None:
        """Wait for connection to be healthy.

        Polls ``connection_healthy`` every 0.5s for up to 30s; raises
        ConnectionError on timeout or if shutdown is requested meanwhile.
        """
        max_wait = 30.0  # Maximum wait time
        wait_time = 0.0

        while not self.connection_healthy and wait_time < max_wait:
            if self.shutdown_event.is_set():
                raise ConnectionError("Shutdown requested while waiting for connection")

            await asyncio.sleep(0.5)
            wait_time += 0.5

        if not self.connection_healthy:
            raise ConnectionError("Connection not healthy after maximum wait time")
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def _get_message_broker_dispatcher_from_url(
    url: str,
    config: "BeatWorkerConfig | None" = None,
    shutdown_event: asyncio.Event | None = None,
) -> _MessageBrokerDispatcher:
    """
    Factory function to create a message broker instance from a URL.
    Currently, only RabbitMQ is supported.

    Args:
        url: Broker URL; must use the ``amqp://`` or ``amqps://`` scheme.
        config: Optional resilience configuration forwarded to the dispatcher.
        shutdown_event: Optional shared event forwarded to the dispatcher.

    Raises:
        ValueError: If the URL uses an unsupported scheme.
    """
    # str.startswith accepts a tuple of prefixes — one call covers both schemes.
    if url.startswith(("amqp://", "amqps://")):
        return _RabbitMQBrokerDispatcher(
            url=url, config=config, shutdown_event=shutdown_event
        )
    raise ValueError(f"Unsupported message broker URL: {url}")
|
|
441
|
+
|
|
442
|
+
|
|
443
|
+
# endregion
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
@dataclass
class BeatWorkerConfig:
    """Configuration for beat worker connection resilience"""

    # Retry policy for establishing connections/channels and for broker
    # initialization: patient exponential backoff (up to 10 attempts, 2s-60s).
    connection_retry_config: RetryConfig = field(
        default_factory=lambda: RetryConfig(
            max_retries=10,
            initial_delay=2.0,
            max_delay=60.0,
            backoff_factor=2.0,
            jitter=True,
        )
    )
    # Retry policy for individual message dispatches: short (3 attempts, 1s-10s)
    # so a stuck publish fails fast and triggers shutdown handling.
    dispatch_retry_config: RetryConfig = field(
        default_factory=lambda: RetryConfig(
            max_retries=3,
            initial_delay=1.0,
            max_delay=10.0,
            backoff_factor=2.0,
            jitter=True,
        )
    )
    # Heartbeat (seconds) passed to aio_pika's connect().
    connection_heartbeat_interval: float = 30.0
    # Interval (seconds) between background connection health probes.
    health_check_interval: float = 15.0

    # Connection establishment timeouts
    connection_wait_timeout: float = 300.0  # 5 minutes to wait for initial connection

    # Pool configuration
    max_pool_size: int = 10
    # NOTE(review): pool_recycle_time is not referenced in this module —
    # confirm whether it is consumed elsewhere or is dead configuration.
    pool_recycle_time: float = 3600.0  # 1 hour
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
class BeatWorker:
    """Cron-style scheduler ("beat") worker.

    Periodically evaluates scheduled actions (cron specs discovered on the
    app's controllers) and delayed messages stored in the backend, and
    publishes trigger messages through a broker dispatcher.  Coordination
    across multiple beat instances is done via the backend's lock.
    """

    def __init__(
        self,
        app: Microservice,
        interval: int,
        broker_url: str,
        backend_url: str,
        scheduled_action_names: set[str] | None = None,
        config: "BeatWorkerConfig | None" = None,
        shutdown_event: asyncio.Event | None = None,
    ) -> None:
        # Shared with the broker dispatcher so either side can request shutdown.
        self.shutdown_event = shutdown_event or asyncio.Event()

        self.app = app
        self.config = config or BeatWorkerConfig()

        # Publishes trigger messages (currently RabbitMQ only).
        self.broker: _MessageBrokerDispatcher = _get_message_broker_dispatcher_from_url(
            broker_url, self.config, shutdown_event=self.shutdown_event
        )
        # Stores dispatch bookkeeping (last dispatch times, overlap counts,
        # delayed-message queue).
        self.backend: MessageBrokerBackend = get_message_broker_backend_from_url(
            backend_url
        )

        # Seconds to sleep between scheduler loop ticks.
        self.interval = interval
        # Optional name filter applied when extracting scheduled actions.
        self.scheduler_names = scheduled_action_names
        self.container = Container(self.app)
        # NOTE(review): uow_provider is created but not used within this
        # class — confirm it is required for its construction side effects.
        self.uow_provider = UnitOfWorkContextProvider(app, self.container)

        self.lifecycle = AppLifecycle(app, self.container)

    def run(self) -> None:
        """Blocking entry point: runs the scheduler on a uvloop event loop.

        Installs SIGINT/SIGTERM handlers for graceful shutdown and exits
        the process with status 1 if the scheduler fails to start.
        """

        def on_shutdown(loop: asyncio.AbstractEventLoop) -> None:
            # Runs inside the event loop (signal handler), so create_task
            # is safe here.  The `loop` argument is currently unused.
            logger.debug("Shutting down - signal received")
            # Schedule the shutdown to run in the event loop
            asyncio.create_task(self._graceful_shutdown())

        with asyncio.Runner(loop_factory=uvloop.new_event_loop) as runner:
            loop = runner.get_loop()
            loop.add_signal_handler(signal.SIGINT, on_shutdown, loop)
            # Add graceful shutdown handler for SIGTERM as well
            loop.add_signal_handler(signal.SIGTERM, on_shutdown, loop)
            try:
                runner.run(self.start_scheduler())
            except Exception as e:
                logger.critical(
                    "Scheduler failed to start due to connection error: %s", e
                )
                # Exit with error code 1 to indicate startup failure
                import sys

                sys.exit(1)

    async def start_scheduler(self) -> None:
        """
        Declares the scheduled actions and starts the scheduler.
        This is the main entry point for the scheduler.

        Runs inside the app lifecycle with app type "beat": extracts the
        (optionally name-filtered) scheduled actions, initializes the
        broker, waits until the connection is healthy, then enters the
        main processing loop.
        """
        with providing_app_type("beat"):
            async with self.lifecycle():

                scheduled_actions = _extract_scheduled_actions(
                    self.app, self.container, self.scheduler_names
                )

                # Initialize and wait for connection to be established
                logger.debug("Initializing broker connection...")
                await self.broker.initialize(scheduled_actions)

                # Wait for connection to be healthy before starting scheduler
                logger.debug("Waiting for connection to be established...")
                await self._wait_for_broker_connection()

                logger.debug("Connection established, starting scheduler...")
                await self.run_scheduled_actions(scheduled_actions)

    async def run_scheduled_actions(
        self, scheduled_actions: list[ScheduledActionData]
    ) -> None:
        """Main scheduler loop: dispatch due actions and delayed messages.

        Each tick (every ``self.interval`` seconds, or sooner on shutdown):
        for every action, under the backend lock, checks the cron schedule
        against the last dispatch time and the overlap policy, then
        publishes a trigger via the broker.  Afterwards, due delayed
        messages are dequeued from the backend and published.  Per-action
        failures are logged and skipped; broker/backend are disposed on exit.
        """

        logger.debug("Starting scheduled actions processing loop")

        # Ensure we have a healthy connection before starting the main loop
        if (
            hasattr(self.broker, "connection_healthy")
            and not self.broker.connection_healthy
        ):
            logger.error("Connection not healthy at start of processing loop. Exiting.")
            return

        while not self.shutdown_event.is_set():
            # Check connection health before processing scheduled actions
            if (
                hasattr(self.broker, "connection_healthy")
                and not self.broker.connection_healthy
            ):
                logger.error("Broker connection is not healthy. Exiting.")
                break

            # Single timestamp for the whole tick: all actions dispatched in
            # this pass share the same "now".
            now = int(time.time())
            for sched_act_data in scheduled_actions:
                func = sched_act_data.callable
                scheduled_action = sched_act_data.spec
                if self.shutdown_event.is_set():
                    break

                try:
                    # Backend lock serializes scheduling decisions across
                    # multiple beat instances.
                    async with self.backend.lock():

                        last_dispatch_time: int | None = (
                            await self.backend.get_last_dispatch_time(
                                ScheduledAction.get_function_id(func)
                            )
                        )

                        # None means never dispatched: fire immediately.
                        # Otherwise compute the next cron fire time after the
                        # last dispatch and skip if it is still in the future.
                        if last_dispatch_time is not None:
                            cron = croniter.croniter(
                                scheduled_action.cron, last_dispatch_time
                            )
                            # croniter returns a naive datetime here; it is
                            # tagged as UTC — assumes last_dispatch_time is a
                            # UTC epoch timestamp (it comes from time.time()).
                            next_run: datetime = cron.get_next(datetime).replace(
                                tzinfo=UTC
                            )
                            if next_run > datetime.now(UTC):
                                logger.debug(
                                    "Skipping %s.%s until %s",
                                    func.__module__,
                                    func.__qualname__,
                                    next_run,
                                )
                                continue

                        # Overlap guard: if a previous run of this action is
                        # still executing, skip this tick.
                        if not scheduled_action.allow_overlap:
                            if (
                                await self.backend.get_in_execution_count(
                                    ScheduledAction.get_function_id(func)
                                )
                                > 0
                            ):
                                continue

                        try:
                            start_time = time.perf_counter()
                            await self.broker.dispatch_scheduled_action(
                                ScheduledAction.get_function_id(func),
                                now,
                            )
                            elapsed_time = time.perf_counter() - start_time

                            # Record only after a successful publish so a
                            # failed dispatch is retried on the next tick.
                            await self.backend.set_last_dispatch_time(
                                ScheduledAction.get_function_id(func), now
                            )

                            logger.debug(
                                "Scheduled %s.%s at %s in %.4fs",
                                func.__module__,
                                func.__qualname__,
                                now,
                                elapsed_time,
                            )
                        except ChannelInvalidStateError as e:
                            logger.error(
                                "Channel invalid state error when dispatching %s.%s: %s",
                                func.__module__,
                                func.__qualname__,
                                e,
                            )
                            # Trigger shutdown if dispatch fails
                            self.shutdown_event.set()
                            raise
                        except Exception as e:
                            logger.error(
                                "Failed to dispatch scheduled action %s.%s: %s",
                                func.__module__,
                                func.__qualname__,
                                e,
                            )
                            # Continue with other scheduled actions even if one fails
                            continue

                except Exception as e:
                    # Catches lock/backend failures and the re-raised
                    # ChannelInvalidStateError above (shutdown_event is
                    # already set in that case, ending the outer loop).
                    logger.error(
                        "Error processing scheduled action %s.%s: %s",
                        func.__module__,
                        func.__qualname__,
                        e,
                    )
                    # Continue with other scheduled actions even if one fails
                    continue

            # Handle delayed messages
            try:
                delayed_messages = await self.backend.dequeue_next_delayed_messages(now)
                for delayed_message_data in delayed_messages:
                    try:
                        start_time = time.perf_counter()
                        await self.broker.dispatch_delayed_message(delayed_message_data)
                        elapsed_time = time.perf_counter() - start_time
                        logger.debug(
                            "Dispatched delayed message for topic %s in %.4fs",
                            delayed_message_data.message_topic,
                            elapsed_time,
                        )
                    except Exception as e:
                        logger.error("Failed to dispatch delayed message: %s", e)
                        # Continue with other delayed messages even if one fails
                        continue
            except Exception as e:
                logger.error("Error processing delayed messages: %s", e)

            # Sleep until the next tick, but wake immediately on shutdown;
            # the TimeoutError from wait_for is the normal "tick elapsed" path.
            with contextlib.suppress(asyncio.TimeoutError):
                await asyncio.wait_for(self.shutdown_event.wait(), self.interval)

        logger.debug("Scheduler stopped")

        # Best-effort teardown: dispose backend and broker independently so
        # one failure doesn't prevent the other from cleaning up.
        try:
            await self.backend.dispose()
        except Exception as e:
            logger.error("Error disposing backend: %s", e)

        try:
            await self.broker.dispose()
        except Exception as e:
            logger.error("Error disposing broker: %s", e)

    async def _graceful_shutdown(self) -> None:
        """Handles graceful shutdown process"""
        logger.debug("Initiating graceful shutdown sequence")
        # Setting the event makes the main loop exit and dispose resources.
        self.shutdown_event.set()
        logger.debug("Graceful shutdown completed")

    async def _wait_for_broker_connection(self) -> None:
        """
        Wait for the broker connection to be established and healthy.
        This ensures the scheduler doesn't start until RabbitMQ is ready.

        Polls every 2 seconds up to ``config.connection_wait_timeout``;
        raises ConnectionError on timeout or if shutdown is requested.
        """
        max_wait_time = self.config.connection_wait_timeout
        check_interval = 2.0  # Check every 2 seconds
        elapsed_time = 0.0

        logger.debug(
            "Waiting for broker connection to be established (timeout: %ss)...",
            max_wait_time,
        )

        while elapsed_time < max_wait_time:
            if self.shutdown_event.is_set():
                raise ConnectionError(
                    "Shutdown requested while waiting for broker connection"
                )

            # Check if broker connection is healthy
            if (
                hasattr(self.broker, "connection_healthy")
                and self.broker.connection_healthy
            ):
                logger.debug("Broker connection is healthy")
                return

            # If broker doesn't have health status, try a simple health check
            if not hasattr(self.broker, "connection_healthy"):
                try:
                    # For non-RabbitMQ brokers, assume connection is ready after initialization
                    logger.debug("Broker connection assumed to be ready")
                    return
                except Exception as e:
                    logger.debug("Broker connection check failed: %s", e)

            # elapsed_time advances in exact 2.0 steps, so the modulo check
            # below fires precisely every 10 seconds.
            if elapsed_time % 10.0 == 0.0:  # Log every 10 seconds
                logger.warning(
                    "Still waiting for broker connection... (%.1fs elapsed)",
                    elapsed_time,
                )

            await asyncio.sleep(check_interval)
            elapsed_time += check_interval

        raise ConnectionError(
            f"Broker connection not established after {max_wait_time} seconds"
        )
|