jararaca 0.3.10__py3-none-any.whl → 0.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jararaca might be problematic.

Files changed (34)
  1. jararaca/__init__.py +76 -5
  2. jararaca/cli.py +460 -116
  3. jararaca/core/uow.py +17 -12
  4. jararaca/messagebus/decorators.py +33 -30
  5. jararaca/messagebus/interceptors/aiopika_publisher_interceptor.py +30 -2
  6. jararaca/messagebus/interceptors/publisher_interceptor.py +7 -3
  7. jararaca/messagebus/publisher.py +14 -6
  8. jararaca/messagebus/worker.py +1102 -88
  9. jararaca/microservice.py +137 -34
  10. jararaca/observability/decorators.py +7 -3
  11. jararaca/observability/interceptor.py +4 -2
  12. jararaca/observability/providers/otel.py +14 -10
  13. jararaca/persistence/base.py +2 -1
  14. jararaca/persistence/interceptors/aiosqa_interceptor.py +167 -16
  15. jararaca/presentation/decorators.py +96 -10
  16. jararaca/presentation/server.py +31 -4
  17. jararaca/presentation/websocket/context.py +30 -4
  18. jararaca/presentation/websocket/types.py +2 -2
  19. jararaca/presentation/websocket/websocket_interceptor.py +28 -4
  20. jararaca/reflect/__init__.py +0 -0
  21. jararaca/reflect/controller_inspect.py +75 -0
  22. jararaca/{tools → reflect}/metadata.py +25 -5
  23. jararaca/scheduler/{scheduler_v2.py → beat_worker.py} +49 -53
  24. jararaca/scheduler/decorators.py +55 -20
  25. jararaca/tools/app_config/interceptor.py +4 -2
  26. jararaca/utils/rabbitmq_utils.py +259 -5
  27. jararaca/utils/retry.py +141 -0
  28. {jararaca-0.3.10.dist-info → jararaca-0.3.11.dist-info}/METADATA +2 -1
  29. {jararaca-0.3.10.dist-info → jararaca-0.3.11.dist-info}/RECORD +32 -31
  30. {jararaca-0.3.10.dist-info → jararaca-0.3.11.dist-info}/WHEEL +1 -1
  31. jararaca/messagebus/worker_v2.py +0 -617
  32. jararaca/scheduler/scheduler.py +0 -161
  33. {jararaca-0.3.10.dist-info → jararaca-0.3.11.dist-info}/LICENSE +0 -0
  34. {jararaca-0.3.10.dist-info → jararaca-0.3.11.dist-info}/entry_points.txt +0 -0
jararaca/messagebus/worker.py
@@ -1,16 +1,33 @@
 import asyncio
 import inspect
 import logging
+import random
 import signal
+import time
+import uuid
+from abc import ABC
 from contextlib import asynccontextmanager, suppress
-from dataclasses import dataclass
-from typing import Any, AsyncContextManager, AsyncGenerator, Type, get_origin
+from dataclasses import dataclass, field
+from datetime import UTC, datetime
+from typing import (
+    Any,
+    AsyncContextManager,
+    AsyncGenerator,
+    Awaitable,
+    Optional,
+    Type,
+    get_origin,
+)
+from urllib.parse import parse_qs, urlparse
 
 import aio_pika
 import aio_pika.abc
 import uvloop
+from aio_pika.exceptions import AMQPError, ChannelClosed, ChannelNotFoundEntity
 from pydantic import BaseModel
 
+from jararaca.broker_backend import MessageBrokerBackend
+from jararaca.broker_backend.mapper import get_message_broker_backend_from_url
 from jararaca.core.uow import UnitOfWorkContextProvider
 from jararaca.di import Container
 from jararaca.lifecycle import AppLifecycle
@@ -20,13 +37,22 @@ from jararaca.messagebus.bus_message_controller import (
 )
 from jararaca.messagebus.decorators import (
     MESSAGE_HANDLER_DATA_SET,
+    SCHEDULED_ACTION_DATA_SET,
     MessageBusController,
     MessageHandler,
     MessageHandlerData,
+    ScheduleDispatchData,
 )
 from jararaca.messagebus.message import Message, MessageOf
-from jararaca.microservice import MessageBusAppContext, Microservice
+from jararaca.microservice import (
+    AppTransactionContext,
+    MessageBusTransactionData,
+    Microservice,
+    SchedulerTransactionData,
+)
+from jararaca.scheduler.decorators import ScheduledActionData
 from jararaca.utils.rabbitmq_utils import RabbitmqUtils
+from jararaca.utils.retry import RetryConfig, retry_with_backoff
 
 logger = logging.getLogger(__name__)
 
@@ -36,6 +62,22 @@ class AioPikaWorkerConfig:
     url: str
     exchange: str
     prefetch_count: int
+    connection_retry_config: RetryConfig = field(
+        default_factory=lambda: RetryConfig(
+            max_retries=15,
+            initial_delay=1.0,
+            max_delay=60.0,
+            backoff_factor=2.0,
+        )
+    )
+    consumer_retry_config: RetryConfig = field(
+        default_factory=lambda: RetryConfig(
+            max_retries=15,
+            initial_delay=0.5,
+            max_delay=40.0,
+            backoff_factor=2.0,
+        )
+    )
 
 
 class AioPikaMessage(MessageOf[Message]):
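
Note: RetryConfig and retry_with_backoff come from the new jararaca/utils/retry.py (+141 lines, not shown in this diff). Judging only from the call sites in this file — the fields max_retries, initial_delay, max_delay, backoff_factor, and jitter, plus an awaitable helper that re-invokes a zero-argument coroutine factory until it succeeds or retries run out — a minimal sketch of the contract might look like the following (the field defaults here are assumptions, not the published values):

    import asyncio
    import random
    from dataclasses import dataclass
    from typing import Any, Awaitable, Callable


    @dataclass
    class RetryConfig:
        # Illustrative defaults; the worker code always passes explicit values.
        max_retries: int = 5
        initial_delay: float = 1.0
        max_delay: float = 60.0
        backoff_factor: float = 2.0
        jitter: bool = True  # handle_reject_message below reads retry_config.jitter


    async def retry_with_backoff(
        fn: Callable[[], Awaitable[Any]],
        retry_config: RetryConfig,
        retry_exceptions: tuple[type[BaseException], ...] = (Exception,),
    ) -> Any:
        """Await fn(), retrying on retry_exceptions with exponential backoff."""
        attempt = 0
        while True:
            try:
                return await fn()
            except retry_exceptions:
                if attempt >= retry_config.max_retries:
                    raise
                delay = min(
                    retry_config.initial_delay * retry_config.backoff_factor**attempt,
                    retry_config.max_delay,
                )
                if retry_config.jitter:
                    delay += random.uniform(-0.25 * delay, 0.25 * delay)
                await asyncio.sleep(max(delay, 0.1))
                attempt += 1

This matches how the worker uses it: retry_with_backoff(self._verify_infrastructure, ...) returns the wrapped function's result, and fn=lambda: self._establish_channel(queue_name) shows the zero-argument-callable shape.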
@@ -76,78 +118,732 @@ class MessageProcessingLocker:
         await asyncio.gather(*self.current_processing_messages_set)
 
 
-class AioPikaMicroserviceConsumer:
+class MessageBusConsumer(ABC):
+
+    async def consume(self) -> None:
+        raise NotImplementedError("consume method not implemented")
+
+    def shutdown(self) -> None: ...
+
+    async def close(self) -> None:
+        """Close all resources related to the consumer"""
+
+
+class AioPikaMicroserviceConsumer(MessageBusConsumer):
     def __init__(
         self,
+        broker_backend: MessageBrokerBackend,
         config: AioPikaWorkerConfig,
         message_handler_set: MESSAGE_HANDLER_DATA_SET,
+        scheduled_actions: SCHEDULED_ACTION_DATA_SET,
         uow_context_provider: UnitOfWorkContextProvider,
     ):
+
+        self.broker_backend = broker_backend
         self.config = config
         self.message_handler_set = message_handler_set
+        self.scheduled_actions = scheduled_actions
         self.incoming_map: dict[str, MessageHandlerData] = {}
         self.uow_context_provider = uow_context_provider
         self.shutdown_event = asyncio.Event()
         self.lock = asyncio.Lock()
         self.tasks: set[asyncio.Task[Any]] = set()
+        self.connection: aio_pika.abc.AbstractConnection | None = None
+        self.channels: dict[str, aio_pika.abc.AbstractChannel] = {}
+
+    async def _verify_infrastructure(self) -> bool:
+        """
+        Verify that the required RabbitMQ infrastructure (exchanges, queues) exists.
+        Returns True if all required infrastructure is in place.
+        """
+        try:
+            async with self.connect() as connection:
+                # Create a main channel just for checking infrastructure
+                async with connection.channel() as main_channel:
+                    # Get existing exchange and queues to verify infrastructure is in place
+                    await RabbitmqUtils.get_main_exchange(
+                        channel=main_channel,
+                        exchange_name=self.config.exchange,
+                    )
+                    await RabbitmqUtils.get_dl_exchange(channel=main_channel)
+                    await RabbitmqUtils.get_dl_queue(channel=main_channel)
+                    return True
+        except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
+            logger.critical(
+                f"Required exchange or queue infrastructure not found. "
+                f"Please use the declare command first to create the required infrastructure. Error: {e}"
+            )
+            return False
+
+    async def _setup_message_handler_consumer(
+        self, handler: MessageHandlerData
+    ) -> bool:
+        """
+        Set up a consumer for a message handler with retry mechanism.
+        Returns True if successful, False otherwise.
+        """
+        queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
+        routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"
+
+        async def setup_consumer() -> None:
+            # Create a channel using the context manager
+            async with self.create_channel(queue_name) as channel:
+                queue = await RabbitmqUtils.get_queue(
+                    channel=channel, queue_name=queue_name
+                )
+
+                # Configure consumer right away while in the context
+                await queue.consume(
+                    callback=MessageHandlerCallback(
+                        consumer=self,
+                        queue_name=queue_name,
+                        routing_key=routing_key,
+                        message_handler=handler,
+                    ),
+                    no_ack=handler.spec.auto_ack,
+                )
+
+                logger.info(
+                    f"Consuming message handler {queue_name} on dedicated channel"
+                )
+
+        try:
+            # Setup with retry
+            await retry_with_backoff(
+                setup_consumer,
+                retry_config=self.config.consumer_retry_config,
+                retry_exceptions=(ChannelNotFoundEntity, ChannelClosed, AMQPError),
+            )
+            return True
+        except Exception as e:
+            logger.error(
+                f"Failed to setup consumer for queue '{queue_name}' after retries: {e}"
+            )
+            return False
+
+    async def _setup_scheduled_action_consumer(
+        self, scheduled_action: ScheduledActionData
+    ) -> bool:
+        """
+        Set up a consumer for a scheduled action with retry mechanism.
+        Returns True if successful, False otherwise.
+        """
+        queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+        routing_key = queue_name
+
+        async def setup_consumer() -> None:
+            # Create a channel using the context manager
+            async with self.create_channel(queue_name) as channel:
+                queue = await RabbitmqUtils.get_queue(
+                    channel=channel, queue_name=queue_name
+                )
+
+                # Configure consumer right away while in the context
+                await queue.consume(
+                    callback=ScheduledMessageHandlerCallback(
+                        consumer=self,
+                        queue_name=queue_name,
+                        routing_key=routing_key,
+                        scheduled_action=scheduled_action,
+                    ),
+                    no_ack=True,
+                )
+
+                logger.info(f"Consuming scheduler {queue_name} on dedicated channel")
+
+        try:
+            # Setup with retry
+            await retry_with_backoff(
+                setup_consumer,
+                retry_config=self.config.consumer_retry_config,
+                retry_exceptions=(ChannelNotFoundEntity, ChannelClosed, AMQPError),
+            )
+            return True
+        except Exception as e:
+            logger.error(
+                f"Failed to setup consumer for scheduler queue '{queue_name}' after retries: {e}"
+            )
+            return False
+
+    async def consume(self) -> None:
+        """
+        Main consume method that sets up all message handlers and scheduled actions with retry mechanisms.
+        """
+        # Verify infrastructure with retry
+        infra_check_success = await retry_with_backoff(
+            self._verify_infrastructure,
+            retry_config=self.config.connection_retry_config,
+            retry_exceptions=(Exception,),
+        )
 
-    async def consume(self, passive_declare: bool) -> None:
+        if not infra_check_success:
+            logger.critical("Failed to verify RabbitMQ infrastructure. Shutting down.")
+            self.shutdown_event.set()
+            return
 
-        connection = await aio_pika.connect(self.config.url)
+        async def wait_for(
+            type: str, name: str, coroutine: Awaitable[bool]
+        ) -> tuple[str, str, bool]:
+            return type, name, await coroutine
 
-        channel = await connection.channel()
+        tasks: set[asyncio.Task[tuple[str, str, bool]]] = set()
+
+        # Setup message handlers
+        for handler in self.message_handler_set:
+            queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
+            self.incoming_map[queue_name] = handler
 
+            tasks.add(
+                task := asyncio.create_task(
+                    wait_for(
+                        "message_handler",
+                        queue_name,
+                        self._setup_message_handler_consumer(handler),
+                    )
+                )
+            )
+            # task.add_done_callback(tasks.discard)
+            # success = await self._setup_message_handler_consumer(handler)
+            # if not success:
+            #     logger.warning(
+            #         f"Failed to set up consumer for {queue_name}, will not process messages from this queue"
+            #     )
+
+        # Setup scheduled actions
+        for scheduled_action in self.scheduled_actions:
+
+            queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+            tasks.add(
+                task := asyncio.create_task(
+                    wait_for(
+                        "scheduled_action",
+                        queue_name,
+                        self._setup_scheduled_action_consumer(scheduled_action),
+                    )
+                )
+            )
+            # task.add_done_callback(tasks.discard)
+
+            # success = await self._setup_scheduled_action_consumer(scheduled_action)
+            # if not success:
+            #     queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+            #     logger.warning(
+            #         f"Failed to set up consumer for scheduled action {queue_name}, will not process scheduled tasks from this queue"
+            #     )
+
+        async def handle_task_results() -> None:
+            for task in asyncio.as_completed(tasks):
+                type, name, success = await task
+                if success:
+                    logger.info(f"Successfully set up {type} consumer for {name}")
+                else:
+                    logger.warning(
+                        f"Failed to set up {type} consumer for {name}, will not process messages from this queue"
+                    )
+
+        handle_task_results_task = asyncio.create_task(handle_task_results())
+
+        # Wait for shutdown signal
+        await self.shutdown_event.wait()
+        logger.info("Shutdown event received, stopping consumers")
+        handle_task_results_task.cancel()
+        with suppress(asyncio.CancelledError):
+            await handle_task_results_task
+        for task in tasks:
+            if not task.done():
+                task.cancel()
+                with suppress(asyncio.CancelledError):
+                    await task
+        logger.info("Worker shutting down")
+
+        # Wait for all tasks to complete
+        await self.wait_all_tasks_done()
+
+        # Close all channels and the connection
+        await self.close_channels_and_connection()
+
+    async def wait_all_tasks_done(self) -> None:
+        if not self.tasks:
+            return
+
+        logger.info(f"Waiting for {len(self.tasks)} in-flight tasks to complete")
+        async with self.lock:
+            # Use gather with return_exceptions=True to ensure all tasks are awaited
+            # even if some raise exceptions
+            results = await asyncio.gather(*self.tasks, return_exceptions=True)
+
+            # Log any exceptions that occurred
+            for result in results:
+                if isinstance(result, Exception):
+                    logger.error(f"Task raised an exception during shutdown: {result}")
+
+    async def close_channels_and_connection(self) -> None:
+        """Close all channels and then the connection"""
+        # Close all channels
+        channel_close_tasks = []
+        for queue_name, channel in self.channels.items():
+            try:
+                if not channel.is_closed:
+                    logger.info(f"Closing channel for queue {queue_name}")
+                    channel_close_tasks.append(channel.close())
+                else:
+                    logger.info(f"Channel for queue {queue_name} already closed")
+            except Exception as e:
+                logger.error(
+                    f"Error preparing to close channel for queue {queue_name}: {e}"
+                )
+
+        # Wait for all channels to close (if any)
+        if channel_close_tasks:
+            try:
+                await asyncio.gather(*channel_close_tasks, return_exceptions=True)
+            except Exception as e:
+                logger.error(f"Error during channel closures: {e}")
+
+        # Clear channels dictionary
+        self.channels.clear()
+
+        # Close the connection
+        if self.connection:
+            try:
+                if not self.connection.is_closed:
+                    logger.info("Closing RabbitMQ connection")
+                    await self.connection.close()
+                else:
+                    logger.info("RabbitMQ connection already closed")
+            except Exception as e:
+                logger.error(f"Error closing RabbitMQ connection: {e}")
+            self.connection = None
+
+    def shutdown(self) -> None:
+        """Signal for shutdown"""
+        logger.info("Initiating graceful shutdown")
+        self.shutdown_event.set()
+
+    async def close(self) -> None:
+        """Implement MessageBusConsumer.close for cleanup"""
+        self.shutdown()
+        await self.wait_all_tasks_done()
+        await self.close_channels_and_connection()
+
+    async def get_channel(self, queue_name: str) -> aio_pika.abc.AbstractChannel | None:
+        """
+        Get the channel for a specific queue, or None if not found.
+        This helps with error handling when a channel might have been closed.
+        """
+        if queue_name not in self.channels:
+            logger.warning(f"No channel found for queue {queue_name}")
+            return None
+
+        try:
+            channel = self.channels[queue_name]
+            if channel.is_closed:
+                logger.warning(f"Channel for queue {queue_name} is closed")
+                # Attempt to recreate the channel if needed
+                if self.connection and not self.connection.is_closed:
+                    logger.info(f"Creating new channel for {queue_name}")
+                    self.channels[queue_name] = await self.connection.channel()
+                    await self.channels[queue_name].set_qos(
+                        prefetch_count=self.config.prefetch_count
+                    )
+                    return self.channels[queue_name]
+                return None
+            return channel
+        except Exception as e:
+            logger.error(f"Error accessing channel for queue {queue_name}: {e}")
+            return None
+
+    async def _establish_channel(self, queue_name: str) -> aio_pika.abc.AbstractChannel:
+        """
+        Creates a new channel for the specified queue with proper QoS settings.
+        """
+        if self.connection is None or self.connection.is_closed:
+            logger.warning(
+                f"Cannot create channel for {queue_name}: connection is not available"
+            )
+            raise RuntimeError("Connection is not available")
+
+        logger.debug(f"Creating channel for queue {queue_name}")
+        channel = await self.connection.channel()
         await channel.set_qos(prefetch_count=self.config.prefetch_count)
+        logger.debug(f"Created channel for queue {queue_name}")
+        return channel
+
+    @asynccontextmanager
+    async def create_channel(
+        self, queue_name: str
+    ) -> AsyncGenerator[aio_pika.abc.AbstractChannel, None]:
+        """
+        Create and yield a channel for the specified queue with retry mechanism.
+        This context manager ensures the channel is properly managed.
+        """
+        try:
+            # Create a new channel with retry
+            channel = await retry_with_backoff(
+                fn=lambda: self._establish_channel(queue_name),
+                retry_config=self.config.consumer_retry_config,
+                retry_exceptions=(
+                    aio_pika.exceptions.AMQPConnectionError,
+                    aio_pika.exceptions.AMQPChannelError,
+                    ConnectionError,
+                ),
+            )
+
+            # Save in the channels dict for tracking
+            self.channels[queue_name] = channel
+            logger.debug(f"Created new channel for queue {queue_name}")
+
+            try:
+                yield channel
+            finally:
+                # Don't close the channel here as it might be used later
+                # It will be closed during shutdown
+                pass
+        except aio_pika.exceptions.AMQPError as e:
+            logger.error(
+                f"Error creating channel for queue {queue_name} after retries: {e}"
+            )
+            raise
+
+    async def _establish_connection(self) -> aio_pika.abc.AbstractConnection:
+        """
+        Creates a new RabbitMQ connection with retry logic.
+        """
+        try:
+            logger.info("Establishing connection to RabbitMQ")
+            connection = await aio_pika.connect(self.config.url)
+            logger.info("Connected to RabbitMQ successfully")
+            return connection
+        except Exception as e:
+            logger.error(f"Failed to connect to RabbitMQ: {e}")
+            raise
 
-        main_ex = await RabbitmqUtils.declare_main_exchange(
-            channel=channel,
-            exchange_name=self.config.exchange,
-            passive=passive_declare,
+    @asynccontextmanager
+    async def connect(self) -> AsyncGenerator[aio_pika.abc.AbstractConnection, None]:
+        """
+        Create and manage the main connection to RabbitMQ with automatic retry.
+        """
+        if self.connection is not None and not self.connection.is_closed:
+            logger.debug("Connection already exists, reusing existing connection")
+            try:
+                yield self.connection
+            finally:
+                # The existing connection will be handled by close_channels_and_connection
+                pass
+            return
+
+        try:
+            # Create a new connection with retry
+            self.connection = await retry_with_backoff(
+                self._establish_connection,
+                retry_config=self.config.connection_retry_config,
+                retry_exceptions=(
+                    aio_pika.exceptions.AMQPConnectionError,
+                    ConnectionError,
+                    OSError,
+                    TimeoutError,
+                ),
+            )
+
+            try:
+                yield self.connection
+            finally:
+                # Don't close the connection here; it will be closed in close_channels_and_connection
+                pass
+        except Exception as e:
+            logger.error(
+                f"Failed to establish connection to RabbitMQ after retries: {e}"
+            )
+            if self.connection:
+                try:
+                    await self.connection.close()
+                except Exception as close_error:
+                    logger.error(
+                        f"Error closing connection after connect failure: {close_error}"
+                    )
+                self.connection = None
+            raise
+
+    @asynccontextmanager
+    async def get_channel_ctx(
+        self, queue_name: str
+    ) -> AsyncGenerator[aio_pika.abc.AbstractChannel, None]:
+        """
+        Get a channel for a specific queue as a context manager.
+        This is safer than using get_channel directly as it ensures proper error handling.
+        """
+        channel = await self.get_channel(queue_name)
+        if channel is None:
+            if self.connection and not self.connection.is_closed:
+                # Try to create a new channel
+                async with self.create_channel(queue_name) as new_channel:
+                    yield new_channel
+            else:
+                raise RuntimeError(
+                    f"Cannot get channel for queue {queue_name}: no connection available"
+                )
+        else:
+            try:
+                yield channel
+            finally:
+                # We don't close the channel here as it's managed by the consumer
+                pass
+
+
+def create_message_bus(
+    broker_url: str,
+    broker_backend: MessageBrokerBackend,
+    scheduled_actions: SCHEDULED_ACTION_DATA_SET,
+    message_handler_set: MESSAGE_HANDLER_DATA_SET,
+    uow_context_provider: UnitOfWorkContextProvider,
+) -> MessageBusConsumer:
+
+    parsed_url = urlparse(broker_url)
+
+    if parsed_url.scheme == "amqp" or parsed_url.scheme == "amqps":
+        assert parsed_url.query, "Query string must be set for AMQP URLs"
+
+        query_params: dict[str, list[str]] = parse_qs(parsed_url.query)
+
+        assert "exchange" in query_params, "Exchange must be set in the query string"
+        assert (
+            len(query_params["exchange"]) == 1
+        ), "Exchange must be set in the query string"
+        assert (
+            "prefetch_count" in query_params
+        ), "Prefetch count must be set in the query string"
+        assert (
+            len(query_params["prefetch_count"]) == 1
+        ), "Prefetch count must be set in the query string"
+        assert query_params["prefetch_count"][
+            0
+        ].isdigit(), "Prefetch count must be an integer in the query string"
+        assert query_params["exchange"][0], "Exchange must be set in the query string"
+        assert query_params["prefetch_count"][
+            0
+        ], "Prefetch count must be set in the query string"
+
+        exchange = query_params["exchange"][0]
+        prefetch_count = int(query_params["prefetch_count"][0])
+
+        # Parse optional retry configuration parameters
+        connection_retry_config = RetryConfig()
+        consumer_retry_config = RetryConfig(
+            max_retries=30, initial_delay=5, max_delay=60.0, backoff_factor=3.0
         )
 
-        dlx, dlq = await RabbitmqUtils.declare_dl_kit(channel=channel)
+        # Connection retry config parameters
+        if (
+            "connection_retry_max" in query_params
+            and query_params["connection_retry_max"][0].isdigit()
+        ):
+            connection_retry_config.max_retries = int(
+                query_params["connection_retry_max"][0]
+            )
 
-        for handler in self.message_handler_set:
+        if "connection_retry_delay" in query_params:
+            try:
+                connection_retry_config.initial_delay = float(
+                    query_params["connection_retry_delay"][0]
+                )
+            except ValueError:
+                pass
 
-            queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.callable.__module__}.{handler.callable.__qualname__}"
-            routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"
+        if "connection_retry_max_delay" in query_params:
+            try:
+                connection_retry_config.max_delay = float(
+                    query_params["connection_retry_max_delay"][0]
+                )
+            except ValueError:
+                pass
 
-            self.incoming_map[queue_name] = handler
+        if "connection_retry_backoff" in query_params:
+            try:
+                connection_retry_config.backoff_factor = float(
+                    query_params["connection_retry_backoff"][0]
+                )
+            except ValueError:
+                pass
+
+        # Consumer retry config parameters
+        if (
+            "consumer_retry_max" in query_params
+            and query_params["consumer_retry_max"][0].isdigit()
+        ):
+            consumer_retry_config.max_retries = int(
+                query_params["consumer_retry_max"][0]
+            )
+
+        if "consumer_retry_delay" in query_params:
+            try:
+                consumer_retry_config.initial_delay = float(
+                    query_params["consumer_retry_delay"][0]
+                )
+            except ValueError:
+                pass
+
+        if "consumer_retry_max_delay" in query_params:
+            try:
+                consumer_retry_config.max_delay = float(
+                    query_params["consumer_retry_max_delay"][0]
+                )
+            except ValueError:
+                pass
+
+        if "consumer_retry_backoff" in query_params:
+            try:
+                consumer_retry_config.backoff_factor = float(
+                    query_params["consumer_retry_backoff"][0]
+                )
+            except ValueError:
+                pass
+
+        config = AioPikaWorkerConfig(
+            url=broker_url,
+            exchange=exchange,
+            prefetch_count=prefetch_count,
+            connection_retry_config=connection_retry_config,
+            consumer_retry_config=consumer_retry_config,
+        )
+
+        return AioPikaMicroserviceConsumer(
+            config=config,
+            broker_backend=broker_backend,
+            message_handler_set=message_handler_set,
+            scheduled_actions=scheduled_actions,
+            uow_context_provider=uow_context_provider,
+        )
+
+    raise ValueError(
+        f"Unsupported broker URL scheme: {parsed_url.scheme}. Supported schemes are amqp and amqps"
+    )
 
-            queue: aio_pika.abc.AbstractQueue = await channel.declare_queue(
-                passive=passive_declare,
-                name=queue_name,
-                arguments={
-                    "x-dead-letter-exchange": dlx.name,
-                    "x-dead-letter-routing-key": dlq.name,
-                },
+
+class ScheduledMessageHandlerCallback:
+    def __init__(
+        self,
+        consumer: AioPikaMicroserviceConsumer,
+        queue_name: str,
+        routing_key: str,
+        scheduled_action: ScheduledActionData,
+    ):
+        self.consumer = consumer
+        self.queue_name = queue_name
+        self.routing_key = routing_key
+        self.scheduled_action = scheduled_action
+
+    async def __call__(
+        self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
+    ) -> None:
+
+        if self.consumer.shutdown_event.is_set():
+            logger.info(
+                f"Shutdown in progress. Requeuing scheduled message for {self.queue_name}"
             )
+            try:
+                # Use channel context for requeuing
+                async with self.consumer.get_channel_ctx(self.queue_name):
+                    await aio_pika_message.reject(requeue=True)
+            except RuntimeError:
+                logger.warning(
+                    f"Could not requeue scheduled message during shutdown - channel not available"
+                )
+            except Exception as e:
+                logger.error(
+                    f"Failed to requeue scheduled message during shutdown: {e}"
+                )
+            return
 
-            await queue.bind(exchange=main_ex, routing_key=routing_key)
+        async with self.consumer.lock:
+            task = asyncio.create_task(self.handle_message(aio_pika_message))
+            self.consumer.tasks.add(task)
+            task.add_done_callback(self.handle_message_consume_done)
 
-            await queue.consume(
-                callback=MessageHandlerCallback(
-                    consumer=self,
-                    queue_name=queue_name,
-                    routing_key=routing_key,
-                    message_handler=handler,
-                ),
-                no_ack=handler.spec.auto_ack,
+    def handle_message_consume_done(self, task: asyncio.Task[Any]) -> None:
+        self.consumer.tasks.discard(task)
+        if task.cancelled():
+            logger.warning(f"Scheduled task for {self.queue_name} was cancelled")
+            return
+
+        if (error := task.exception()) is not None:
+            logger.exception(
+                f"Error processing scheduled action {self.queue_name}", exc_info=error
             )
 
-            print(f"Consuming {queue_name}")
+    async def handle_message(
+        self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
+    ) -> None:
 
-        await self.shutdown_event.wait()
-        logger.info("Worker shutting down")
+        if self.consumer.shutdown_event.is_set():
+            logger.info(f"Shutdown event set. Requeuing message for {self.queue_name}")
+            try:
+                # Use channel context for requeuing
+                async with self.consumer.get_channel_ctx(self.queue_name):
+                    await aio_pika_message.reject(requeue=True)
+                    return
+            except RuntimeError:
+                logger.warning(
+                    f"Could not requeue message during shutdown - channel not available"
+                )
+            except Exception as e:
+                logger.error(f"Failed to requeue message during shutdown: {e}")
+            return
+
+        sig = inspect.signature(self.scheduled_action.callable)
+        if len(sig.parameters) == 1:
+
+            task = asyncio.create_task(
+                self.run_with_context(
+                    self.scheduled_action,
+                    (ScheduleDispatchData(int(aio_pika_message.body.decode("utf-8"))),),
+                    {},
+                )
+            )
 
-        await self.wait_all_tasks_done()
+        elif len(sig.parameters) == 0:
+            task = asyncio.create_task(
+                self.run_with_context(
+                    self.scheduled_action,
+                    (),
+                    {},
+                )
+            )
+        else:
+            logger.warning(
+                "Scheduled action '%s' must have exactly one parameter of type ScheduleDispatchData or no parameters"
+                % self.queue_name
+            )
+            return
 
-        await channel.close()
-        await connection.close()
+        self.consumer.tasks.add(task)
+        task.add_done_callback(self.handle_message_consume_done)
 
-    async def wait_all_tasks_done(self) -> None:
-        async with self.lock:
-            await asyncio.gather(*self.tasks)
+        try:
+            await task
+        except Exception as e:
+
+            logger.exception(
+                f"Error processing scheduled action {self.queue_name}: {e}"
+            )
+
+    async def run_with_context(
+        self,
+        scheduled_action: ScheduledActionData,
+        args: tuple[Any, ...],
+        kwargs: dict[str, Any],
+    ) -> None:
+        async with self.consumer.uow_context_provider(
+            AppTransactionContext(
+                controller_member_reflect=scheduled_action.controller_member,
+                transaction_data=SchedulerTransactionData(
+                    scheduled_to=datetime.now(UTC),
+                    cron_expression=scheduled_action.spec.cron,
+                    triggered_at=datetime.now(UTC),
+                ),
+            )
+        ):
+
+            await scheduled_action.callable(*args, **kwargs)
 
 
 class MessageHandlerCallback:
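
Note: create_message_bus derives the whole worker configuration from the broker URL's query string instead of a hand-built AioPikaWorkerConfig. A hypothetical URL (host, credentials, and values are placeholders) exercising the parameters parsed above:

    # exchange and prefetch_count are mandatory (asserted above);
    # the *_retry_* parameters are optional overrides.
    broker_url = (
        "amqp://guest:guest@localhost:5672/"
        "?exchange=my_exchange"
        "&prefetch_count=10"
        "&connection_retry_max=20"
        "&connection_retry_delay=2.0"
        "&connection_retry_max_delay=120.0"
        "&connection_retry_backoff=2.0"
        "&consumer_retry_max=30"
        "&consumer_retry_backoff=3.0"
    )

Malformed float values are silently ignored (the except ValueError: pass branches), while a missing exchange or a non-numeric prefetch_count fails an assertion outright.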
@@ -163,11 +859,25 @@ class MessageHandlerCallback:
         self.queue_name = queue_name
         self.routing_key = routing_key
         self.message_handler = message_handler
+        self.retry_state: dict[str, dict[str, Any]] = {}
 
     async def message_consumer(
         self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
     ) -> None:
         if self.consumer.shutdown_event.is_set():
+            logger.info(
+                f"Shutdown in progress. Requeuing message for {self.queue_name}"
+            )
+            try:
+                # Use channel context for requeuing
+                async with self.consumer.get_channel_ctx(self.queue_name):
+                    await aio_pika_message.reject(requeue=True)
+            except RuntimeError:
+                logger.warning(
+                    f"Could not requeue message during shutdown - channel not available"
+                )
+            except Exception as e:
+                logger.error(f"Failed to requeue message during shutdown: {e}")
             return
 
         async with self.consumer.lock:
@@ -178,10 +888,13 @@ class MessageHandlerCallback:
     def handle_message_consume_done(self, task: asyncio.Task[Any]) -> None:
         self.consumer.tasks.discard(task)
         if task.cancelled():
+            logger.warning(f"Task for queue {self.queue_name} was cancelled")
             return
 
         if (error := task.exception()) is not None:
-            logger.exception("Error processing message", exc_info=error)
+            logger.exception(
+                f"Error processing message for queue {self.queue_name}", exc_info=error
+            )
 
     async def __call__(
         self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
@@ -192,13 +905,177 @@ class MessageHandlerCallback:
         self,
         aio_pika_message: aio_pika.abc.AbstractIncomingMessage,
         requeue: bool = False,
+        retry_count: int = 0,
+        exception: Optional[BaseException] = None,
     ) -> None:
-        if self.message_handler.spec.auto_ack is False:
-            await aio_pika_message.reject(requeue=requeue)
-        elif requeue:
-            logger.warning(
-                f"Message {aio_pika_message.message_id} ({self.queue_name}) cannot be requeued because auto_ack is enabled"
+        """
+        Handle rejecting a message, with support for retry with exponential backoff.
+
+        Args:
+            aio_pika_message: The message to reject
+            requeue: Whether to requeue the message directly (True) or handle with retry logic (False)
+            retry_count: The current retry count for this message
+            exception: The exception that caused the rejection, if any
+        """
+        message_id = aio_pika_message.message_id or str(uuid.uuid4())
+
+        # If auto_ack is enabled, we cannot retry the message through RabbitMQ reject mechanism
+        if self.message_handler.spec.auto_ack:
+            if requeue:
+                logger.warning(
+                    f"Message {message_id} ({self.queue_name}) cannot be requeued because auto_ack is enabled"
+                )
+            return
+
+        try:
+            # Check if we should retry with backoff
+            if (
+                not requeue
+                and self.message_handler.spec.requeue_on_exception
+                and exception is not None
+            ):
+                # Get retry config from consumer
+                retry_config = self.consumer.config.consumer_retry_config
+
+                # Check if we reached max retries
+                if retry_count >= retry_config.max_retries:
+                    logger.warning(
+                        f"Message {message_id} ({self.queue_name}) failed after {retry_count} retries, "
+                        f"dead-lettering: {str(exception)}"
+                    )
+                    # Dead-letter the message after max retries
+                    async with self.consumer.get_channel_ctx(self.queue_name):
+                        await aio_pika_message.reject(requeue=False)
+                    return
+
+                # Calculate delay for this retry attempt
+                delay = retry_config.initial_delay * (
+                    retry_config.backoff_factor**retry_count
+                )
+                if retry_config.jitter:
+                    jitter_amount = delay * 0.25
+                    delay = delay + random.uniform(-jitter_amount, jitter_amount)
+                    delay = max(
+                        delay, 0.1
+                    )  # Ensure delay doesn't go negative due to jitter
+
+                delay = min(delay, retry_config.max_delay)
+
+                logger.info(
+                    f"Message {message_id} ({self.queue_name}) failed with {str(exception)}, "
+                    f"retry {retry_count+1}/{retry_config.max_retries} scheduled in {delay:.2f}s"
+                )
+
+                # Store retry state for this message
+                self.retry_state[message_id] = {
+                    "retry_count": retry_count + 1,
+                    "last_exception": exception,
+                    "next_retry": time.time() + delay,
+                }
+
+                # Schedule retry after delay
+                asyncio.create_task(
+                    self._delayed_retry(
+                        aio_pika_message, delay, retry_count + 1, exception
+                    )
+                )
+
+                # Acknowledge the current message since we'll handle retry ourselves
+                async with self.consumer.get_channel_ctx(self.queue_name):
+                    await aio_pika_message.ack()
+                return
+
+            # Standard reject without retry or with immediate requeue
+            async with self.consumer.get_channel_ctx(self.queue_name):
+                await aio_pika_message.reject(requeue=requeue)
+                if requeue:
+                    logger.info(
+                        f"Message {message_id} ({self.queue_name}) requeued for immediate retry"
+                    )
+                else:
+                    logger.info(
+                        f"Message {message_id} ({self.queue_name}) rejected without requeue"
+                    )
+
+        except RuntimeError as e:
+            logger.error(
+                f"Error rejecting message {message_id} ({self.queue_name}): {e}"
+            )
+        except Exception as e:
+            logger.exception(
+                f"Unexpected error rejecting message {message_id} ({self.queue_name}): {e}"
+            )
+
+    async def _delayed_retry(
+        self,
+        aio_pika_message: aio_pika.abc.AbstractIncomingMessage,
+        delay: float,
+        retry_count: int,
+        exception: Optional[BaseException],
+    ) -> None:
+        """
+        Handle delayed retry of a message after exponential backoff delay.
+
+        Args:
+            aio_pika_message: The original message
+            delay: Delay in seconds before retry
+            retry_count: The current retry count (after increment)
+            exception: The exception that caused the failure
+        """
+        message_id = aio_pika_message.message_id or str(uuid.uuid4())
+
+        try:
+            # Wait for the backoff delay
+            await asyncio.sleep(delay)
+
+            # Get message body and properties for republishing
+            message_body = aio_pika_message.body
+            headers = (
+                aio_pika_message.headers.copy() if aio_pika_message.headers else {}
+            )
+
+            # Add retry information to headers
+            headers["x-retry-count"] = retry_count
+            if exception:
+                headers["x-last-error"] = str(exception)
+
+            # Clean up retry state
+            if message_id in self.retry_state:
+                del self.retry_state[message_id]
+
+            # Republish the message to the same queue
+            async with self.consumer.get_channel_ctx(self.queue_name) as channel:
+                exchange = await RabbitmqUtils.get_main_exchange(
+                    channel=channel,
+                    exchange_name=self.consumer.config.exchange,
+                )
+
+                await exchange.publish(
+                    aio_pika.Message(
+                        body=message_body,
+                        headers=headers,
+                        message_id=message_id,
+                        content_type=aio_pika_message.content_type,
+                        content_encoding=aio_pika_message.content_encoding,
+                        delivery_mode=aio_pika_message.delivery_mode,
+                    ),
+                    routing_key=self.routing_key,
+                )
+
+            logger.info(
+                f"Message {message_id} ({self.queue_name}) republished for retry {retry_count}"
+            )
+
+        except Exception as e:
+            logger.exception(
+                f"Failed to execute delayed retry for message {message_id} ({self.queue_name}): {e}"
             )
+            # If we fail to republish, try to dead-letter the original message
+            try:
+                if message_id in self.retry_state:
+                    del self.retry_state[message_id]
+            except Exception:
+                pass
 
     async def handle_message(
         self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
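
Note: the backoff delay in handle_reject_message above is initial_delay * backoff_factor ** retry_count, perturbed by ±25% jitter, floored at 0.1s, then clamped to max_delay. Under the default consumer_retry_config (initial_delay=0.5, backoff_factor=2.0, max_delay=40.0), the pre-jitter schedule is:

    # Pre-jitter delays under the default consumer_retry_config:
    # retry 1: 0.5s, retry 2: 1s, retry 3: 2s, ..., retry 7: 32s,
    # and from retry 8 on the cap applies (0.5 * 2**7 = 64 > 40).
    for retry_count in range(9):
        delay = min(0.5 * 2.0**retry_count, 40.0)
        print(f"retry {retry_count + 1}: {delay:g}s")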
@@ -211,15 +1088,9 @@ class MessageHandlerCallback:
             await self.handle_reject_message(aio_pika_message)
             return
 
-        handler_data = self.consumer.incoming_map.get(routing_key)
+        handler_data = self.message_handler
 
-        if handler_data is None:
-            logger.warning("No handler found for topic '%s'" % routing_key)
-            await self.handle_reject_message(aio_pika_message)
-
-            return
-
-        handler = handler_data.callable
+        handler = handler_data.instance_callable
 
         sig = inspect.signature(handler)
 
@@ -263,9 +1134,12 @@ class MessageHandlerCallback:
         assert incoming_message_spec is not None
 
         async with self.consumer.uow_context_provider(
-            MessageBusAppContext(
-                message=builded_message,
-                topic=routing_key,
+            AppTransactionContext(
+                controller_member_reflect=handler_data.controller_member,
+                transaction_data=MessageBusTransactionData(
+                    message=builded_message,
+                    topic=routing_key,
+                ),
             )
         ):
             ctx: AsyncContextManager[Any]
@@ -281,29 +1155,61 @@ class MessageHandlerCallback:
                     await handler(builded_message)
                     if not incoming_message_spec.auto_ack:
                         with suppress(aio_pika.MessageProcessError):
-                            await aio_pika_message.ack()
+                            # Use channel context for acknowledgement
+                            async with self.consumer.get_channel_ctx(self.queue_name):
+                                await aio_pika_message.ack()
                 except BaseException as base_exc:
+                    # Get message id for logging
+                    message_id = aio_pika_message.message_id or str(uuid.uuid4())
+
+                    # Extract retry count from headers if available
+                    headers = aio_pika_message.headers or {}
+                    retry_count = int(str(headers.get("x-retry-count", 0)))
+
+                    # Process exception handler if configured
                     if incoming_message_spec.exception_handler is not None:
                         try:
                             incoming_message_spec.exception_handler(base_exc)
                         except Exception as nested_exc:
                             logger.exception(
-                                f"Error processing exception handler: {base_exc} | {nested_exc}"
+                                f"Error processing exception handler for message {message_id}: {base_exc} | {nested_exc}"
                             )
                     else:
                         logger.exception(
-                            f"Error processing message on topic {routing_key}"
+                            f"Error processing message {message_id} on topic {routing_key}: {str(base_exc)}"
                         )
+
+                    # Handle rejection with retry logic
                     if incoming_message_spec.requeue_on_exception:
-                        await self.handle_reject_message(aio_pika_message, requeue=True)
+                        # Use our retry with backoff mechanism
+                        await self.handle_reject_message(
+                            aio_pika_message,
+                            requeue=False,  # Don't requeue directly, use our backoff mechanism
+                            retry_count=retry_count,
+                            exception=base_exc,
+                        )
                     else:
+                        # Message shouldn't be retried, reject it
                        await self.handle_reject_message(
-                            aio_pika_message, requeue=False
+                            aio_pika_message, requeue=False, exception=base_exc
                         )
                 else:
-                    logger.info(
-                        f"Message {aio_pika_message.message_id}#{self.queue_name} processed successfully"
-                    )
+                    # Message processed successfully, log and clean up any retry state
+                    message_id = aio_pika_message.message_id or str(uuid.uuid4())
+                    if message_id in self.retry_state:
+                        del self.retry_state[message_id]
+
+                    # Log success with retry information if applicable
+                    headers = aio_pika_message.headers or {}
+                    if "x-retry-count" in headers:
+                        retry_count = int(str(headers.get("x-retry-count", 0)))
+                        logger.info(
+                            f"Message {message_id}#{self.queue_name} processed successfully after {retry_count} retries"
+                        )
+                    else:
+                        logger.info(
+                            f"Message {message_id}#{self.queue_name} processed successfully"
+                        )
 
 
 @asynccontextmanager
@@ -312,9 +1218,18 @@ async def none_context() -> AsyncGenerator[None, None]:
 
 
 class MessageBusWorker:
-    def __init__(self, app: Microservice, config: AioPikaWorkerConfig) -> None:
+    def __init__(
+        self,
+        app: Microservice,
+        broker_url: str,
+        backend_url: str,
+        handler_names: set[str] | None = None,
+    ) -> None:
         self.app = app
-        self.config = config
+        self.backend_url = backend_url
+        self.broker_url = broker_url
+        self.handler_names = handler_names
+
         self.container = Container(app)
         self.lifecycle = AppLifecycle(app, self.container)
 
@@ -322,33 +1237,49 @@ class MessageBusWorker:
             app=app, container=self.container
         )
 
-        self._consumer: AioPikaMicroserviceConsumer | None = None
+        self._consumer: MessageBusConsumer | None = None
 
     @property
-    def consumer(self) -> AioPikaMicroserviceConsumer:
+    def consumer(self) -> MessageBusConsumer:
        if self._consumer is None:
             raise RuntimeError("Consumer not started")
         return self._consumer
 
-    async def start_async(self, passive_declare: bool) -> None:
+    async def start_async(self) -> None:
         all_message_handlers_set: MESSAGE_HANDLER_DATA_SET = set()
+        all_scheduled_actions_set: SCHEDULED_ACTION_DATA_SET = set()
         async with self.lifecycle():
-            for instance_type in self.app.controllers:
-                controller = MessageBusController.get_messagebus(instance_type)
+            for instance_class in self.app.controllers:
+                controller = MessageBusController.get_messagebus(instance_class)
 
                 if controller is None:
                     continue
 
-                instance: Any = self.container.get_by_type(instance_type)
+                instance: Any = self.container.get_by_type(instance_class)
 
                 factory = controller.get_messagebus_factory()
-                handlers, _ = factory(instance)
+                handlers, schedulers = factory(instance)
 
                 message_handler_data_map: dict[str, MessageHandlerData] = {}
-
+                all_scheduled_actions_set.update(schedulers)
                 for handler_data in handlers:
                     message_type = handler_data.spec.message_type
                     topic = message_type.MESSAGE_TOPIC
+
+                    # Filter handlers by name if specified
+                    if (
+                        self.handler_names is not None
+                        and handler_data.spec.name is not None
+                    ):
+                        if handler_data.spec.name not in self.handler_names:
+                            continue
+                    elif (
+                        self.handler_names is not None
+                        and handler_data.spec.name is None
+                    ):
+                        # Skip handlers without names when filtering is requested
+                        continue
+
                     if (
                         topic in message_handler_data_map
                         and message_type.MESSAGE_TYPE == "task"
@@ -361,30 +1292,79 @@ class MessageBusWorker:
                     message_handler_data_map[topic] = handler_data
                     all_message_handlers_set.add(handler_data)
 
-            consumer = self._consumer = AioPikaMicroserviceConsumer(
-                config=self.config,
+            broker_backend = get_message_broker_backend_from_url(url=self.backend_url)
+
+            consumer = self._consumer = create_message_bus(
+                broker_url=self.broker_url,
+                broker_backend=broker_backend,
+                scheduled_actions=all_scheduled_actions_set,
                 message_handler_set=all_message_handlers_set,
                 uow_context_provider=self.uow_context_provider,
             )
 
-            await consumer.consume(passive_declare=passive_declare)
+            await consumer.consume()
 
-    def start_sync(self, passive_declare: bool) -> None:
+    def start_sync(self) -> None:
 
         def on_shutdown(loop: asyncio.AbstractEventLoop) -> None:
-            logger.info("Shutting down")
-            self.consumer.shutdown_event.set()
+            logger.info("Shutting down - signal received")
+            # Schedule the shutdown to run in the event loop
+            asyncio.create_task(self._graceful_shutdown())
+            # wait until the shutdown is complete
 
         with asyncio.Runner(loop_factory=uvloop.new_event_loop) as runner:
-            runner.get_loop().add_signal_handler(
-                signal.SIGINT, on_shutdown, runner.get_loop()
-            )
-            runner.run(self.start_async(passive_declare=passive_declare))
+            loop = runner.get_loop()
+            loop.add_signal_handler(signal.SIGINT, on_shutdown, loop)
+            # Add graceful shutdown handler for SIGTERM as well
+            loop.add_signal_handler(signal.SIGTERM, on_shutdown, loop)
+            runner.run(self.start_async())
+
+    async def _graceful_shutdown(self) -> None:
+        """Handles graceful shutdown process"""
+        logger.info("Initiating graceful shutdown sequence")
+        # Use the comprehensive close method that handles shutdown, task waiting and connection cleanup
+
+        self.consumer.shutdown()
+        logger.info("Graceful shutdown completed")
 
 
 class AioPikaMessageBusController(BusMessageController):
     def __init__(self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage):
         self.aio_pika_message = aio_pika_message
+        # We access consumer callback through context if available
+        self._callback: Optional[MessageHandlerCallback] = None
+
+    def _get_callback(self) -> MessageHandlerCallback:
+        """
+        Find the callback associated with this message.
+        This allows us to access the retry mechanisms.
+        """
+        if self._callback is None:
+            # Get the context from current frame's locals
+            frame = inspect.currentframe()
+            if frame is not None:
+                try:
+                    caller_frame = frame.f_back
+                    if caller_frame is not None:
+                        # Check for context with handler callback
+                        callback_ref = None
+                        # Look for handler_message call context
+                        while caller_frame is not None:
+                            if "self" in caller_frame.f_locals:
+                                self_obj = caller_frame.f_locals["self"]
+                                if isinstance(self_obj, MessageHandlerCallback):
+                                    callback_ref = self_obj
+                                    break
+                            caller_frame = caller_frame.f_back
+                        # Save callback reference if we found it
+                        self._callback = callback_ref
+                finally:
+                    del frame  # Avoid reference cycles
+
+        if self._callback is None:
+            raise RuntimeError("Could not find callback context for message retry")
+
+        return self._callback
 
     async def ack(self) -> None:
         await self.aio_pika_message.ack()
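
Note: with the constructor change above, a worker is started from a broker URL and a backend URL rather than a pre-built AioPikaWorkerConfig. A hypothetical launch (the app object and both URLs are placeholders; the accepted backend URL schemes depend on jararaca.broker_backend.mapper, which is not shown in this diff):

    # Hypothetical usage sketch. start_sync() installs SIGINT/SIGTERM
    # handlers and runs start_async() on a uvloop event loop until shutdown.
    worker = MessageBusWorker(
        app=app,  # your Microservice instance
        broker_url="amqp://guest:guest@localhost:5672/?exchange=my_exchange&prefetch_count=10",
        backend_url="redis://localhost:6379/0",  # assumed scheme, see broker_backend.mapper
        handler_names={"orders_handler"},  # optional: consume only these named handlers
    )
    worker.start_sync()

When handler_names is given, handlers whose spec carries no name are skipped entirely (see the filtering block in start_async).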
@@ -396,7 +1376,41 @@ class AioPikaMessageBusController(BusMessageController):
         await self.aio_pika_message.reject()
 
     async def retry(self) -> None:
-        await self.aio_pika_message.reject(requeue=True)
+        """
+        Retry the message immediately by rejecting with requeue flag.
+        This doesn't use the exponential backoff mechanism.
+        """
+        callback = self._get_callback()
+        await callback.handle_reject_message(self.aio_pika_message, requeue=True)
 
     async def retry_later(self, delay: int) -> None:
-        raise NotImplementedError("Not implemented")
+        """
+        Retry the message after a specified delay using the exponential backoff mechanism.
+
+        Args:
+            delay: Minimum delay in seconds before retrying
+        """
+        try:
+            callback = self._get_callback()
+
+            # Get current retry count from message headers
+            headers = self.aio_pika_message.headers or {}
+            retry_count = int(str(headers.get("x-retry-count", 0)))
+
+            # Handle retry with explicit delay
+            asyncio.create_task(
+                callback._delayed_retry(
+                    self.aio_pika_message,
+                    float(delay),
+                    retry_count + 1,
+                    None,  # No specific exception
+                )
+            )
+
+            # Acknowledge the current message since we'll republish
+            await self.aio_pika_message.ack()
+
+        except Exception as e:
+            logger.exception(f"Failed to schedule retry_later: {e}")
+            # Fall back to immediate retry
+            await self.aio_pika_message.reject(requeue=True)
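
Note: retry_later now acks the current delivery and republishes via MessageHandlerCallback._delayed_retry, so the delay is a minimum rather than an exact schedule, and the x-retry-count header keeps incrementing across republishes. _get_callback finds the active MessageHandlerCallback by walking the interpreter stack with inspect.currentframe(), so these controller methods only work when called somewhere beneath MessageHandlerCallback.handle_message; outside that call path they raise RuntimeError. A hypothetical handler-side use (how the controller instance reaches a handler is framework wiring not shown in this hunk):

    # Hypothetical sketch: defer reprocessing of the current delivery.
    # retry_later() acks the original message and republishes it after at
    # least 30 seconds; on failure it falls back to reject(requeue=True).
    controller = AioPikaMessageBusController(aio_pika_message)
    await controller.retry_later(delay=30)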