jararaca 0.3.11a15__py3-none-any.whl → 0.3.11a16__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
jararaca/messagebus/worker.py

@@ -1,12 +1,23 @@
  import asyncio
  import inspect
  import logging
+ import random
  import signal
+ import time
+ import uuid
  from abc import ABC
  from contextlib import asynccontextmanager, suppress
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
  from datetime import UTC, datetime
- from typing import Any, AsyncContextManager, AsyncGenerator, Type, get_origin
+ from typing import (
+     Any,
+     AsyncContextManager,
+     AsyncGenerator,
+     Awaitable,
+     Optional,
+     Type,
+     get_origin,
+ )
  from urllib.parse import parse_qs, urlparse

  import aio_pika
@@ -41,6 +52,7 @@ from jararaca.microservice import (
  )
  from jararaca.scheduler.decorators import ScheduledActionData
  from jararaca.utils.rabbitmq_utils import RabbitmqUtils
+ from jararaca.utils.retry import RetryConfig, retry_with_backoff

  logger = logging.getLogger(__name__)

@@ -50,6 +62,22 @@ class AioPikaWorkerConfig:
      url: str
      exchange: str
      prefetch_count: int
+     connection_retry_config: RetryConfig = field(
+         default_factory=lambda: RetryConfig(
+             max_retries=15,
+             initial_delay=1.0,
+             max_delay=60.0,
+             backoff_factor=2.0,
+         )
+     )
+     consumer_retry_config: RetryConfig = field(
+         default_factory=lambda: RetryConfig(
+             max_retries=15,
+             initial_delay=0.5,
+             max_delay=40.0,
+             backoff_factor=2.0,
+         )
+     )


  class AioPikaMessage(MessageOf[Message]):
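
The two new fields make the retry behaviour configurable per worker. A minimal construction sketch, assuming hypothetical URL and exchange values (not taken from the package); overriding only one of the two fields leaves the other at the default shown above:

from jararaca.messagebus.worker import AioPikaWorkerConfig
from jararaca.utils.retry import RetryConfig

config = AioPikaWorkerConfig(
    url="amqp://guest:guest@localhost/",  # hypothetical broker URL
    exchange="my_exchange",               # hypothetical exchange name
    prefetch_count=10,
    # Tighter connection policy; consumer_retry_config keeps its default.
    connection_retry_config=RetryConfig(max_retries=5, initial_delay=2.0),
)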
@@ -97,6 +125,9 @@ class MessageBusConsumer(ABC):

      def shutdown(self) -> None: ...

+     async def close(self) -> None:
+         """Close all resources related to the consumer"""
+

  class AioPikaMicroserviceConsumer(MessageBusConsumer):
      def __init__(
@@ -117,106 +148,447 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
          self.shutdown_event = asyncio.Event()
          self.lock = asyncio.Lock()
          self.tasks: set[asyncio.Task[Any]] = set()
+         self.connection: aio_pika.abc.AbstractConnection | None = None
+         self.channels: dict[str, aio_pika.abc.AbstractChannel] = {}
+
+     async def _verify_infrastructure(self) -> bool:
+         """
+         Verify that the required RabbitMQ infrastructure (exchanges, queues) exists.
+         Returns True if all required infrastructure is in place.
+         """
+         try:
+             async with self.connect() as connection:
+                 # Create a main channel just for checking infrastructure
+                 async with connection.channel() as main_channel:
+                     # Get existing exchange and queues to verify infrastructure is in place
+                     await RabbitmqUtils.get_main_exchange(
+                         channel=main_channel,
+                         exchange_name=self.config.exchange,
+                     )
+                     await RabbitmqUtils.get_dl_exchange(channel=main_channel)
+                     await RabbitmqUtils.get_dl_queue(channel=main_channel)
+                     return True
+         except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
+             logger.critical(
+                 f"Required exchange or queue infrastructure not found. "
+                 f"Please use the declare command first to create the required infrastructure. Error: {e}"
+             )
+             return False
+
+     async def _setup_message_handler_consumer(
+         self, handler: MessageHandlerData
+     ) -> bool:
+         """
+         Set up a consumer for a message handler with retry mechanism.
+         Returns True if successful, False otherwise.
+         """
+         queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
+         routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"
+
+         async def setup_consumer() -> None:
+             # Create a channel using the context manager
+             async with self.create_channel(queue_name) as channel:
+                 queue = await RabbitmqUtils.get_queue(
+                     channel=channel, queue_name=queue_name
+                 )

-     async def consume(self) -> None:
+                 # Configure consumer right away while in the context
+                 await queue.consume(
+                     callback=MessageHandlerCallback(
+                         consumer=self,
+                         queue_name=queue_name,
+                         routing_key=routing_key,
+                         message_handler=handler,
+                     ),
+                     no_ack=handler.spec.auto_ack,
+                 )
+
+                 logger.info(
+                     f"Consuming message handler {queue_name} on dedicated channel"
+                 )

-         connection = await aio_pika.connect(self.config.url)
+         try:
+             # Setup with retry
+             await retry_with_backoff(
+                 setup_consumer,
+                 retry_config=self.config.consumer_retry_config,
+                 retry_exceptions=(ChannelNotFoundEntity, ChannelClosed, AMQPError),
+             )
+             return True
+         except Exception as e:
+             logger.error(
+                 f"Failed to setup consumer for queue '{queue_name}' after retries: {e}"
+             )
+             return False
+
+     async def _setup_scheduled_action_consumer(
+         self, scheduled_action: ScheduledActionData
+     ) -> bool:
+         """
+         Set up a consumer for a scheduled action with retry mechanism.
+         Returns True if successful, False otherwise.
+         """
+         queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+         routing_key = queue_name
+
+         async def setup_consumer() -> None:
+             # Create a channel using the context manager
+             async with self.create_channel(queue_name) as channel:
+                 queue = await RabbitmqUtils.get_queue(
+                     channel=channel, queue_name=queue_name
+                 )

-         channel = await connection.channel()
+                 # Configure consumer right away while in the context
+                 await queue.consume(
+                     callback=ScheduledMessageHandlerCallback(
+                         consumer=self,
+                         queue_name=queue_name,
+                         routing_key=routing_key,
+                         scheduled_action=scheduled_action,
+                     ),
+                     no_ack=True,
+                 )

-         await channel.set_qos(prefetch_count=self.config.prefetch_count)
+                 logger.info(f"Consuming scheduler {queue_name} on dedicated channel")

-         # Get existing exchange and queues
          try:
-             exchange = await RabbitmqUtils.get_main_exchange(
-                 channel=channel,
-                 exchange_name=self.config.exchange,
+             # Setup with retry
+             await retry_with_backoff(
+                 setup_consumer,
+                 retry_config=self.config.consumer_retry_config,
+                 retry_exceptions=(ChannelNotFoundEntity, ChannelClosed, AMQPError),
              )
-
-             dlx = await RabbitmqUtils.get_dl_exchange(channel=channel)
-             dlq = await RabbitmqUtils.get_dl_queue(channel=channel)
-         except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
-             logger.critical(
-                 f"Required exchange or queue infrastructure not found and passive mode is enabled. "
-                 f"Please use the declare command first to create the required infrastructure. Error: {e}"
+             return True
+         except Exception as e:
+             logger.error(
+                 f"Failed to setup consumer for scheduler queue '{queue_name}' after retries: {e}"
              )
+             return False
+
+     async def consume(self) -> None:
+         """
+         Main consume method that sets up all message handlers and scheduled actions with retry mechanisms.
+         """
+         # Verify infrastructure with retry
+         infra_check_success = await retry_with_backoff(
+             self._verify_infrastructure,
+             retry_config=self.config.connection_retry_config,
+             retry_exceptions=(Exception,),
+         )
+
+         if not infra_check_success:
+             logger.critical("Failed to verify RabbitMQ infrastructure. Shutting down.")
              self.shutdown_event.set()
              return

-         for handler in self.message_handler_set:
+         async def wait_for(
+             type: str, name: str, coroutine: Awaitable[bool]
+         ) -> tuple[str, str, bool]:
+             return type, name, await coroutine

-             queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
-             routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"
+         tasks: set[asyncio.Task[tuple[str, str, bool]]] = set()

+         # Setup message handlers
+         for handler in self.message_handler_set:
+             queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
              self.incoming_map[queue_name] = handler

-             try:
-                 queue = await RabbitmqUtils.get_queue(
-                     channel=channel, queue_name=queue_name
-                 )
-             except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
-                 logger.error(
-                     f"Queue '{queue_name}' not found and passive mode is enabled. "
-                     f"Please use the declare command first to create the queue. Error: {e}"
+             tasks.add(
+                 task := asyncio.create_task(
+                     wait_for(
+                         "message_handler",
+                         queue_name,
+                         self._setup_message_handler_consumer(handler),
+                     )
                  )
-                 continue
-
-             await queue.consume(
-                 callback=MessageHandlerCallback(
-                     consumer=self,
-                     queue_name=queue_name,
-                     routing_key=routing_key,
-                     message_handler=handler,
-                 ),
-                 no_ack=handler.spec.auto_ack,
              )
-
-             logger.info(f"Consuming message handler {queue_name}")
-
+             # task.add_done_callback(tasks.discard)
+             # success = await self._setup_message_handler_consumer(handler)
+             # if not success:
+             #     logger.warning(
+             #         f"Failed to set up consumer for {queue_name}, will not process messages from this queue"
+             #     )
+
+         # Setup scheduled actions
          for scheduled_action in self.scheduled_actions:

              queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
-
-             routing_key = queue_name
-
-             try:
-                 queue = await RabbitmqUtils.get_queue(
-                     channel=channel, queue_name=queue_name
-                 )
-             except (ChannelNotFoundEntity, ChannelClosed, AMQPError) as e:
-                 logger.error(
-                     f"Scheduler queue '{queue_name}' not found and passive mode is enabled. "
-                     f"Please use the declare command first to create the queue. Error: {e}"
+             tasks.add(
+                 task := asyncio.create_task(
+                     wait_for(
+                         "scheduled_action",
+                         queue_name,
+                         self._setup_scheduled_action_consumer(scheduled_action),
+                     )
                  )
-                 continue
-
-             await queue.consume(
-                 callback=ScheduledMessageHandlerCallback(
-                     consumer=self,
-                     queue_name=queue_name,
-                     routing_key=routing_key,
-                     scheduled_action=scheduled_action,
-                 ),
-                 no_ack=True,
              )
+             # task.add_done_callback(tasks.discard)
+
+             # success = await self._setup_scheduled_action_consumer(scheduled_action)
+             # if not success:
+             #     queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+             #     logger.warning(
+             #         f"Failed to set up consumer for scheduled action {queue_name}, will not process scheduled tasks from this queue"
+             #     )
+
+         async def handle_task_results() -> None:
+             for task in asyncio.as_completed(tasks):
+                 type, name, success = await task
+                 if success:
+                     logger.info(f"Successfully set up {type} consumer for {name}")
+                 else:
+                     logger.warning(
+                         f"Failed to set up {type} consumer for {name}, will not process messages from this queue"
+                     )

-             logger.info(f"Consuming scheduler {queue_name}")
+         handle_task_results_task = asyncio.create_task(handle_task_results())

+         # Wait for shutdown signal
          await self.shutdown_event.wait()
+         logger.info("Shutdown event received, stopping consumers")
+         handle_task_results_task.cancel()
+         with suppress(asyncio.CancelledError):
+             await handle_task_results_task
+         for task in tasks:
+             if not task.done():
+                 task.cancel()
+                 with suppress(asyncio.CancelledError):
+                     await task
          logger.info("Worker shutting down")

+         # Wait for all tasks to complete
          await self.wait_all_tasks_done()

-         await channel.close()
-         await connection.close()
+         # Close all channels and the connection
+         await self.close_channels_and_connection()

      async def wait_all_tasks_done(self) -> None:
+         if not self.tasks:
+             return
+
+         logger.info(f"Waiting for {len(self.tasks)} in-flight tasks to complete")
          async with self.lock:
-             await asyncio.gather(*self.tasks)
+             # Use gather with return_exceptions=True to ensure all tasks are awaited
+             # even if some raise exceptions
+             results = await asyncio.gather(*self.tasks, return_exceptions=True)
+
+             # Log any exceptions that occurred
+             for result in results:
+                 if isinstance(result, Exception):
+                     logger.error(f"Task raised an exception during shutdown: {result}")
+
+     async def close_channels_and_connection(self) -> None:
+         """Close all channels and then the connection"""
+         # Close all channels
+         channel_close_tasks = []
+         for queue_name, channel in self.channels.items():
+             try:
+                 if not channel.is_closed:
+                     logger.info(f"Closing channel for queue {queue_name}")
+                     channel_close_tasks.append(channel.close())
+                 else:
+                     logger.info(f"Channel for queue {queue_name} already closed")
+             except Exception as e:
+                 logger.error(
+                     f"Error preparing to close channel for queue {queue_name}: {e}"
+                 )
+
+         # Wait for all channels to close (if any)
+         if channel_close_tasks:
+             try:
+                 await asyncio.gather(*channel_close_tasks, return_exceptions=True)
+             except Exception as e:
+                 logger.error(f"Error during channel closures: {e}")
+
+         # Clear channels dictionary
+         self.channels.clear()
+
+         # Close the connection
+         if self.connection:
+             try:
+                 if not self.connection.is_closed:
+                     logger.info("Closing RabbitMQ connection")
+                     await self.connection.close()
+                 else:
+                     logger.info("RabbitMQ connection already closed")
+             except Exception as e:
+                 logger.error(f"Error closing RabbitMQ connection: {e}")
+             self.connection = None

      def shutdown(self) -> None:
+         """Signal for shutdown"""
+         logger.info("Initiating graceful shutdown")
          self.shutdown_event.set()

+     async def close(self) -> None:
+         """Implement MessageBusConsumer.close for cleanup"""
+         self.shutdown()
+         await self.wait_all_tasks_done()
+         await self.close_channels_and_connection()
+
+     async def get_channel(self, queue_name: str) -> aio_pika.abc.AbstractChannel | None:
+         """
+         Get the channel for a specific queue, or None if not found.
+         This helps with error handling when a channel might have been closed.
+         """
+         if queue_name not in self.channels:
+             logger.warning(f"No channel found for queue {queue_name}")
+             return None
+
+         try:
+             channel = self.channels[queue_name]
+             if channel.is_closed:
+                 logger.warning(f"Channel for queue {queue_name} is closed")
+                 # Attempt to recreate the channel if needed
+                 if self.connection and not self.connection.is_closed:
+                     logger.info(f"Creating new channel for {queue_name}")
+                     self.channels[queue_name] = await self.connection.channel()
+                     await self.channels[queue_name].set_qos(
+                         prefetch_count=self.config.prefetch_count
+                     )
+                     return self.channels[queue_name]
+                 return None
+             return channel
+         except Exception as e:
+             logger.error(f"Error accessing channel for queue {queue_name}: {e}")
+             return None
+
+     async def _establish_channel(self, queue_name: str) -> aio_pika.abc.AbstractChannel:
+         """
+         Creates a new channel for the specified queue with proper QoS settings.
+         """
+         if self.connection is None or self.connection.is_closed:
+             logger.warning(
+                 f"Cannot create channel for {queue_name}: connection is not available"
+             )
+             raise RuntimeError("Connection is not available")
+
+         logger.debug(f"Creating channel for queue {queue_name}")
+         channel = await self.connection.channel()
+         await channel.set_qos(prefetch_count=self.config.prefetch_count)
+         logger.debug(f"Created channel for queue {queue_name}")
+         return channel
+
+     @asynccontextmanager
+     async def create_channel(
+         self, queue_name: str
+     ) -> AsyncGenerator[aio_pika.abc.AbstractChannel, None]:
+         """
+         Create and yield a channel for the specified queue with retry mechanism.
+         This context manager ensures the channel is properly managed.
+         """
+         try:
+             # Create a new channel with retry
+             channel = await retry_with_backoff(
+                 fn=lambda: self._establish_channel(queue_name),
+                 retry_config=self.config.consumer_retry_config,
+                 retry_exceptions=(
+                     aio_pika.exceptions.AMQPConnectionError,
+                     aio_pika.exceptions.AMQPChannelError,
+                     ConnectionError,
+                 ),
+             )
+
+             # Save in the channels dict for tracking
+             self.channels[queue_name] = channel
+             logger.debug(f"Created new channel for queue {queue_name}")
+
+             try:
+                 yield channel
+             finally:
+                 # Don't close the channel here as it might be used later
+                 # It will be closed during shutdown
+                 pass
+         except aio_pika.exceptions.AMQPError as e:
+             logger.error(
+                 f"Error creating channel for queue {queue_name} after retries: {e}"
+             )
+             raise
+
+     async def _establish_connection(self) -> aio_pika.abc.AbstractConnection:
+         """
+         Creates a new RabbitMQ connection with retry logic.
+         """
+         try:
+             logger.info("Establishing connection to RabbitMQ")
+             connection = await aio_pika.connect(self.config.url)
+             logger.info("Connected to RabbitMQ successfully")
+             return connection
+         except Exception as e:
+             logger.error(f"Failed to connect to RabbitMQ: {e}")
+             raise
+
+     @asynccontextmanager
+     async def connect(self) -> AsyncGenerator[aio_pika.abc.AbstractConnection, None]:
+         """
+         Create and manage the main connection to RabbitMQ with automatic retry.
+         """
+         if self.connection is not None and not self.connection.is_closed:
+             logger.debug("Connection already exists, reusing existing connection")
+             try:
+                 yield self.connection
+             finally:
+                 # The existing connection will be handled by close_channels_and_connection
+                 pass
+             return
+
+         try:
+             # Create a new connection with retry
+             self.connection = await retry_with_backoff(
+                 self._establish_connection,
+                 retry_config=self.config.connection_retry_config,
+                 retry_exceptions=(
+                     aio_pika.exceptions.AMQPConnectionError,
+                     ConnectionError,
+                     OSError,
+                     TimeoutError,
+                 ),
+             )
+
+             try:
+                 yield self.connection
+             finally:
+                 # Don't close the connection here; it will be closed in close_channels_and_connection
+                 pass
+         except Exception as e:
+             logger.error(
+                 f"Failed to establish connection to RabbitMQ after retries: {e}"
+             )
+             if self.connection:
+                 try:
+                     await self.connection.close()
+                 except Exception as close_error:
+                     logger.error(
+                         f"Error closing connection after connect failure: {close_error}"
+                     )
+                 self.connection = None
+             raise
+
+     @asynccontextmanager
+     async def get_channel_ctx(
+         self, queue_name: str
+     ) -> AsyncGenerator[aio_pika.abc.AbstractChannel, None]:
+         """
+         Get a channel for a specific queue as a context manager.
+         This is safer than using get_channel directly as it ensures proper error handling.
+         """
+         channel = await self.get_channel(queue_name)
+         if channel is None:
+             if self.connection and not self.connection.is_closed:
+                 # Try to create a new channel
+                 async with self.create_channel(queue_name) as new_channel:
+                     yield new_channel
+             else:
+                 raise RuntimeError(
+                     f"Cannot get channel for queue {queue_name}: no connection available"
+                 )
+         else:
+             try:
+                 yield channel
+             finally:
+                 # We don't close the channel here as it's managed by the consumer
+                 pass
+

  def create_message_bus(
      broker_url: str,
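
The rewritten consume() fans the per-queue setup out to asyncio tasks and tags each result with a kind and a name so successes and failures can be reported individually. A self-contained sketch of that pattern, with stand-in queue names and setup work:

import asyncio
from typing import Awaitable

async def wait_for(kind: str, name: str, coro: Awaitable[bool]) -> tuple[str, str, bool]:
    # Tag the awaited result so the reporting loop knows which consumer it belongs to.
    return kind, name, await coro

async def setup(name: str) -> bool:
    await asyncio.sleep(0.1)  # stand-in for channel/queue setup work
    return True

async def main() -> None:
    tasks = {
        asyncio.create_task(wait_for("message_handler", name, setup(name)))
        for name in ("topic.a.handler", "topic.b.handler")
    }
    for fut in asyncio.as_completed(tasks):
        kind, name, ok = await fut
        print(f"{kind} {name}: {'ok' if ok else 'failed'}")

asyncio.run(main())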
@@ -254,10 +626,84 @@ def create_message_bus(
      exchange = query_params["exchange"][0]
      prefetch_count = int(query_params["prefetch_count"][0])

+     # Parse optional retry configuration parameters
+     connection_retry_config = RetryConfig()
+     consumer_retry_config = RetryConfig(
+         max_retries=30, initial_delay=5, max_delay=60.0, backoff_factor=3.0
+     )
+
+     # Connection retry config parameters
+     if (
+         "connection_retry_max" in query_params
+         and query_params["connection_retry_max"][0].isdigit()
+     ):
+         connection_retry_config.max_retries = int(
+             query_params["connection_retry_max"][0]
+         )
+
+     if "connection_retry_delay" in query_params:
+         try:
+             connection_retry_config.initial_delay = float(
+                 query_params["connection_retry_delay"][0]
+             )
+         except ValueError:
+             pass
+
+     if "connection_retry_max_delay" in query_params:
+         try:
+             connection_retry_config.max_delay = float(
+                 query_params["connection_retry_max_delay"][0]
+             )
+         except ValueError:
+             pass
+
+     if "connection_retry_backoff" in query_params:
+         try:
+             connection_retry_config.backoff_factor = float(
+                 query_params["connection_retry_backoff"][0]
+             )
+         except ValueError:
+             pass
+
+     # Consumer retry config parameters
+     if (
+         "consumer_retry_max" in query_params
+         and query_params["consumer_retry_max"][0].isdigit()
+     ):
+         consumer_retry_config.max_retries = int(
+             query_params["consumer_retry_max"][0]
+         )
+
+     if "consumer_retry_delay" in query_params:
+         try:
+             consumer_retry_config.initial_delay = float(
+                 query_params["consumer_retry_delay"][0]
+             )
+         except ValueError:
+             pass
+
+     if "consumer_retry_max_delay" in query_params:
+         try:
+             consumer_retry_config.max_delay = float(
+                 query_params["consumer_retry_max_delay"][0]
+             )
+         except ValueError:
+             pass
+
+     if "consumer_retry_backoff" in query_params:
+         try:
+             consumer_retry_config.backoff_factor = float(
+                 query_params["consumer_retry_backoff"][0]
+             )
+         except ValueError:
+             pass
+
      config = AioPikaWorkerConfig(
          url=broker_url,
          exchange=exchange,
          prefetch_count=prefetch_count,
+         connection_retry_config=connection_retry_config,
+         consumer_retry_config=consumer_retry_config,
      )

      return AioPikaMicroserviceConsumer(
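
All eight retry knobs ride on the broker URL's query string, alongside the existing exchange and prefetch_count parameters. A hypothetical URL sketch (host and credentials are placeholders, parameter names are the ones parsed above):

broker_url = (
    "amqp://guest:guest@localhost/"
    "?exchange=my_exchange&prefetch_count=10"
    "&connection_retry_max=20&connection_retry_delay=2.0"
    "&connection_retry_max_delay=120.0&connection_retry_backoff=2.0"
    "&consumer_retry_max=10&consumer_retry_delay=0.5"
    "&consumer_retry_max_delay=30.0&consumer_retry_backoff=3.0"
)

Note that malformed values fall through the ValueError branches silently, so a typo in any of these parameters leaves the corresponding default in place.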
@@ -291,6 +737,21 @@ class ScheduledMessageHandlerCallback:
      ) -> None:

          if self.consumer.shutdown_event.is_set():
+             logger.info(
+                 f"Shutdown in progress. Requeuing scheduled message for {self.queue_name}"
+             )
+             try:
+                 # Use channel context for requeuing
+                 async with self.consumer.get_channel_ctx(self.queue_name):
+                     await aio_pika_message.reject(requeue=True)
+             except RuntimeError:
+                 logger.warning(
+                     f"Could not requeue scheduled message during shutdown - channel not available"
+                 )
+             except Exception as e:
+                 logger.error(
+                     f"Failed to requeue scheduled message during shutdown: {e}"
+                 )
              return

          async with self.consumer.lock:
@@ -300,14 +761,33 @@

      def handle_message_consume_done(self, task: asyncio.Task[Any]) -> None:
          self.consumer.tasks.discard(task)
+         if task.cancelled():
+             logger.warning(f"Scheduled task for {self.queue_name} was cancelled")
+             return
+
+         if (error := task.exception()) is not None:
+             logger.exception(
+                 f"Error processing scheduled action {self.queue_name}", exc_info=error
+             )

      async def handle_message(
          self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
      ) -> None:

          if self.consumer.shutdown_event.is_set():
-             logger.info("Shutdown event set. Rqueuing message")
-             await aio_pika_message.reject(requeue=True)
+             logger.info(f"Shutdown event set. Requeuing message for {self.queue_name}")
+             try:
+                 # Use channel context for requeuing
+                 async with self.consumer.get_channel_ctx(self.queue_name):
+                     await aio_pika_message.reject(requeue=True)
+                 return
+             except RuntimeError:
+                 logger.warning(
+                     f"Could not requeue message during shutdown - channel not available"
+                 )
+             except Exception as e:
+                 logger.error(f"Failed to requeue message during shutdown: {e}")
              return

          sig = inspect.signature(self.scheduled_action.callable)
          if len(sig.parameters) == 1:
@@ -379,11 +859,25 @@ class MessageHandlerCallback:
          self.queue_name = queue_name
          self.routing_key = routing_key
          self.message_handler = message_handler
+         self.retry_state: dict[str, dict[str, Any]] = {}

      async def message_consumer(
          self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
      ) -> None:
          if self.consumer.shutdown_event.is_set():
+             logger.info(
+                 f"Shutdown in progress. Requeuing message for {self.queue_name}"
+             )
+             try:
+                 # Use channel context for requeuing
+                 async with self.consumer.get_channel_ctx(self.queue_name):
+                     await aio_pika_message.reject(requeue=True)
+             except RuntimeError:
+                 logger.warning(
+                     f"Could not requeue message during shutdown - channel not available"
+                 )
+             except Exception as e:
+                 logger.error(f"Failed to requeue message during shutdown: {e}")
              return

          async with self.consumer.lock:
@@ -394,10 +888,13 @@
      def handle_message_consume_done(self, task: asyncio.Task[Any]) -> None:
          self.consumer.tasks.discard(task)
          if task.cancelled():
+             logger.warning(f"Task for queue {self.queue_name} was cancelled")
              return

          if (error := task.exception()) is not None:
-             logger.exception("Error processing message", exc_info=error)
+             logger.exception(
+                 f"Error processing message for queue {self.queue_name}", exc_info=error
+             )

      async def __call__(
          self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
@@ -408,13 +905,177 @@ class MessageHandlerCallback:
          self,
          aio_pika_message: aio_pika.abc.AbstractIncomingMessage,
          requeue: bool = False,
+         retry_count: int = 0,
+         exception: Optional[BaseException] = None,
      ) -> None:
-         if self.message_handler.spec.auto_ack is False:
-             await aio_pika_message.reject(requeue=requeue)
-         elif requeue:
-             logger.warning(
-                 f"Message {aio_pika_message.message_id} ({self.queue_name}) cannot be requeued because auto_ack is enabled"
+         """
+         Handle rejecting a message, with support for retry with exponential backoff.
+
+         Args:
+             aio_pika_message: The message to reject
+             requeue: Whether to requeue the message directly (True) or handle with retry logic (False)
+             retry_count: The current retry count for this message
+             exception: The exception that caused the rejection, if any
+         """
+         message_id = aio_pika_message.message_id or str(uuid.uuid4())
+
+         # If auto_ack is enabled, we cannot retry the message through RabbitMQ reject mechanism
+         if self.message_handler.spec.auto_ack:
+             if requeue:
+                 logger.warning(
+                     f"Message {message_id} ({self.queue_name}) cannot be requeued because auto_ack is enabled"
+                 )
+             return
+
+         try:
+             # Check if we should retry with backoff
+             if (
+                 not requeue
+                 and self.message_handler.spec.requeue_on_exception
+                 and exception is not None
+             ):
+                 # Get retry config from consumer
+                 retry_config = self.consumer.config.consumer_retry_config
+
+                 # Check if we reached max retries
+                 if retry_count >= retry_config.max_retries:
+                     logger.warning(
+                         f"Message {message_id} ({self.queue_name}) failed after {retry_count} retries, "
+                         f"dead-lettering: {str(exception)}"
+                     )
+                     # Dead-letter the message after max retries
+                     async with self.consumer.get_channel_ctx(self.queue_name):
+                         await aio_pika_message.reject(requeue=False)
+                     return
+
+                 # Calculate delay for this retry attempt
+                 delay = retry_config.initial_delay * (
+                     retry_config.backoff_factor**retry_count
+                 )
+                 if retry_config.jitter:
+                     jitter_amount = delay * 0.25
+                     delay = delay + random.uniform(-jitter_amount, jitter_amount)
+                     delay = max(
+                         delay, 0.1
+                     )  # Ensure delay doesn't go negative due to jitter
+
+                 delay = min(delay, retry_config.max_delay)
+
+                 logger.info(
+                     f"Message {message_id} ({self.queue_name}) failed with {str(exception)}, "
+                     f"retry {retry_count+1}/{retry_config.max_retries} scheduled in {delay:.2f}s"
+                 )
+
+                 # Store retry state for this message
+                 self.retry_state[message_id] = {
+                     "retry_count": retry_count + 1,
+                     "last_exception": exception,
+                     "next_retry": time.time() + delay,
+                 }
+
+                 # Schedule retry after delay
+                 asyncio.create_task(
+                     self._delayed_retry(
+                         aio_pika_message, delay, retry_count + 1, exception
+                     )
+                 )
+
+                 # Acknowledge the current message since we'll handle retry ourselves
+                 async with self.consumer.get_channel_ctx(self.queue_name):
+                     await aio_pika_message.ack()
+                 return
+
+             # Standard reject without retry or with immediate requeue
+             async with self.consumer.get_channel_ctx(self.queue_name):
+                 await aio_pika_message.reject(requeue=requeue)
+             if requeue:
+                 logger.info(
+                     f"Message {message_id} ({self.queue_name}) requeued for immediate retry"
+                 )
+             else:
+                 logger.info(
+                     f"Message {message_id} ({self.queue_name}) rejected without requeue"
+                 )
+
+         except RuntimeError as e:
+             logger.error(
+                 f"Error rejecting message {message_id} ({self.queue_name}): {e}"
              )
+         except Exception as e:
+             logger.exception(
+                 f"Unexpected error rejecting message {message_id} ({self.queue_name}): {e}"
+             )
+
+     async def _delayed_retry(
+         self,
+         aio_pika_message: aio_pika.abc.AbstractIncomingMessage,
+         delay: float,
+         retry_count: int,
+         exception: Optional[BaseException],
+     ) -> None:
+         """
+         Handle delayed retry of a message after exponential backoff delay.
+
+         Args:
+             aio_pika_message: The original message
+             delay: Delay in seconds before retry
+             retry_count: The current retry count (after increment)
+             exception: The exception that caused the failure
+         """
+         message_id = aio_pika_message.message_id or str(uuid.uuid4())
+
+         try:
+             # Wait for the backoff delay
+             await asyncio.sleep(delay)
+
+             # Get message body and properties for republishing
+             message_body = aio_pika_message.body
+             headers = (
+                 aio_pika_message.headers.copy() if aio_pika_message.headers else {}
+             )
+
+             # Add retry information to headers
+             headers["x-retry-count"] = retry_count
+             if exception:
+                 headers["x-last-error"] = str(exception)
+
+             # Clean up retry state
+             if message_id in self.retry_state:
+                 del self.retry_state[message_id]
+
+             # Republish the message to the same queue
+             async with self.consumer.get_channel_ctx(self.queue_name) as channel:
+                 exchange = await RabbitmqUtils.get_main_exchange(
+                     channel=channel,
+                     exchange_name=self.consumer.config.exchange,
+                 )
+
+                 await exchange.publish(
+                     aio_pika.Message(
+                         body=message_body,
+                         headers=headers,
+                         message_id=message_id,
+                         content_type=aio_pika_message.content_type,
+                         content_encoding=aio_pika_message.content_encoding,
+                         delivery_mode=aio_pika_message.delivery_mode,
+                     ),
+                     routing_key=self.routing_key,
+                 )
+
+             logger.info(
+                 f"Message {message_id} ({self.queue_name}) republished for retry {retry_count}"
+             )
+
+         except Exception as e:
+             logger.exception(
+                 f"Failed to execute delayed retry for message {message_id} ({self.queue_name}): {e}"
+             )
+             # If we fail to republish, try to dead-letter the original message
+             try:
+                 if message_id in self.retry_state:
+                     del self.retry_state[message_id]
+             except Exception:
+                 pass

      async def handle_message(
          self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
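
For reference, handle_reject_message computes the per-retry delay as initial_delay * backoff_factor**retry_count, applies ±25% jitter, floors the result at 0.1s, then caps it at max_delay. A standalone sketch of that arithmetic, assuming the consumer defaults shown earlier (0.5s initial delay, factor 2.0, 40s cap):

import random

def backoff_delay(retry_count: int, initial: float = 0.5, factor: float = 2.0,
                  cap: float = 40.0, jitter: bool = True) -> float:
    # Exponential growth per retry attempt.
    delay = initial * (factor ** retry_count)
    if jitter:
        jitter_amount = delay * 0.25
        # Jitter may push the delay down; keep it positive.
        delay = max(delay + random.uniform(-jitter_amount, jitter_amount), 0.1)
    return min(delay, cap)

# Without jitter: retry 0 -> 0.5s, 1 -> 1.0s, 2 -> 2.0s, ..., 7 and beyond -> capped at 40s.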
@@ -494,29 +1155,61 @@ class MessageHandlerCallback:
              await handler(builded_message)
              if not incoming_message_spec.auto_ack:
                  with suppress(aio_pika.MessageProcessError):
-                     await aio_pika_message.ack()
+                     # Use channel context for acknowledgement
+                     async with self.consumer.get_channel_ctx(self.queue_name):
+                         await aio_pika_message.ack()
          except BaseException as base_exc:
+             # Get message id for logging
+             message_id = aio_pika_message.message_id or str(uuid.uuid4())
+
+             # Extract retry count from headers if available
+             headers = aio_pika_message.headers or {}
+             retry_count = int(str(headers.get("x-retry-count", 0)))
+
+             # Process exception handler if configured
              if incoming_message_spec.exception_handler is not None:
                  try:
                      incoming_message_spec.exception_handler(base_exc)
                  except Exception as nested_exc:
                      logger.exception(
-                         f"Error processing exception handler: {base_exc} | {nested_exc}"
+                         f"Error processing exception handler for message {message_id}: {base_exc} | {nested_exc}"
                      )
              else:
                  logger.exception(
-                     f"Error processing message on topic {routing_key}"
+                     f"Error processing message {message_id} on topic {routing_key}: {str(base_exc)}"
                  )
+
+             # Handle rejection with retry logic
              if incoming_message_spec.requeue_on_exception:
-                 await self.handle_reject_message(aio_pika_message, requeue=True)
+                 # Use our retry with backoff mechanism
+                 await self.handle_reject_message(
+                     aio_pika_message,
+                     requeue=False,  # Don't requeue directly, use our backoff mechanism
+                     retry_count=retry_count,
+                     exception=base_exc,
+                 )
              else:
+                 # Message shouldn't be retried, reject it
                  await self.handle_reject_message(
-                     aio_pika_message, requeue=False
+                     aio_pika_message, requeue=False, exception=base_exc
                  )
          else:
-             logger.info(
-                 f"Message {aio_pika_message.message_id}#{self.queue_name} processed successfully"
-             )
+             # Message processed successfully, log and clean up any retry state
+             message_id = aio_pika_message.message_id or str(uuid.uuid4())
+             if message_id in self.retry_state:
+                 del self.retry_state[message_id]
+
+             # Log success with retry information if applicable
+             headers = aio_pika_message.headers or {}
+             if "x-retry-count" in headers:
+                 retry_count = int(str(headers.get("x-retry-count", 0)))
+                 logger.info(
+                     f"Message {message_id}#{self.queue_name} processed successfully after {retry_count} retries"
+                 )
+             else:
+                 logger.info(
+                     f"Message {message_id}#{self.queue_name} processed successfully"
+                 )


  @asynccontextmanager
@@ -614,19 +1307,64 @@ class MessageBusWorker:
      def start_sync(self) -> None:

          def on_shutdown(loop: asyncio.AbstractEventLoop) -> None:
-             logger.info("Shutting down")
-             self.consumer.shutdown()
+             logger.info("Shutting down - signal received")
+             # Schedule the shutdown to run in the event loop
+             asyncio.create_task(self._graceful_shutdown())
+             # wait until the shutdown is complete

          with asyncio.Runner(loop_factory=uvloop.new_event_loop) as runner:
-             runner.get_loop().add_signal_handler(
-                 signal.SIGINT, on_shutdown, runner.get_loop()
-             )
+             loop = runner.get_loop()
+             loop.add_signal_handler(signal.SIGINT, on_shutdown, loop)
+             # Add graceful shutdown handler for SIGTERM as well
+             loop.add_signal_handler(signal.SIGTERM, on_shutdown, loop)
              runner.run(self.start_async())

+     async def _graceful_shutdown(self) -> None:
+         """Handles graceful shutdown process"""
+         logger.info("Initiating graceful shutdown sequence")
+         # Use the comprehensive close method that handles shutdown, task waiting and connection cleanup
+
+         self.consumer.shutdown()
+         logger.info("Graceful shutdown completed")
+


  class AioPikaMessageBusController(BusMessageController):
      def __init__(self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage):
          self.aio_pika_message = aio_pika_message
+         # We access consumer callback through context if available
+         self._callback: Optional[MessageHandlerCallback] = None
+
+     def _get_callback(self) -> MessageHandlerCallback:
+         """
+         Find the callback associated with this message.
+         This allows us to access the retry mechanisms.
+         """
+         if self._callback is None:
+             # Get the context from current frame's locals
+             frame = inspect.currentframe()
+             if frame is not None:
+                 try:
+                     caller_frame = frame.f_back
+                     if caller_frame is not None:
+                         # Check for context with handler callback
+                         callback_ref = None
+                         # Look for handler_message call context
+                         while caller_frame is not None:
+                             if "self" in caller_frame.f_locals:
+                                 self_obj = caller_frame.f_locals["self"]
+                                 if isinstance(self_obj, MessageHandlerCallback):
+                                     callback_ref = self_obj
+                                     break
+                             caller_frame = caller_frame.f_back
+                         # Save callback reference if we found it
+                         self._callback = callback_ref
+                 finally:
+                     del frame  # Avoid reference cycles
+
+         if self._callback is None:
+             raise RuntimeError("Could not find callback context for message retry")
+
+         return self._callback

      async def ack(self) -> None:
          await self.aio_pika_message.ack()
@@ -638,7 +1376,41 @@ class AioPikaMessageBusController(BusMessageController):
          await self.aio_pika_message.reject()

      async def retry(self) -> None:
-         await self.aio_pika_message.reject(requeue=True)
+         """
+         Retry the message immediately by rejecting with requeue flag.
+         This doesn't use the exponential backoff mechanism.
+         """
+         callback = self._get_callback()
+         await callback.handle_reject_message(self.aio_pika_message, requeue=True)

      async def retry_later(self, delay: int) -> None:
-         raise NotImplementedError("Not implemented")
+         """
+         Retry the message after a specified delay using the exponential backoff mechanism.
+
+         Args:
+             delay: Minimum delay in seconds before retrying
+         """
+         try:
+             callback = self._get_callback()
+
+             # Get current retry count from message headers
+             headers = self.aio_pika_message.headers or {}
+             retry_count = int(str(headers.get("x-retry-count", 0)))
+
+             # Handle retry with explicit delay
+             asyncio.create_task(
+                 callback._delayed_retry(
+                     self.aio_pika_message,
+                     float(delay),
+                     retry_count + 1,
+                     None,  # No specific exception
+                 )
+             )
+
+             # Acknowledge the current message since we'll republish
+             await self.aio_pika_message.ack()
+
+         except Exception as e:
+             logger.exception(f"Failed to schedule retry_later: {e}")
+             # Fall back to immediate retry
+             await self.aio_pika_message.reject(requeue=True)
jararaca/utils/retry.py (new file)

@@ -0,0 +1,141 @@
+ import asyncio
+ import logging
+ import random
+ from functools import wraps
+ from typing import Awaitable, Callable, Optional, ParamSpec, TypeVar
+
+ logger = logging.getLogger(__name__)
+
+ P = ParamSpec("P")
+ T = TypeVar("T")
+
+
+ class RetryConfig:
+     """Configuration for the retry mechanism."""
+
+     def __init__(
+         self,
+         max_retries: int = 5,
+         initial_delay: float = 1.0,
+         max_delay: float = 60.0,
+         backoff_factor: float = 2.0,
+         jitter: bool = True,
+     ):
+         """
+         Initialize retry configuration.
+
+         Args:
+             max_retries: Maximum number of retry attempts (default: 5)
+             initial_delay: Initial delay in seconds between retries (default: 1.0)
+             max_delay: Maximum delay in seconds between retries (default: 60.0)
+             backoff_factor: Multiplier for the delay after each retry (default: 2.0)
+             jitter: Whether to add randomness to the delay to prevent thundering herd (default: True)
+         """
+         self.max_retries = max_retries
+         self.initial_delay = initial_delay
+         self.max_delay = max_delay
+         self.backoff_factor = backoff_factor
+         self.jitter = jitter
+
+
+ E = TypeVar("E", bound=Exception)
+
+
+ async def retry_with_backoff(
+     fn: Callable[[], Awaitable[T]],
+     # args: P.args,
+     # kwargs: P.kwargs,
+     retry_config: Optional[RetryConfig] = None,
+     on_retry_callback: Optional[Callable[[int, E, float], None]] = None,
+     retry_exceptions: tuple[type[E], ...] = (),
+ ) -> T:
+     """
+     Execute a function with exponential backoff retry mechanism.
+
+     Args:
+         fn: The async function to execute with retry
+         retry_config: Configuration for the retry mechanism
+         on_retry_callback: Optional callback function called on each retry with retry count, exception, and next delay
+         retry_exceptions: Tuple of exception types that should trigger a retry
+
+     Returns:
+         The result of the function if successful
+
+     Raises:
+         The last exception encountered if all retries fail
+     """
+     if retry_config is None:
+         retry_config = RetryConfig()
+
+     last_exception = None
+     delay = retry_config.initial_delay
+
+     for retry_count in range(retry_config.max_retries + 1):
+         try:
+             return await fn()
+         except retry_exceptions as e:
+             last_exception = e
+
+             if retry_count >= retry_config.max_retries:
+                 logger.error(
+                     f"Max retries ({retry_config.max_retries}) exceeded: {str(e)}"
+                 )
+                 raise
+
+             # Calculate next delay with exponential backoff
+             if retry_count > 0:  # Don't increase delay on the first failure
+                 delay = min(delay * retry_config.backoff_factor, retry_config.max_delay)
+
+             # Apply jitter if configured (±25% randomness)
+             if retry_config.jitter:
+                 jitter_amount = delay * 0.25
+                 delay = delay + random.uniform(-jitter_amount, jitter_amount)
+                 # Ensure delay doesn't go negative due to jitter
+                 delay = max(delay, 0.1)
+
+             logger.warning(
+                 f"Retry {retry_count+1}/{retry_config.max_retries} after error: {str(e)}. "
+                 f"Retrying in {delay:.2f}s"
+             )
+
+             # Call the optional retry callback if provided
+             if on_retry_callback:
+                 on_retry_callback(retry_count, e, delay)
+
+             await asyncio.sleep(delay)
+
+     # This should never be reached with the current implementation
+     if last_exception:
+         raise last_exception
+     raise RuntimeError("Unexpected error in retry logic")
+
+
+ def with_retry(
+     retry_config: Optional[RetryConfig] = None,
+     retry_exceptions: tuple[type[Exception], ...] = (Exception,),
+ ) -> Callable[[Callable[P, Awaitable[T]]], Callable[P, Awaitable[T]]]:
+     """
+     Decorator to wrap an async function with retry logic.
+
+     Args:
+         retry_config: Configuration for the retry mechanism
+         retry_exceptions: Tuple of exception types that should trigger a retry
+
+     Returns:
+         Decorated function with retry mechanism
+     """
+
+     def decorator(fn: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]:
+         @wraps(fn)
+         async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
+             return await retry_with_backoff(
+                 lambda: fn(*args, **kwargs),
+                 retry_config=retry_config,
+                 retry_exceptions=retry_exceptions,
+             )
+
+         return wrapper
+
+     return decorator
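
For reference, a hedged usage sketch of the new module. flaky() is a stand-in for any transiently failing operation (an assumption for the demo); the two entry points shown match the signatures above, and note that by default retry_exceptions is empty, so nothing is retried unless exception types are passed explicitly:

import asyncio
import random

from jararaca.utils.retry import RetryConfig, retry_with_backoff, with_retry

async def flaky() -> str:
    # Stand-in for a call that fails transiently about half the time.
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"

@with_retry(retry_config=RetryConfig(max_retries=3), retry_exceptions=(ConnectionError,))
async def flaky_decorated() -> str:
    return await flaky()

async def main() -> None:
    # Direct call: up to 3 retries, 0.5s initial delay, doubling with jitter.
    print(await retry_with_backoff(
        flaky,
        retry_config=RetryConfig(max_retries=3, initial_delay=0.5),
        retry_exceptions=(ConnectionError,),
    ))
    # Decorated call: same behaviour, packaged once at definition time.
    print(await flaky_decorated())

asyncio.run(main())

Either form can still raise after the final attempt; the caller is expected to handle that, as the worker does in its setup helpers.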
jararaca-0.3.11a16.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: jararaca
- Version: 0.3.11a15
+ Version: 0.3.11a16
  Summary: A simple and fast API framework for Python
  Author: Lucas S
  Author-email: me@luscasleo.dev
jararaca-0.3.11a16.dist-info/RECORD

@@ -20,7 +20,7 @@ jararaca/messagebus/interceptors/aiopika_publisher_interceptor.py,sha256=_DEHwIH
  jararaca/messagebus/interceptors/publisher_interceptor.py,sha256=ojy1bRhqMgrkQljcGGS8cd8-8pUjL8ZHjIUkdmaAnNM,1325
  jararaca/messagebus/message.py,sha256=U6cyd2XknX8mtm0333slz5fanky2PFLWCmokAO56vvU,819
  jararaca/messagebus/publisher.py,sha256=JTkxdKbvxvDWT8nK8PVEyyX061vYYbKQMxRHXrZtcEY,2173
- jararaca/messagebus/worker.py,sha256=18oD-6Ip_rOa90p53EcEPlvkXho3SWrC40l4OVSIsE4,22356
+ jararaca/messagebus/worker.py,sha256=CrSIejWMGII4_JK0aH4jxdj0oBJX4hSXY0SmVa6KURA,54187
  jararaca/microservice.py,sha256=rRIimfeP2-wf289PKoUbk9wrSdA0ga_qWz5JNgQ5IE0,9667
  jararaca/observability/decorators.py,sha256=MOIr2PttPYYvRwEdfQZEwD5RxKHOTv8UEy9n1YQVoKw,2281
  jararaca/observability/interceptor.py,sha256=U4ZLM0f8j6Q7gMUKKnA85bnvD-Qa0ii79Qa_X8KsXAQ,1498
@@ -66,8 +66,9 @@ jararaca/tools/app_config/interceptor.py,sha256=HV8h4AxqUc_ACs5do4BSVlyxlRXzx7Hq
  jararaca/tools/typescript/interface_parser.py,sha256=35xbOrZDQDyTXdMrVZQ8nnFw79f28lJuLYNHAspIqi8,30492
  jararaca/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jararaca/utils/rabbitmq_utils.py,sha256=ytdAFUyv-OBkaVnxezuJaJoLrmN7giZgtKeet_IsMBs,10918
- jararaca-0.3.11a15.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- jararaca-0.3.11a15.dist-info/METADATA,sha256=7F5Rf37ynVQCj-5hACmHpsmjK11436KyQV6V3uqGdeI,4998
- jararaca-0.3.11a15.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- jararaca-0.3.11a15.dist-info/entry_points.txt,sha256=WIh3aIvz8LwUJZIDfs4EeH3VoFyCGEk7cWJurW38q0I,45
- jararaca-0.3.11a15.dist-info/RECORD,,
+ jararaca/utils/retry.py,sha256=DzPX_fXUvTqej6BQ8Mt2dvLo9nNlTBm7Kx2pFZ26P2Q,4668
+ jararaca-0.3.11a16.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ jararaca-0.3.11a16.dist-info/METADATA,sha256=E5OUx4jCVKdki-auNqJUVcOflGaKTt37W3FAtKYS7ow,4998
+ jararaca-0.3.11a16.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ jararaca-0.3.11a16.dist-info/entry_points.txt,sha256=WIh3aIvz8LwUJZIDfs4EeH3VoFyCGEk7cWJurW38q0I,45
+ jararaca-0.3.11a16.dist-info/RECORD,,