jararaca 0.4.0a5__py3-none-any.whl → 0.4.0a19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jararaca/__init__.py +9 -9
- jararaca/cli.py +643 -4
- jararaca/core/providers.py +4 -0
- jararaca/helpers/__init__.py +3 -0
- jararaca/helpers/global_scheduler/__init__.py +3 -0
- jararaca/helpers/global_scheduler/config.py +21 -0
- jararaca/helpers/global_scheduler/controller.py +42 -0
- jararaca/helpers/global_scheduler/registry.py +32 -0
- jararaca/messagebus/decorators.py +104 -10
- jararaca/messagebus/interceptors/aiopika_publisher_interceptor.py +50 -8
- jararaca/messagebus/interceptors/message_publisher_collector.py +62 -0
- jararaca/messagebus/interceptors/publisher_interceptor.py +25 -3
- jararaca/messagebus/worker.py +276 -200
- jararaca/microservice.py +3 -1
- jararaca/observability/providers/otel.py +31 -13
- jararaca/persistence/base.py +1 -1
- jararaca/persistence/utilities.py +47 -24
- jararaca/presentation/decorators.py +3 -3
- jararaca/reflect/decorators.py +24 -10
- jararaca/reflect/helpers.py +18 -0
- jararaca/rpc/http/__init__.py +2 -2
- jararaca/rpc/http/decorators.py +9 -9
- jararaca/scheduler/beat_worker.py +14 -14
- jararaca/tools/typescript/decorators.py +4 -4
- jararaca/tools/typescript/interface_parser.py +3 -1
- jararaca/utils/env_parse_utils.py +133 -0
- jararaca/utils/rabbitmq_utils.py +47 -0
- jararaca/utils/retry.py +11 -13
- {jararaca-0.4.0a5.dist-info → jararaca-0.4.0a19.dist-info}/METADATA +2 -1
- {jararaca-0.4.0a5.dist-info → jararaca-0.4.0a19.dist-info}/RECORD +35 -27
- pyproject.toml +2 -1
- {jararaca-0.4.0a5.dist-info → jararaca-0.4.0a19.dist-info}/LICENSE +0 -0
- {jararaca-0.4.0a5.dist-info → jararaca-0.4.0a19.dist-info}/LICENSES/GPL-3.0-or-later.txt +0 -0
- {jararaca-0.4.0a5.dist-info → jararaca-0.4.0a19.dist-info}/WHEEL +0 -0
- {jararaca-0.4.0a5.dist-info → jararaca-0.4.0a19.dist-info}/entry_points.txt +0 -0
jararaca/messagebus/worker.py
CHANGED
@@ -13,15 +13,7 @@ from abc import ABC
 from contextlib import asynccontextmanager, suppress
 from dataclasses import dataclass, field
 from datetime import UTC, datetime
-from typing import (
-    Any,
-    AsyncContextManager,
-    AsyncGenerator,
-    Awaitable,
-    Optional,
-    Type,
-    get_origin,
-)
+from typing import Any, AsyncContextManager, AsyncGenerator, Awaitable, Optional, Type
 from urllib.parse import parse_qs, urlparse

 import aio_pika
@@ -35,7 +27,7 @@ from aio_pika.exceptions import (
     ChannelNotFoundEntity,
     ConnectionClosed,
 )
-from pydantic import
+from pydantic import ValidationError

 from jararaca.broker_backend import MessageBrokerBackend
 from jararaca.broker_backend.mapper import get_message_broker_backend_from_url
@@ -65,9 +57,10 @@ from jararaca.microservice import (
     provide_shutdown_state,
     providing_app_type,
 )
+from jararaca.observability.hooks import record_exception, set_span_status
 from jararaca.scheduler.decorators import ScheduledActionData
 from jararaca.utils.rabbitmq_utils import RabbitmqUtils
-from jararaca.utils.retry import
+from jararaca.utils.retry import RetryPolicy, retry_with_backoff

 logger = logging.getLogger(__name__)

@@ -77,16 +70,16 @@ class AioPikaWorkerConfig:
     url: str
     exchange: str
     prefetch_count: int
-    connection_retry_config:
-        default_factory=lambda:
+    connection_retry_config: RetryPolicy = field(
+        default_factory=lambda: RetryPolicy(
             max_retries=15,
             initial_delay=1.0,
             max_delay=60.0,
             backoff_factor=2.0,
         )
     )
-
-        default_factory=lambda:
+    consumer_retry_policy: RetryPolicy = field(
+        default_factory=lambda: RetryPolicy(
             max_retries=15,
             initial_delay=0.5,
             max_delay=40.0,
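Note: `RetryPolicy` itself lives in `jararaca/utils/retry.py` and is not part of this diff. Purely for orientation, a policy with these four fields usually maps to capped exponential delays between attempts; the sketch below reuses the field names from the hunk above but is an illustration, not the library's implementation.

```python
from dataclasses import dataclass


@dataclass
class RetryPolicySketch:
    """Illustrative stand-in for a RetryPolicy-like config (field names taken from the diff)."""

    max_retries: int = 15
    initial_delay: float = 1.0
    max_delay: float = 60.0
    backoff_factor: float = 2.0

    def delay_for(self, attempt: int) -> float:
        # Exponential backoff capped at max_delay: 1s, 2s, 4s, ... up to 60s.
        return min(self.initial_delay * (self.backoff_factor**attempt), self.max_delay)


print([RetryPolicySketch().delay_for(a) for a in range(7)])
# [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 60.0]
```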
@@ -260,7 +253,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         # Setup with retry
         await retry_with_backoff(
             setup_consumer,
-
+            retry_policy=self.config.consumer_retry_policy,
             retry_exceptions=(
                 ChannelNotFoundEntity,
                 ChannelClosed,
@@ -321,7 +314,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         # Setup with retry
         await retry_with_backoff(
             setup_consumer,
-
+            retry_policy=self.config.consumer_retry_policy,
             retry_exceptions=(
                 ChannelNotFoundEntity,
                 ChannelClosed,
@@ -355,7 +348,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         # Verify infrastructure with retry
         infra_check_success = await retry_with_backoff(
             self._verify_infrastructure,
-
+            retry_policy=self.config.connection_retry_config,
             retry_exceptions=(Exception,),
         )

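The call sites above now pass `retry_policy=` explicitly to `retry_with_backoff`. The helper's real signature is defined elsewhere in the package and is not shown here; judging only from these calls (a leading callable named `fn`, plus `retry_policy` and `retry_exceptions` keywords), a compatible shape could look like the following sketch. It is illustrative, not the actual `jararaca.utils.retry` code, and it leans on the `RetryPolicySketch` from the previous example.

```python
import asyncio
from typing import Awaitable, Callable, Tuple, Type, TypeVar

T = TypeVar("T")


async def retry_with_backoff_sketch(
    fn: Callable[[], Awaitable[T]],
    *,
    retry_policy,  # e.g. the RetryPolicySketch defined in the earlier example
    retry_exceptions: Tuple[Type[BaseException], ...] = (Exception,),
) -> T:
    """Illustrative only; jararaca.utils.retry.retry_with_backoff may differ in details."""
    attempt = 0
    while True:
        try:
            return await fn()
        except retry_exceptions:
            if attempt >= retry_policy.max_retries:
                raise  # exhausted the policy, surface the last error
            await asyncio.sleep(retry_policy.delay_for(attempt))
            attempt += 1
```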
@@ -503,12 +496,12 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
                 len(pending_tasks),
                 ", ".join((task.get_name()) for task in pending_tasks),
             )
-
-
-
-
-
-
+
+            logger.warning("All in-flight tasks have completed.")
+            # Log any exceptions that occurred
+            # for result in results:
+            #     if isinstance(result, Exception):
+            #         logger.error("Task raised an exception during shutdown: %s", result)

     async def close_channels_and_connection(self) -> None:
         """Close all channels and then the connection"""
@@ -609,7 +602,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         # Create a new channel with retry
         channel = await retry_with_backoff(
             fn=lambda: self._establish_channel(queue_name),
-
+            retry_policy=self.config.consumer_retry_policy,
             retry_exceptions=(
                 AMQPConnectionError,
                 AMQPChannelError,
@@ -667,7 +660,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         # Create a new connection with retry
         self.connection = await retry_with_backoff(
             self._establish_connection,
-
+            retry_policy=self.config.connection_retry_config,
             retry_exceptions=(
                 AMQPConnectionError,
                 ConnectionError,
@@ -744,7 +737,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
                 logger.warning(
                     "Error getting channel for %s, retrying: %s", queue_name, e
                 )
-                await
+                await self._wait_delay_or_shutdown(retry_delay)
                 retry_delay *= 2
             else:
                 logger.error(
@@ -755,6 +748,24 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
             )
             raise

+    async def _wait_delay_or_shutdown(self, delay: float) -> None:
+        """
+        Wait for the specified delay or exit early if shutdown is initiated.
+
+        Args:
+            delay: Delay in seconds to wait
+        """
+
+        wait_cor = asyncio.create_task(asyncio.sleep(delay), name="delayed-retry-wait")
+        wait_shutdown_cor = asyncio.create_task(
+            self.shutdown_event.wait(), name="delayed-retry-shutdown-wait"
+        )
+
+        await asyncio.wait(
+            [wait_cor, wait_shutdown_cor],
+            return_when=asyncio.FIRST_COMPLETED,
+        )
+
     async def _monitor_connection_health(self) -> None:
         """
         Monitor connection health and trigger shutdown if connection is lost.
@@ -762,7 +773,9 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         """
         while not self.shutdown_event.is_set():
             try:
-                await
+                await self._wait_delay_or_shutdown(
+                    self.config.connection_health_check_interval
+                )

                 if self.shutdown_event.is_set():
                     break
@@ -780,7 +793,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
                     break
             except Exception as e:
                 logger.error("Error in connection health monitoring: %s", e)
-                await
+                await self._wait_delay_or_shutdown(5)  # Wait before retrying

     async def _is_connection_healthy(self) -> bool:
         """
@@ -890,9 +903,9 @@ def create_message_bus(
     prefetch_count = int(query_params["prefetch_count"][0])

     # Parse optional retry configuration parameters
-    connection_retry_config =
-    consumer_retry_config =
-        max_retries=
+    connection_retry_config = RetryPolicy()
+    consumer_retry_config = RetryPolicy(
+        max_retries=5, initial_delay=5, max_delay=60.0, backoff_factor=3.0
     )

     # Parse heartbeat and health check intervals
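`create_message_bus` pulls `prefetch_count` (and, in this version, default retry settings) out of the broker URL's query string, using the `parse_qs`/`urlparse` imports at the top of the module. A minimal sketch of that parsing step; the URL and the `exchange` parameter name are assumptions for illustration, only `prefetch_count` is taken from the diff.

```python
from urllib.parse import parse_qs, urlparse

url = "amqp://guest:guest@localhost:5672/?exchange=jararaca_ex&prefetch_count=8"  # hypothetical URL
query_params = parse_qs(urlparse(url).query)

prefetch_count = int(query_params["prefetch_count"][0])  # parse_qs returns lists of values
exchange = query_params.get("exchange", ["default"])[0]
print(prefetch_count, exchange)  # 8 jararaca_ex
```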
@@ -987,7 +1000,7 @@ def create_message_bus(
         exchange=exchange,
         prefetch_count=prefetch_count,
         connection_retry_config=connection_retry_config,
-
+        consumer_retry_policy=consumer_retry_config,
         connection_heartbeat_interval=connection_heartbeat_interval,
         connection_health_check_interval=connection_health_check_interval,
     )
@@ -1049,8 +1062,8 @@ class ScheduledMessageHandlerCallback:
             try:
                 if not self.consumer.connection_healthy:
                     # Still not healthy, requeue the message
-
-
+
+                    await aio_pika_message.reject(requeue=True)
                     return
             except Exception as e:
                 logger.error(
@@ -1088,8 +1101,8 @@ class ScheduledMessageHandlerCallback:
             )
             try:
                 # Use channel context for requeuing
-
-
+
+                await aio_pika_message.reject(requeue=True)
                 return
             except RuntimeError:
                 logger.warning(
@@ -1106,8 +1119,8 @@ class ScheduledMessageHandlerCallback:
                 self.queue_name,
             )
             try:
-
-
+
+                await aio_pika_message.reject(requeue=True)
                 return
             except Exception as e:
                 logger.error(
@@ -1203,8 +1216,8 @@ class MessageHandlerCallback:
             )
             try:
                 # Use channel context for requeuing
-
-
+
+                await aio_pika_message.reject(requeue=True)
             except RuntimeError:
                 logger.warning(
                     "Could not requeue message during shutdown - channel not available"
@@ -1221,8 +1234,8 @@ class MessageHandlerCallback:
             try:
                 if not self.consumer.connection_healthy:
                     # Still not healthy, requeue the message
-
-
+
+                    await aio_pika_message.reject(requeue=True)
                     return
             except Exception as e:
                 logger.error(
@@ -1236,18 +1249,22 @@ class MessageHandlerCallback:
             name=f"MessageHandler-{self.queue_name}-handle-message-{aio_pika_message.message_id}",
         )
         self.consumer.tasks.add(task)
-        task.add_done_callback(self.handle_message_consume_done)

-
-
-
-
-
+        def handle_message_consume_done(task: asyncio.Task[Any]) -> None:
+            self.consumer.tasks.discard(task)
+            if task.cancelled():
+                logger.warning("Task for queue %s was cancelled", self.queue_name)
+                return

-
-
-
-
+            if (error := task.exception()) is not None:
+                logger.exception(
+                    "Error processing message id %s for queue %s",
+                    aio_pika_message.message_id,
+                    self.queue_name,
+                    exc_info=error,
+                )
+
+        task.add_done_callback(handle_message_consume_done)

     async def __call__(
         self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
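The handler task is now tracked in `self.consumer.tasks` and cleaned up by a locally defined done callback that logs cancellations and exceptions instead of letting them disappear. A condensed, standalone sketch of that bookkeeping pattern (all names here are illustrative):

```python
import asyncio
import logging
from typing import Any, Set

logger = logging.getLogger(__name__)
tasks: Set[asyncio.Task[Any]] = set()  # strong references keep tasks from being garbage-collected


def track(task: asyncio.Task[Any]) -> None:
    tasks.add(task)

    def on_done(t: asyncio.Task[Any]) -> None:
        tasks.discard(t)
        if t.cancelled():
            logger.warning("Task %s was cancelled", t.get_name())
            return
        if (error := t.exception()) is not None:
            logger.error("Task %s failed", t.get_name(), exc_info=error)

    task.add_done_callback(on_done)


async def main() -> None:
    track(asyncio.create_task(asyncio.sleep(0.01), name="demo-task"))
    await asyncio.gather(*tasks, return_exceptions=True)


asyncio.run(main())
```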
@@ -1257,6 +1274,8 @@ class MessageHandlerCallback:
     async def handle_reject_message(
         self,
         aio_pika_message: aio_pika.abc.AbstractIncomingMessage,
+        *,
+        requeue_timeout: float = 0,
         requeue: bool = False,
         retry_count: int = 0,
         exception: Optional[BaseException] = None,
@@ -1280,7 +1299,10 @@ class MessageHandlerCallback:
             and exception is not None
         ):
             # Get retry config from consumer
-            retry_config =
+            retry_config = (
+                self.message_handler.spec.retry_config
+                or self.consumer.config.consumer_retry_policy
+            )

             # Check if we reached max retries
             if retry_count >= retry_config.max_retries:
@@ -1293,7 +1315,6 @@ class MessageHandlerCallback:
                 )
                 # Dead-letter the message after max retries
                 try:
-
                     await aio_pika_message.reject(requeue=False)
                 except Exception as e:
                     logger.error(
@@ -1332,12 +1353,13 @@ class MessageHandlerCallback:
             }

             # Schedule retry after delay
-            asyncio.create_task(
+            task = asyncio.create_task(
                 self._delayed_retry(
                     aio_pika_message, delay, retry_count + 1, exception
                 ),
                 name=f"MessageHandler-{self.queue_name}-delayed-retry-{message_id}",
             )
+            self.consumer.tasks.add(task)

             # Acknowledge the current message since we'll handle retry ourselves
             try:
@@ -1350,20 +1372,22 @@ class MessageHandlerCallback:

         # Standard reject without retry or with immediate requeue
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            await self._wait_delay_or_shutdown(
+                requeue_timeout
+            )  # Optional delay before requeueing
+            await aio_pika_message.reject(requeue=requeue)
+            if requeue:
+                logger.warning(
+                    "Message %s (%s) requeued for immediate retry",
+                    message_id,
+                    self.queue_name,
+                )
+            else:
+                logger.warning(
+                    "Message %s (%s) rejected without requeue",
+                    message_id,
+                    self.queue_name,
+                )
         except Exception as e:
             logger.error("Failed to reject message %s: %s", message_id, e)

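With the new keyword-only `requeue_timeout`, a rejection can be delayed through the shutdown-aware wait before the message is requeued or dropped. A rough standalone sketch of that flow against aio-pika's `AbstractIncomingMessage.reject(requeue=...)`; it substitutes an `asyncio.wait_for` on the shutdown event for the worker's `_wait_delay_or_shutdown`, and the function name is invented for illustration.

```python
import asyncio
import contextlib
import logging

import aio_pika

logger = logging.getLogger(__name__)


async def reject_with_optional_delay(
    message: aio_pika.abc.AbstractIncomingMessage,
    shutdown_event: asyncio.Event,
    *,
    requeue: bool = False,
    requeue_timeout: float = 0,
) -> None:
    """Illustrative sketch: wait (unless shutting down), then reject with or without requeue."""
    if requeue_timeout > 0:
        # Wait up to requeue_timeout seconds, or return early once shutdown is signalled.
        with contextlib.suppress(asyncio.TimeoutError):
            await asyncio.wait_for(shutdown_event.wait(), timeout=requeue_timeout)
    await message.reject(requeue=requeue)
    logger.warning(
        "Message %s %s",
        message.message_id,
        "requeued" if requeue else "rejected without requeue",
    )
```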
@@ -1452,7 +1476,9 @@ class MessageHandlerCallback:
                         attempt + 1,
                         e,
                     )
-                    await
+                    await self._wait_delay_or_shutdown(
+                        (1.0 * (attempt + 1))
+                    )  # Exponential backoff
                 else:
                     logger.error(
                         "Failed to republish message %s after %s attempts: %s",
@@ -1477,22 +1503,7 @@ class MessageHandlerCallback:
             pass

     async def _wait_delay_or_shutdown(self, delay: float) -> None:
-        """
-        Wait for the specified delay or exit early if shutdown is initiated.
-
-        Args:
-            delay: Delay in seconds to wait
-        """
-
-        wait_cor = asyncio.create_task(asyncio.sleep(delay), name="delayed-retry-wait")
-        wait_shutdown_cor = asyncio.create_task(
-            self.consumer.shutdown_event.wait(), name="delayed-retry-shutdown-wait"
-        )
-
-        await asyncio.wait(
-            [wait_cor, wait_shutdown_cor],
-            return_when=asyncio.FIRST_COMPLETED,
-        )
+        await self.consumer._wait_delay_or_shutdown(delay)

     async def handle_message(
         self, aio_pika_message: aio_pika.abc.AbstractIncomingMessage
@@ -1508,47 +1519,53 @@ class MessageHandlerCallback:
         handler_data = self.message_handler

         handler = handler_data.instance_callable
+        handler_method = handler_data.controller_member.member_function

-        sig = inspect.signature(handler)
+        # sig = inspect.signature(handler)

-        if len(sig.parameters) != 1:
-
-
-
-
-
+        # if len(sig.parameters) != 1:
+        #     logger.warning(
+        #         "Handler for topic '%s' must have exactly one parameter which is MessageOf[T extends Message]"
+        #         % routing_key
+        #     )
+        #     return

-        parameter = list(sig.parameters.values())[0]
+        # parameter = list(sig.parameters.values())[0]

-        param_origin = get_origin(parameter.annotation)
+        # param_origin = get_origin(parameter.annotation)

-        if param_origin is not MessageOf:
-
-
-
-
-
+        # if param_origin is not MessageOf:
+        #     logger.warning(
+        #         "Handler for topic '%s' must have exactly one parameter of type Message"
+        #         % routing_key
+        #     )
+        #     return

-        if len(parameter.annotation.__args__) != 1:
-
-
-
-
-
+        # if len(parameter.annotation.__args__) != 1:
+        #     logger.warning(
+        #         "Handler for topic '%s' must have exactly one parameter of type Message"
+        #         % routing_key
+        #     )
+        #     return

-        message_type = parameter.annotation.__args__[0]
+        # message_type = parameter.annotation.__args__[0]

-        if not issubclass(message_type, BaseModel):
-
-
-
-
-
+        # if not issubclass(message_type, BaseModel):
+        #     logger.warning(
+        #         "Handler for topic '%s' must have exactly one parameter of type MessageOf[BaseModel]"
+        #         % routing_key
+        #     )
+        #     return

-
+        mode, message_type = MessageHandler.validate_decorated_fn(handler_method)
+
+        built_message = AioPikaMessage(aio_pika_message, message_type)

         incoming_message_spec = MessageHandler.get_last(handler)
-        assert incoming_message_spec is not None
+        assert incoming_message_spec is not None, "Incoming message spec must be set"
+        # Extract retry count from headers if available
+        headers = aio_pika_message.headers or {}
+        retry_count = int(str(headers.get("x-retry-count", 0)))

         with provide_implicit_headers(aio_pika_message.headers), provide_shutdown_state(
             self.consumer.shutdown_state
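Retry progress now rides on the message itself as an `x-retry-count` header, read back defensively with `int(str(headers.get("x-retry-count", 0)))` because header values can arrive in different types. A small sketch of reading and bumping such a counter on a plain header dict; the republish side is an assumption, this hunk only shows the read.

```python
from typing import Any, Dict


def read_retry_count(headers: Dict[str, Any]) -> int:
    # Header values may arrive as bytes/str/int depending on the publisher, hence str() then int().
    return int(str(headers.get("x-retry-count", 0)))


def bump_retry_count(headers: Dict[str, Any]) -> Dict[str, Any]:
    """Hypothetical helper for the republish path: copy headers and increment the counter."""
    new_headers = dict(headers)
    new_headers["x-retry-count"] = read_retry_count(headers) + 1
    return new_headers


headers = {"x-retry-count": "2"}
print(read_retry_count(headers))  # 2
print(bump_retry_count(headers))  # {'x-retry-count': 3}
```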
@@ -1557,8 +1574,10 @@ class MessageHandlerCallback:
             AppTransactionContext(
                 controller_member_reflect=handler_data.controller_member,
                 transaction_data=MessageBusTransactionData(
+                    message_id=aio_pika_message.message_id,
+                    processing_attempt=retry_count + 1,
                     message_type=message_type,
-                    message=
+                    message=built_message,
                     topic=routing_key,
                 ),
             )
@@ -1575,94 +1594,149 @@ class MessageHandlerCallback:
             with provide_bus_message_controller(
                 AioPikaMessageBusController(aio_pika_message)
             ):
-                await handler(builded_message)
-                with suppress(aio_pika.MessageProcessError):
-                    # Use channel context for acknowledgement with retry
-                    try:
-                        await aio_pika_message.ack()
-                    except Exception as ack_error:
-                        logger.warning(
-                            "Failed to acknowledge message %s: %s",
-                            aio_pika_message.message_id or "unknown",
-                            ack_error,
-                        )
-                successfully = True
-            except BaseException as base_exc:
-                successfully = False
-                # Get message id for logging
-                message_id = aio_pika_message.message_id or "unknown"
-
-                # Extract retry count from headers if available
-                headers = aio_pika_message.headers or {}
-                retry_count = int(str(headers.get("x-retry-count", 0)))
-
-                # Process exception handler if configured
-                if incoming_message_spec.exception_handler is not None:
                 try:
-
-
-
-
-
+                    if mode == "WRAPPED":
+                        future = handler(built_message)
+                    else:
+                        try:
+
+                            payload = built_message.payload()
+                        except ValidationError as exc:
+                            logger.exception(
+                                "Validation error parsing message %s on topic %s",
+                                aio_pika_message.message_id or "unknown",
+                                routing_key,
+                            )
+                            aio_pika_message.headers["x-last-error"] = (
+                                "Validation error parsing message payload"
+                            )
+                            await aio_pika_message.reject(requeue=False)
+                            record_exception(
+                                exc,
+                            )
+                            set_span_status("ERROR")
+                            return
+                        future = handler(payload)
+
+                    await future
+
+                    with suppress(aio_pika.MessageProcessError):
+                        # Use channel context for acknowledgement with retry
+                        try:
+                            await aio_pika_message.ack()
+                            set_span_status("OK")
+                        except Exception as ack_error:
+                            logger.warning(
+                                "Failed to acknowledge message %s: %s",
+                                aio_pika_message.message_id or "unknown",
+                                ack_error,
+                            )
+                    successfully = True
+                except Exception as base_exc:
+                    set_span_status("ERROR")
+                    record_exception(
                         base_exc,
-
+                        {
+                            "message_id": aio_pika_message.message_id
+                            or "unknown",
+                            "routing_key": routing_key,
+                        },
                     )
-
-
-
-
-
-
-
+                    successfully = False
+                    # Get message id for logging
+                    message_id = aio_pika_message.message_id or "unknown"
+
+                    # Process exception handler if configured
+                    if incoming_message_spec.exception_handler is not None:
+                        try:
+                            incoming_message_spec.exception_handler(
+                                base_exc
+                            )
+                        except Exception as nested_exc:
+                            logger.exception(
+                                "Error processing exception handler for message %s: %s | %s",
+                                message_id,
+                                base_exc,
+                                nested_exc,
+                            )
+                    else:
+                        logger.exception(
+                            "Error processing message %s on topic %s: %s",
+                            message_id,
+                            routing_key,
+                            str(base_exc),
+                        )

-
-
-
-
-
-
-
+                    # Handle rejection with retry logic
+                    if incoming_message_spec.nack_on_exception:
+                        await self.handle_reject_message(
+                            aio_pika_message,
+                            requeue_timeout=incoming_message_spec.nack_delay_on_exception,
+                            requeue=False,  # Don't requeue directly, use our backoff mechanism
+                            retry_count=retry_count,
+                            exception=base_exc,
+                        )
+                    else:
+                        # Message shouldn't be retried, reject it
+                        await self.handle_reject_message(
+                            aio_pika_message,
+                            requeue=False,
+                            requeue_timeout=incoming_message_spec.nack_delay_on_exception,
+                            exception=base_exc,
+                        )
+
+                elapsed_time = time.perf_counter() - start_time
+                # Message processed successfully, log and clean up any retry state
+                message_id = aio_pika_message.message_id or str(
+                    uuid.uuid4()
                 )
-
-
-
-
+                if message_id in self.retry_state:
+                    del self.retry_state[message_id]
+
+                # Log success with retry information if applicable
+                headers = aio_pika_message.headers or {}
+                traceparent = headers.get("traceparent")
+                trace_info = (
+                    f" [traceparent={str(traceparent)}]"
+                    if traceparent
+                    else ""
                 )

-
-
-
-
-
-
-
-
-
-
-
-
+                if "x-retry-count" in headers:
+                    retry_count = int(str(headers.get("x-retry-count", 0)))
+                    logger.debug(
+                        "Message %s#%s processed "
+                        + (
+                            "successfully"
+                            if successfully
+                            else "with errors"
+                        )
+                        + " after %s retries in %.4fs%s",
+                        message_id,
+                        self.queue_name,
+                        retry_count,
+                        elapsed_time,
+                        trace_info,
+                    )
+                else:
+                    logger.debug(
+                        "Message %s#%s processed "
+                        + (
+                            "successfully"
+                            if successfully
+                            else "with errors"
+                        )
+                        + " in %.4fs%s",
+                        message_id,
+                        self.queue_name,
+                        elapsed_time,
+                        trace_info,
+                    )
+                ...

-
-
-
-                "Message %s#%s processed "
-                + ("successfully" if successfully else "with errors")
-                + " after %s retries in %.4fs%s",
-                message_id,
-                self.queue_name,
-                retry_count,
-                elapsed_time,
-                trace_info,
-            )
-            else:
-                logger.debug(
-                "Message %s#%s processed "
-                + ("successfully" if successfully else "with errors")
-                + " in %.4fs%s",
-                message_id,
-                self.queue_name,
-                elapsed_time,
-                trace_info,
+        except Exception as base_exc:
+            logger.critical(
+                f"Critical error processing message {aio_pika_message.message_id} when providing bus message controller: {base_exc}"
             )


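The rewritten body distinguishes a "WRAPPED" handler (which receives the built `AioPikaMessage`) from one that takes the parsed payload, and a pydantic `ValidationError` while parsing now dead-letters the message (`reject(requeue=False)`) instead of letting it requeue forever. A reduced sketch of that validate-or-dead-letter branch; the payload model and function names are illustrative, and `model_validate_json` assumes pydantic v2 (use `parse_raw` on v1).

```python
import asyncio
import logging
from typing import Awaitable, Callable

from pydantic import BaseModel, ValidationError

logger = logging.getLogger(__name__)


class OrderCreated(BaseModel):  # illustrative payload model
    order_id: str
    amount: float


async def dispatch(raw_body: bytes, reject_dead_letter: Callable[[], Awaitable[None]]) -> None:
    """Parse the payload; on ValidationError, dead-letter instead of retrying a poison message."""
    try:
        payload = OrderCreated.model_validate_json(raw_body)
    except ValidationError:
        logger.exception("Validation error parsing message payload")
        await reject_dead_letter()  # e.g. message.reject(requeue=False)
        return
    logger.info("Handling order %s for %.2f", payload.order_id, payload.amount)


async def _dead_letter() -> None:
    logger.warning("rejected without requeue")


asyncio.run(dispatch(b'{"order_id": "A1", "amount": "not-a-number"}', _dead_letter))
```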
@@ -1841,13 +1915,15 @@ class AioPikaMessageBusController(BusMessageController):
     async def reject(self) -> None:
         await self.aio_pika_message.reject()

-    async def retry(self) -> None:
+    async def retry(self, delay: float = 5) -> None:
         """
         Retry the message immediately by rejecting with requeue flag.
         This doesn't use the exponential backoff mechanism.
         """
         callback = self._get_callback()
-        await callback.handle_reject_message(
+        await callback.handle_reject_message(
+            self.aio_pika_message, requeue=True, requeue_timeout=delay
+        )

     async def retry_later(self, delay: int) -> None:
         """
|