jararaca 0.3.16__tar.gz → 0.3.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of jararaca as potentially problematic.
- {jararaca-0.3.16 → jararaca-0.3.18}/PKG-INFO +1 -1
- {jararaca-0.3.16 → jararaca-0.3.18}/pyproject.toml +1 -1
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/decorators.py +1 -1
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/worker.py +111 -237
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/redis.py +164 -49
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/websocket_interceptor.py +35 -12
- {jararaca-0.3.16 → jararaca-0.3.18}/LICENSE +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/README.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/CNAME +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/architecture.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/assets/_f04774c9-7e05-4da4-8b17-8be23f6a1475.jpeg +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/assets/_f04774c9-7e05-4da4-8b17-8be23f6a1475.webp +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/assets/tracing_example.png +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/expose-type.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/http-rpc.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/index.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/interceptors.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/messagebus.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/retry.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/scheduler.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/stylesheets/custom.css +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/docs/websocket.md +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/__main__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/broker_backend/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/broker_backend/mapper.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/broker_backend/redis_broker_backend.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/cli.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/common/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/core/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/core/providers.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/core/uow.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/di.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/files/entity.py.mako +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/lifecycle.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/bus_message_controller.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/consumers/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/interceptors/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/interceptors/aiopika_publisher_interceptor.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/interceptors/publisher_interceptor.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/message.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/messagebus/publisher.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/microservice.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/observability/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/observability/interceptor.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/observability/providers/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/observability/providers/otel.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/base.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/exports.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/interceptors/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/interceptors/aiosqa_interceptor.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/interceptors/constants.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/interceptors/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/session.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/sort_filter.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/persistence/utilities.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/hooks.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/http_microservice.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/server.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/base_types.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/context.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/presentation/websocket/types.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/py.typed +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/reflect/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/reflect/controller_inspect.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/reflect/metadata.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/http/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/http/backends/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/http/backends/httpx.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/http/backends/otel.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/http/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/rpc/http/httpx.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/scheduler/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/scheduler/beat_worker.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/scheduler/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/scheduler/types.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/tools/app_config/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/tools/app_config/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/tools/app_config/interceptor.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/tools/typescript/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/tools/typescript/decorators.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/tools/typescript/interface_parser.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/utils/__init__.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/utils/rabbitmq_utils.py +0 -0
- {jararaca-0.3.16 → jararaca-0.3.18}/src/jararaca/utils/retry.py +0 -0
Note: the registry's diff viewer truncated a number of lines; truncated fragments (for example `Callable[` below) and bare `-` markers for removed lines whose content was lost are kept as rendered.

src/jararaca/messagebus/decorators.py

```diff
@@ -69,7 +69,7 @@ class MessageHandler(Generic[INHERITS_MESSAGE_CO]):
 class MessageHandlerData:
     message_type: type[Any]
     spec: MessageHandler[Message]
-    instance_callable: Callable[
+    instance_callable: Callable[..., Awaitable[None]]
     controller_member: ControllerMemberReflect
```
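The annotation on `instance_callable` (cut off in the old rendering after `Callable[`) is now the concrete async-handler signature. A minimal sketch of what the new annotation admits; the handler name and message type below are hypothetical:

```python
from collections.abc import Awaitable, Callable

# Any async callable returning None satisfies the new annotation.
async def handle_order_created(message: object) -> None:  # hypothetical handler
    ...

instance_callable: Callable[..., Awaitable[None]] = handle_order_created
```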
src/jararaca/messagebus/worker.py

```diff
@@ -188,11 +188,8 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         # Connection resilience attributes
         self.connection_healthy = False
         self.connection_lock = asyncio.Lock()
-        self.reconnection_event = asyncio.Event()
-        self.reconnection_in_progress = False
         self.consumer_tags: dict[str, str] = {}  # Track consumer tags for cleanup
         self.health_check_task: asyncio.Task[Any] | None = None
-        self.reconnection_task: asyncio.Task[Any] | None = None

     async def _verify_infrastructure(self) -> bool:
         """
@@ -229,10 +226,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"

         async def setup_consumer() -> None:
-            # Wait for connection to be healthy if reconnection is in progress
-            if self.reconnection_in_progress:
-                await self.reconnection_event.wait()
-
             # Create a channel using the context manager
             async with self.create_channel(queue_name) as channel:
                 queue = await RabbitmqUtils.get_queue(
@@ -289,10 +282,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         routing_key = queue_name

         async def setup_consumer() -> None:
-            # Wait for connection to be healthy if reconnection is in progress
-            if self.reconnection_in_progress:
-                await self.reconnection_event.wait()
-
             # Create a channel using the context manager
             async with self.create_channel(queue_name) as channel:
                 queue = await RabbitmqUtils.get_queue(
@@ -341,106 +330,107 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         Main consume method that sets up all message handlers and scheduled actions with retry mechanisms.
         """
         # Establish initial connection
-
-        self.
-
-        # Start connection health monitoring
-        self.health_check_task = asyncio.create_task(
-            self._monitor_connection_health()
-        )
+        try:
+            async with self.connect() as connection:
+                self.connection_healthy = True

-
-
-
-
-            retry_exceptions=(Exception,),
-        )
+                # Start connection health monitoring
+                self.health_check_task = asyncio.create_task(
+                    self._monitor_connection_health()
+                )

-
-
-
+                # Verify infrastructure with retry
+                infra_check_success = await retry_with_backoff(
+                    self._verify_infrastructure,
+                    retry_config=self.config.connection_retry_config,
+                    retry_exceptions=(Exception,),
                 )
-            self.shutdown_event.set()
-            return

-
-
-
-
+                if not infra_check_success:
+                    logger.critical(
+                        "Failed to verify RabbitMQ infrastructure. Shutting down."
+                    )
+                    self.shutdown_event.set()
+                    return

-
+                async def wait_for(
+                    type: str, name: str, coroutine: Awaitable[bool]
+                ) -> tuple[str, str, bool]:
+                    return type, name, await coroutine

-
-        for handler in self.message_handler_set:
-            queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
-            self.incoming_map[queue_name] = handler
+                tasks: set[asyncio.Task[tuple[str, str, bool]]] = set()

-
-
-
-
-                    queue_name,
-                    self._setup_message_handler_consumer(handler),
-                )
-            )
-        )
+                # Setup message handlers
+                for handler in self.message_handler_set:
+                    queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
+                    self.incoming_map[queue_name] = handler

-
-
-
-
-
-
-
-                    queue_name,
-                    self._setup_scheduled_action_consumer(scheduled_action),
+                    tasks.add(
+                        task := asyncio.create_task(
+                            wait_for(
+                                "message_handler",
+                                queue_name,
+                                self._setup_message_handler_consumer(handler),
+                            )
                         )
                     )
-        )

-
-        for
-
-
-
-
-
-
+                # Setup scheduled actions
+                for scheduled_action in self.scheduled_actions:
+                    queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+                    tasks.add(
+                        task := asyncio.create_task(
+                            wait_for(
+                                "scheduled_action",
+                                queue_name,
+                                self._setup_scheduled_action_consumer(scheduled_action),
+                            )
                         )
+                    )

-
-
-
-
-
+                async def handle_task_results() -> None:
+                    for task in asyncio.as_completed(tasks):
+                        type, name, success = await task
+                        if success:
+                            logger.info(
+                                f"Successfully set up {type} consumer for {name}"
+                            )
+                        else:
+                            logger.warning(
+                                f"Failed to set up {type} consumer for {name}, will not process messages from this queue"
+                            )

-
-        if self.health_check_task:
-            self.health_check_task.cancel()
-            with suppress(asyncio.CancelledError):
-                await self.health_check_task
+                handle_task_results_task = asyncio.create_task(handle_task_results())

-
-
-
-            with suppress(asyncio.CancelledError):
-                await self.reconnection_task
+                # Wait for shutdown signal
+                await self.shutdown_event.wait()
+                logger.info("Shutdown event received, stopping consumers")

-
-
-
-        for task in tasks:
-            if not task.done():
-                task.cancel()
+                # Cancel health monitoring
+                if self.health_check_task:
+                    self.health_check_task.cancel()
                     with suppress(asyncio.CancelledError):
-            await
-        logger.info("Worker shutting down")
+                        await self.health_check_task

-
-
-
-
-
+                handle_task_results_task.cancel()
+                with suppress(asyncio.CancelledError):
+                    await handle_task_results_task
+                for task in tasks:
+                    if not task.done():
+                        task.cancel()
+                        with suppress(asyncio.CancelledError):
+                            await task
+                logger.info("Worker shutting down")
+
+                # Wait for all tasks to complete
+                await self.wait_all_tasks_done()
+
+                # Close all channels and the connection
+                await self.close_channels_and_connection()
+        except Exception as e:
+            logger.critical(f"Failed to establish initial connection to RabbitMQ: {e}")
+            # Re-raise the exception so it can be caught by the caller
+            raise

     async def wait_all_tasks_done(self) -> None:
         if not self.tasks:
```
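The rewritten `consume()` wraps each consumer-setup coroutine so its result carries a kind and queue name, then logs outcomes as they arrive via `asyncio.as_completed`. A standalone sketch of that tagging pattern, with stand-in setup coroutines and illustrative queue names:

```python
import asyncio
from collections.abc import Awaitable

async def wait_for(kind: str, name: str, coro: Awaitable[bool]) -> tuple[str, str, bool]:
    # Tag a setup coroutine's result with its kind and queue name.
    return kind, name, await coro

async def setup(ok: bool) -> bool:
    # Stand-in for _setup_message_handler_consumer / _setup_scheduled_action_consumer.
    await asyncio.sleep(0)
    return ok

async def main() -> None:
    tasks = {
        asyncio.create_task(wait_for("message_handler", "orders.created.handler", setup(True))),
        asyncio.create_task(wait_for("scheduled_action", "jobs.cleanup", setup(False))),
    }
    for fut in asyncio.as_completed(tasks):
        kind, name, success = await fut  # results arrive as each setup finishes
        print("ok" if success else "failed", kind, name)

asyncio.run(main())
```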
```diff
@@ -478,12 +468,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
             with suppress(asyncio.CancelledError):
                 await self.health_check_task

-        # Cancel reconnection task if running
-        if self.reconnection_task:
-            self.reconnection_task.cancel()
-            with suppress(asyncio.CancelledError):
-                await self.reconnection_task
-
         await self.wait_all_tasks_done()
         await self.close_channels_and_connection()

@@ -492,16 +476,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         Get the channel for a specific queue, or None if not found.
         This helps with error handling when a channel might have been closed.
         """
-        # If reconnection is in progress, wait for it to complete
-        if self.reconnection_in_progress:
-            try:
-                await asyncio.wait_for(self.reconnection_event.wait(), timeout=30.0)
-            except asyncio.TimeoutError:
-                logger.warning(
-                    f"Timeout waiting for reconnection when getting channel for {queue_name}"
-                )
-                return None
-
         if queue_name not in self.channels:
             logger.warning(f"No channel found for queue {queue_name}")
             return None
@@ -530,17 +504,17 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
                     logger.error(
                         f"Failed to recreate channel for {queue_name}: {e}"
                     )
-                    # Trigger
+                    # Trigger shutdown if channel creation fails
                     self._trigger_reconnection()
                     return None
             else:
-                # Connection is not healthy, trigger
+                # Connection is not healthy, trigger shutdown
                 self._trigger_reconnection()
                 return None
             return channel
         except Exception as e:
             logger.error(f"Error accessing channel for queue {queue_name}: {e}")
-            # Trigger
+            # Trigger shutdown on any channel access error
             self._trigger_reconnection()
             return None

@@ -691,33 +665,14 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
                         yield new_channel
                         return
                     else:
-                        # Connection is not healthy,
-
-
-
-
-
-
-                except asyncio.TimeoutError:
-                    logger.warning(
-                        f"Timeout waiting for reconnection for queue {queue_name}"
-                    )
-
-                # Still no connection, trigger reconnection
-                if not self.reconnection_in_progress:
-                    self._trigger_reconnection()
-
-                if attempt < max_retries - 1:
-                    logger.info(
-                        f"Retrying channel access for {queue_name} in {retry_delay}s"
-                    )
-                    await asyncio.sleep(retry_delay)
-                    retry_delay *= 2
-                else:
-                    raise RuntimeError(
-                        f"Cannot get channel for queue {queue_name}: no connection available after {max_retries} attempts"
-                    )
+                        # Connection is not healthy, trigger shutdown
+                        logger.error(
+                            f"Connection not healthy while getting channel for {queue_name}, triggering shutdown"
+                        )
+                        self._trigger_reconnection()
+                        raise RuntimeError(
+                            f"Cannot get channel for queue {queue_name}: connection is not healthy"
+                        )

         except Exception as e:
             if attempt < max_retries - 1:
@@ -734,7 +689,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):

     async def _monitor_connection_health(self) -> None:
         """
-        Monitor connection health and trigger
+        Monitor connection health and trigger shutdown if connection is lost.
         This runs as a background task.
         """
         while not self.shutdown_event.is_set():
@@ -746,11 +701,11 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):

                 # Check connection health
                 if not await self._is_connection_healthy():
-                    logger.
-                    "Connection health check failed,
+                    logger.error(
+                        "Connection health check failed, initiating worker shutdown"
                     )
-
-
+                    self.shutdown()
+                    break

             except asyncio.CancelledError:
                 logger.info("Connection health monitoring cancelled")
```
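With reconnection gone, a failed health check now ends the worker: `_monitor_connection_health` calls `self.shutdown()` and breaks out of its loop. A minimal runnable sketch of that monitor-then-shutdown shape; `check` and the event wiring are stand-ins, not the library's API:

```python
import asyncio
from collections.abc import Awaitable, Callable

async def monitor(check: Callable[[], Awaitable[bool]],
                  shutdown: Callable[[], None],
                  stop: asyncio.Event, interval: float = 0.01) -> None:
    # Poll until shutdown; on a failed check, shut down and stop monitoring.
    while not stop.is_set():
        await asyncio.sleep(interval)
        if not await check():
            shutdown()
            break

async def main() -> None:
    stop = asyncio.Event()

    async def check() -> bool:
        return False  # simulate a failed health check

    await monitor(check, stop.set, stop)
    print("worker stopped:", stop.is_set())

asyncio.run(main())
```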
```diff
@@ -778,74 +733,12 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):

     def _trigger_reconnection(self) -> None:
         """
-        Trigger
+        Trigger worker shutdown due to connection loss.
         """
-        if not self.
-
+        if not self.shutdown_event.is_set():
+            logger.error("Connection lost, initiating worker shutdown")
             self.connection_healthy = False
-            self.
-
-            # Start reconnection task
-            self.reconnection_task = asyncio.create_task(self._handle_reconnection())
-            self.reconnection_task.add_done_callback(self._on_reconnection_done)
-
-    def _on_reconnection_done(self, task: asyncio.Task[Any]) -> None:
-        """
-        Handle completion of reconnection task.
-        """
-        self.reconnection_in_progress = False
-        if task.exception():
-            logger.error(f"Reconnection task failed: {task.exception()}")
-        else:
-            logger.info("Reconnection completed successfully")
-
-    async def _handle_reconnection(self) -> None:
-        """
-        Handle the reconnection process with exponential backoff.
-        """
-        logger.info("Starting reconnection process")
-
-        # Close existing connection and channels
-        await self._cleanup_connection()
-
-        reconnection_config = self.config.reconnection_backoff_config
-        attempt = 0
-
-        while not self.shutdown_event.is_set():
-            try:
-                attempt += 1
-                logger.info(f"Reconnection attempt {attempt}")
-
-                # Establish new connection
-                self.connection = await self._establish_connection()
-                self.connection_healthy = True
-
-                # Re-establish all consumers
-                await self._reestablish_consumers()
-
-                logger.info("Reconnection successful")
-                self.reconnection_event.set()
-                return
-
-            except Exception as e:
-                logger.error(f"Reconnection attempt {attempt} failed: {e}")
-
-                if self.shutdown_event.is_set():
-                    break
-
-                # Calculate backoff delay
-                delay = reconnection_config.initial_delay * (
-                    reconnection_config.backoff_factor ** (attempt - 1)
-                )
-                if reconnection_config.jitter:
-                    jitter_amount = delay * 0.25
-                    delay = delay + random.uniform(-jitter_amount, jitter_amount)
-                    delay = max(delay, 0.1)
-
-                delay = min(delay, reconnection_config.max_delay)
-
-                logger.info(f"Retrying reconnection in {delay:.2f} seconds")
-                await asyncio.sleep(delay)
+            self.shutdown()

     async def _cleanup_connection(self) -> None:
         """
```
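For reference, the deleted `_handle_reconnection` loop computed its retry delay as exponential backoff with optional ±25% jitter, floored at 0.1 s and capped at the configured maximum. A sketch of that computation; the default values are assumptions, since the config fields themselves are not shown in the diff:

```python
import random

def backoff_delay(attempt: int, initial_delay: float = 1.0,
                  backoff_factor: float = 2.0, max_delay: float = 60.0,
                  jitter: bool = True) -> float:
    # delay = initial * factor^(attempt - 1), optionally jittered by ±25%.
    delay = initial_delay * (backoff_factor ** (attempt - 1))
    if jitter:
        jitter_amount = delay * 0.25
        delay = max(delay + random.uniform(-jitter_amount, jitter_amount), 0.1)
    return min(delay, max_delay)

print([round(backoff_delay(a, jitter=False), 1) for a in range(1, 7)])
# -> [1.0, 2.0, 4.0, 8.0, 16.0, 32.0]
```

In 0.3.18 this policy is replaced by fail-fast shutdown; restarting is left to whatever supervises the worker process.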
```diff
@@ -889,32 +782,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
         self.connection = None
         self.connection_healthy = False

-    async def _reestablish_consumers(self) -> None:
-        """
-        Re-establish all consumers after reconnection.
-        """
-        logger.info("Re-establishing consumers after reconnection")
-
-        # Re-establish message handlers
-        for handler in self.message_handler_set:
-            queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
-            try:
-                await self._setup_message_handler_consumer(handler)
-                logger.info(f"Re-established consumer for {queue_name}")
-            except Exception as e:
-                logger.error(f"Failed to re-establish consumer for {queue_name}: {e}")
-
-        # Re-establish scheduled actions
-        for scheduled_action in self.scheduled_actions:
-            queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
-            try:
-                await self._setup_scheduled_action_consumer(scheduled_action)
-                logger.info(f"Re-established scheduler consumer for {queue_name}")
-            except Exception as e:
-                logger.error(
-                    f"Failed to re-establish scheduler consumer for {queue_name}: {e}"
-                )
-

 def create_message_bus(
     broker_url: str,
@@ -1798,7 +1665,14 @@ class MessageBusWorker:
             loop.add_signal_handler(signal.SIGINT, on_shutdown, loop)
             # Add graceful shutdown handler for SIGTERM as well
             loop.add_signal_handler(signal.SIGTERM, on_shutdown, loop)
-
+            try:
+                runner.run(self.start_async())
+            except Exception as e:
+                logger.critical(f"Worker failed to start due to connection error: {e}")
+                # Exit with error code 1 to indicate startup failure
+                import sys
+
+                sys.exit(1)

     async def _graceful_shutdown(self) -> None:
         """Handles graceful shutdown process"""
```
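`run()` now converts a startup failure into a non-zero exit code, so a process supervisor can observe the failure and restart the worker. A minimal sketch of the same shape, assuming Python 3.11+ for `asyncio.Runner`:

```python
import asyncio
import sys

async def start_async() -> None:
    raise ConnectionError("broker unreachable")  # simulated startup failure

with asyncio.Runner() as runner:
    try:
        runner.run(start_async())
    except Exception as e:
        print(f"Worker failed to start: {e}", file=sys.stderr)
        sys.exit(1)  # non-zero exit signals startup failure to the supervisor
```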
src/jararaca/presentation/websocket/redis.py

```diff
@@ -59,6 +59,7 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
         consume_broadcast_timeout: int = 1,
         consume_send_timeout: int = 1,
         retry_delay: float = 5.0,
+        max_concurrent_tasks: int = 1000,
     ) -> None:

         self.redis = conn
@@ -67,6 +68,8 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):

         self.lock = asyncio.Lock()
         self.tasks: set[asyncio.Task[Any]] = set()
+        self.max_concurrent_tasks = max_concurrent_tasks
+        self.task_semaphore = asyncio.Semaphore(max_concurrent_tasks)

         self.consume_broadcast_timeout = consume_broadcast_timeout
         self.consume_send_timeout = consume_send_timeout
@@ -101,16 +104,26 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
         return self.__broadcast_func

     async def broadcast(self, message: bytes) -> None:
-
-        self.
-
-
+        try:
+            await self.redis.publish(
+                self.broadcast_pubsub_channel,
+                BroadcastMessage.from_message(message).encode(),
+            )
+        except Exception as e:
+            logger.error(
+                f"Failed to publish broadcast message to Redis: {e}", exc_info=True
+            )
+            raise

     async def send(self, rooms: list[str], message: bytes) -> None:
-
-        self.
-
-
+        try:
+            await self.redis.publish(
+                self.send_pubsub_channel,
+                SendToRoomsMessage.from_message(rooms, message).encode(),
+            )
+        except Exception as e:
+            logger.error(f"Failed to publish send message to Redis: {e}", exc_info=True)
+            raise

     def configure(
         self, broadcast: BroadcastFunc, send: SendFunc, shutdown_event: asyncio.Event
@@ -129,7 +142,12 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
             self.consume_send(self.send_func, self.shutdown_event)
         )

-
+        # Use lock when modifying tasks set to prevent race conditions
+        async def add_task() -> None:
+            async with self.lock:
+                self.tasks.add(send_task)
+
+        asyncio.get_event_loop().create_task(add_task())
         send_task.add_done_callback(self.handle_send_task_done)

     def setup_broadcast_consumer(self) -> None:
@@ -138,11 +156,23 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
             self.consume_broadcast(self.broadcast_func, self.shutdown_event)
         )

-
+        # Use lock when modifying tasks set to prevent race conditions
+        async def add_task() -> None:
+            async with self.lock:
+                self.tasks.add(broadcast_task)
+
+        asyncio.get_event_loop().create_task(add_task())

         broadcast_task.add_done_callback(self.handle_broadcast_task_done)

     def handle_broadcast_task_done(self, task: asyncio.Task[Any]) -> None:
+        # Remove task from set safely with lock
+        async def remove_task() -> None:
+            async with self.lock:
+                self.tasks.discard(task)
+
+        asyncio.get_event_loop().create_task(remove_task())
+
         if task.cancelled():
             logger.warning("Broadcast task was cancelled.")
         elif task.exception() is not None:
@@ -162,6 +192,13 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
             )

     def handle_send_task_done(self, task: asyncio.Task[Any]) -> None:
+        # Remove task from set safely with lock
+        async def remove_task() -> None:
+            async with self.lock:
+                self.tasks.discard(task)
+
+        asyncio.get_event_loop().create_task(remove_task())
+
         if task.cancelled():
             logger.warning("Send task was cancelled.")
         elif task.exception() is not None:
@@ -204,54 +241,132 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
         self, broadcast: BroadcastFunc, shutdown_event: asyncio.Event
     ) -> None:
         logger.info("Starting broadcast consumer...")
-
-
-
-
-
-
-
+        try:
+            # Validate Redis connection before starting
+            try:
+                await self.redis.ping()
+                logger.info("Redis connection validated for broadcast consumer")
+            except Exception as e:
+                logger.error(f"Redis connection validation failed: {e}", exc_info=True)
+                raise
+
+            async with self.redis.pubsub() as pubsub:
+                await pubsub.subscribe(self.broadcast_pubsub_channel)
+                logger.info(
+                    f"Subscribed to broadcast channel: {self.broadcast_pubsub_channel}"
                 )

-
-
-
-
-
-                async with self.lock:
-                    task = asyncio.get_event_loop().create_task(
-                        broadcast(message=broadcast_message.message)
+                while not shutdown_event.is_set():
+                    message: dict[str, Any] | None = await pubsub.get_message(
+                        ignore_subscribe_messages=True,
+                        timeout=self.consume_broadcast_timeout,
                     )

-
-
-
+                    if message is None:
+                        continue
+
+                    broadcast_message = BroadcastMessage.decode(message["data"])
+
+                    # Use semaphore for backpressure control
+                    acquired = False
+                    try:
+                        await self.task_semaphore.acquire()
+                        acquired = True
+
+                        async def broadcast_with_cleanup(msg: bytes) -> None:
+                            try:
+                                await broadcast(message=msg)
+                            finally:
+                                self.task_semaphore.release()
+
+                        async with self.lock:
+                            task = asyncio.get_event_loop().create_task(
+                                broadcast_with_cleanup(broadcast_message.message)
+                            )
+
+                            self.tasks.add(task)
+
+                            task.add_done_callback(self.tasks.discard)
+                    except Exception as e:
+                        # Release semaphore if we acquired it but failed to create task
+                        if acquired:
+                            self.task_semaphore.release()
+                        logger.error(
+                            f"Error processing broadcast message: {e}", exc_info=True
+                        )
+                        # Continue processing other messages
+                        continue
+        except Exception as e:
+            logger.error(
+                f"Fatal error in broadcast consumer, will retry: {e}", exc_info=True
+            )
+            raise

     async def consume_send(self, send: SendFunc, shutdown_event: asyncio.Event) -> None:
         logger.info("Starting send consumer...")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        send(send_message.rooms, send_message.message)
+        try:
+            # Validate Redis connection before starting
+            try:
+                await self.redis.ping()
+                logger.info("Redis connection validated for send consumer")
+            except Exception as e:
+                logger.error(f"Redis connection validation failed: {e}", exc_info=True)
+                raise
+
+            async with self.redis.pubsub() as pubsub:
+                await pubsub.subscribe(self.send_pubsub_channel)
+                logger.info(f"Subscribed to send channel: {self.send_pubsub_channel}")
+
+                while not shutdown_event.is_set():
+                    message: dict[str, Any] | None = await pubsub.get_message(
+                        ignore_subscribe_messages=True,
+                        timeout=self.consume_send_timeout,
                     )

-
-
-
+                    if message is None:
+                        continue
+
+                    send_message = SendToRoomsMessage.decode(message["data"])
+
+                    # Use semaphore for backpressure control
+                    acquired = False
+                    try:
+                        await self.task_semaphore.acquire()
+                        acquired = True
+
+                        async def send_with_cleanup(
+                            rooms: list[str], msg: bytes
+                        ) -> None:
+                            try:
+                                await send(rooms, msg)
+                            finally:
+                                self.task_semaphore.release()
+
+                        async with self.lock:
+
+                            task = asyncio.get_event_loop().create_task(
+                                send_with_cleanup(
+                                    send_message.rooms, send_message.message
+                                )
+                            )
+
+                            self.tasks.add(task)
+
+                            task.add_done_callback(self.tasks.discard)
+                    except Exception as e:
+                        # Release semaphore if we acquired it but failed to create task
+                        if acquired:
+                            self.task_semaphore.release()
+                        logger.error(
+                            f"Error processing send message: {e}", exc_info=True
+                        )
+                        # Continue processing other messages
+                        continue
+        except Exception as e:
+            logger.error(
+                f"Fatal error in send consumer, will retry: {e}", exc_info=True
+            )
+            raise

     async def shutdown(self) -> None:
         async with self.lock:
```
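Both consumers now share the same backpressure discipline: an `asyncio.Semaphore` sized by `max_concurrent_tasks` caps in-flight handler tasks, and each task releases its permit in a `finally` block so a failing handler cannot leak permits. A standalone sketch of the pattern:

```python
import asyncio

async def main() -> None:
    semaphore = asyncio.Semaphore(2)      # stands in for task_semaphore
    tasks: set[asyncio.Task[None]] = set()

    async def handle(i: int) -> None:
        try:
            await asyncio.sleep(0.01)     # stands in for broadcast(...) / send(...)
        finally:
            semaphore.release()           # always return the permit

    for i in range(5):
        await semaphore.acquire()         # blocks once two tasks are in flight
        task = asyncio.create_task(handle(i))
        tasks.add(task)
        task.add_done_callback(tasks.discard)

    await asyncio.gather(*tasks)

asyncio.run(main())
```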
src/jararaca/presentation/websocket/websocket_interceptor.py

```diff
@@ -85,13 +85,24 @@ class WebSocketConnectionManagerImpl(WebSocketConnectionManager):
         await self.backend.broadcast(message)

     async def _broadcast_from_backend(self, message: bytes) -> None:
-
+        # Create a copy of the websockets set to avoid modification during iteration
+        async with self.lock:
+            websockets_to_send = list(self.all_websockets)
+
+        disconnected_websockets: list[WebSocket] = []
+
+        for websocket in websockets_to_send:
             try:
                 if websocket.client_state == WebSocketState.CONNECTED:
                     await websocket.send_bytes(message)
             except WebSocketDisconnect:
-
-
+                disconnected_websockets.append(websocket)
+
+        # Clean up disconnected websockets in a single lock acquisition
+        if disconnected_websockets:
+            async with self.lock:
+                for websocket in disconnected_websockets:
+                    self.all_websockets.discard(websocket)

     async def send(self, rooms: list[str], message: WebSocketMessageBase) -> None:

@@ -103,16 +114,28 @@ class WebSocketConnectionManagerImpl(WebSocketConnectionManager):
         )

     async def _send_from_backend(self, rooms: list[str], message: bytes) -> None:
+        # Create a copy of room memberships to avoid modification during iteration
+        async with self.lock:
+            room_websockets: dict[str, list[WebSocket]] = {
+                room: list(self.rooms.get(room, set())) for room in rooms
+            }
+
+        disconnected_by_room: dict[str, list[WebSocket]] = {room: [] for room in rooms}
+
+        for room, websockets in room_websockets.items():
+            for websocket in websockets:
+                try:
+                    if websocket.client_state == WebSocketState.CONNECTED:
+                        await websocket.send_bytes(message)
+                except WebSocketDisconnect:
+                    disconnected_by_room[room].append(websocket)
+
+        # Clean up disconnected websockets in a single lock acquisition
         async with self.lock:
-            for room in
-
-
-
-                        await websocket.send_bytes(message)
-                    except WebSocketDisconnect:
-                        async with self.lock:
-                            if websocket in self.rooms[room]:
-                                self.rooms[room].remove(websocket)
+            for room, disconnected_websockets in disconnected_by_room.items():
+                if room in self.rooms:
+                    for websocket in disconnected_websockets:
+                        self.rooms[room].discard(websocket)

     async def join(self, rooms: list[str], websocket: WebSocket) -> None:
         for room in rooms:
```
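Both `_broadcast_from_backend` and `_send_from_backend` now follow the same copy-then-iterate shape: snapshot the connection sets under the lock, perform the sends without holding it, and remove dead connections in one final lock acquisition. A condensed sketch; `conn` is any websocket-like object with a `send_bytes` coroutine:

```python
import asyncio
from typing import Any

async def broadcast_all(lock: asyncio.Lock, connections: set[Any], message: bytes) -> None:
    async with lock:
        snapshot = list(connections)      # copy so sends don't race membership changes

    dead: list[Any] = []
    for conn in snapshot:
        try:
            await conn.send_bytes(message)
        except Exception:                 # stands in for WebSocketDisconnect
            dead.append(conn)

    if dead:
        async with lock:                  # single lock acquisition for cleanup
            for conn in dead:
                connections.discard(conn)
```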
All remaining files are unchanged apart from the version-directory rename.