jararaca 0.3.16__py3-none-any.whl → 0.3.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of jararaca might be problematic.

@@ -69,7 +69,7 @@ class MessageHandler(Generic[INHERITS_MESSAGE_CO]):
  class MessageHandlerData:
  message_type: type[Any]
  spec: MessageHandler[Message]
- instance_callable: Callable[[MessageOf[Any]], Awaitable[None]]
+ instance_callable: Callable[..., Awaitable[None]]
  controller_member: ControllerMemberReflect
 
 
@@ -188,11 +188,8 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  # Connection resilience attributes
  self.connection_healthy = False
  self.connection_lock = asyncio.Lock()
- self.reconnection_event = asyncio.Event()
- self.reconnection_in_progress = False
  self.consumer_tags: dict[str, str] = {} # Track consumer tags for cleanup
  self.health_check_task: asyncio.Task[Any] | None = None
- self.reconnection_task: asyncio.Task[Any] | None = None
 
  async def _verify_infrastructure(self) -> bool:
  """
@@ -229,10 +226,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  routing_key = f"{handler.message_type.MESSAGE_TOPIC}.#"
 
  async def setup_consumer() -> None:
- # Wait for connection to be healthy if reconnection is in progress
- if self.reconnection_in_progress:
- await self.reconnection_event.wait()
-
  # Create a channel using the context manager
  async with self.create_channel(queue_name) as channel:
  queue = await RabbitmqUtils.get_queue(
@@ -289,10 +282,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  routing_key = queue_name
 
  async def setup_consumer() -> None:
- # Wait for connection to be healthy if reconnection is in progress
- if self.reconnection_in_progress:
- await self.reconnection_event.wait()
-
  # Create a channel using the context manager
  async with self.create_channel(queue_name) as channel:
  queue = await RabbitmqUtils.get_queue(
@@ -341,106 +330,107 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  Main consume method that sets up all message handlers and scheduled actions with retry mechanisms.
  """
  # Establish initial connection
- async with self.connect() as connection:
- self.connection_healthy = True
-
- # Start connection health monitoring
- self.health_check_task = asyncio.create_task(
- self._monitor_connection_health()
- )
+ try:
+ async with self.connect() as connection:
+ self.connection_healthy = True
 
- # Verify infrastructure with retry
- infra_check_success = await retry_with_backoff(
- self._verify_infrastructure,
- retry_config=self.config.connection_retry_config,
- retry_exceptions=(Exception,),
- )
+ # Start connection health monitoring
+ self.health_check_task = asyncio.create_task(
+ self._monitor_connection_health()
+ )
 
- if not infra_check_success:
- logger.critical(
- "Failed to verify RabbitMQ infrastructure. Shutting down."
+ # Verify infrastructure with retry
+ infra_check_success = await retry_with_backoff(
+ self._verify_infrastructure,
+ retry_config=self.config.connection_retry_config,
+ retry_exceptions=(Exception,),
  )
- self.shutdown_event.set()
- return
 
- async def wait_for(
- type: str, name: str, coroutine: Awaitable[bool]
- ) -> tuple[str, str, bool]:
- return type, name, await coroutine
+ if not infra_check_success:
+ logger.critical(
+ "Failed to verify RabbitMQ infrastructure. Shutting down."
+ )
+ self.shutdown_event.set()
+ return
 
- tasks: set[asyncio.Task[tuple[str, str, bool]]] = set()
+ async def wait_for(
+ type: str, name: str, coroutine: Awaitable[bool]
+ ) -> tuple[str, str, bool]:
+ return type, name, await coroutine
 
- # Setup message handlers
- for handler in self.message_handler_set:
- queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
- self.incoming_map[queue_name] = handler
+ tasks: set[asyncio.Task[tuple[str, str, bool]]] = set()
 
- tasks.add(
- task := asyncio.create_task(
- wait_for(
- "message_handler",
- queue_name,
- self._setup_message_handler_consumer(handler),
- )
- )
- )
+ # Setup message handlers
+ for handler in self.message_handler_set:
+ queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
+ self.incoming_map[queue_name] = handler
 
- # Setup scheduled actions
- for scheduled_action in self.scheduled_actions:
- queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
- tasks.add(
- task := asyncio.create_task(
- wait_for(
- "scheduled_action",
- queue_name,
- self._setup_scheduled_action_consumer(scheduled_action),
+ tasks.add(
+ task := asyncio.create_task(
+ wait_for(
+ "message_handler",
+ queue_name,
+ self._setup_message_handler_consumer(handler),
+ )
  )
  )
- )
 
- async def handle_task_results() -> None:
- for task in asyncio.as_completed(tasks):
- type, name, success = await task
- if success:
- logger.info(f"Successfully set up {type} consumer for {name}")
- else:
- logger.warning(
- f"Failed to set up {type} consumer for {name}, will not process messages from this queue"
+ # Setup scheduled actions
+ for scheduled_action in self.scheduled_actions:
+ queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
+ tasks.add(
+ task := asyncio.create_task(
+ wait_for(
+ "scheduled_action",
+ queue_name,
+ self._setup_scheduled_action_consumer(scheduled_action),
+ )
  )
+ )
 
- handle_task_results_task = asyncio.create_task(handle_task_results())
-
- # Wait for shutdown signal
- await self.shutdown_event.wait()
- logger.info("Shutdown event received, stopping consumers")
+ async def handle_task_results() -> None:
+ for task in asyncio.as_completed(tasks):
+ type, name, success = await task
+ if success:
+ logger.info(
+ f"Successfully set up {type} consumer for {name}"
+ )
+ else:
+ logger.warning(
+ f"Failed to set up {type} consumer for {name}, will not process messages from this queue"
+ )
 
- # Cancel health monitoring
- if self.health_check_task:
- self.health_check_task.cancel()
- with suppress(asyncio.CancelledError):
- await self.health_check_task
+ handle_task_results_task = asyncio.create_task(handle_task_results())
 
- # Cancel reconnection task if running
- if self.reconnection_task:
- self.reconnection_task.cancel()
- with suppress(asyncio.CancelledError):
- await self.reconnection_task
+ # Wait for shutdown signal
+ await self.shutdown_event.wait()
+ logger.info("Shutdown event received, stopping consumers")
 
- handle_task_results_task.cancel()
- with suppress(asyncio.CancelledError):
- await handle_task_results_task
- for task in tasks:
- if not task.done():
- task.cancel()
+ # Cancel health monitoring
+ if self.health_check_task:
+ self.health_check_task.cancel()
  with suppress(asyncio.CancelledError):
- await task
- logger.info("Worker shutting down")
+ await self.health_check_task
 
- # Wait for all tasks to complete
- await self.wait_all_tasks_done()
-
- # Close all channels and the connection
- await self.close_channels_and_connection()
+ handle_task_results_task.cancel()
+ with suppress(asyncio.CancelledError):
+ await handle_task_results_task
+ for task in tasks:
+ if not task.done():
+ task.cancel()
+ with suppress(asyncio.CancelledError):
+ await task
+ logger.info("Worker shutting down")
+
+ # Wait for all tasks to complete
+ await self.wait_all_tasks_done()
+
+ # Close all channels and the connection
+ await self.close_channels_and_connection()
+ except Exception as e:
+ logger.critical(f"Failed to establish initial connection to RabbitMQ: {e}")
+ # Re-raise the exception so it can be caught by the caller
+ raise
 
  async def wait_all_tasks_done(self) -> None:
  if not self.tasks:
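
Note on the hunk above: the consume method keeps the wait_for helper, which tags each setup coroutine with a kind and queue name so that results coming out of asyncio.as_completed() can be attributed to their source. A standalone sketch of that tagging pattern, with illustrative names rather than the package's real queues:

    import asyncio
    from typing import Awaitable

    async def tag(kind: str, name: str, coro: Awaitable[bool]) -> tuple[str, str, bool]:
        # Attach identifying metadata to a coroutine's result so the caller
        # can tell which setup succeeded or failed.
        return kind, name, await coro

    async def setup(ok: bool) -> bool:
        await asyncio.sleep(0)  # stand-in for real consumer setup
        return ok

    async def main() -> None:
        tasks = {
            asyncio.create_task(tag("message_handler", "orders", setup(True))),
            asyncio.create_task(tag("scheduled_action", "cleanup", setup(False))),
        }
        for fut in asyncio.as_completed(tasks):
            kind, name, success = await fut
            print(kind, name, "ok" if success else "failed")

    asyncio.run(main())
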
@@ -478,12 +468,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  with suppress(asyncio.CancelledError):
  await self.health_check_task
 
- # Cancel reconnection task if running
- if self.reconnection_task:
- self.reconnection_task.cancel()
- with suppress(asyncio.CancelledError):
- await self.reconnection_task
-
  await self.wait_all_tasks_done()
  await self.close_channels_and_connection()
 
@@ -492,16 +476,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  Get the channel for a specific queue, or None if not found.
  This helps with error handling when a channel might have been closed.
  """
- # If reconnection is in progress, wait for it to complete
- if self.reconnection_in_progress:
- try:
- await asyncio.wait_for(self.reconnection_event.wait(), timeout=30.0)
- except asyncio.TimeoutError:
- logger.warning(
- f"Timeout waiting for reconnection when getting channel for {queue_name}"
- )
- return None
-
  if queue_name not in self.channels:
  logger.warning(f"No channel found for queue {queue_name}")
  return None
@@ -530,17 +504,17 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  logger.error(
  f"Failed to recreate channel for {queue_name}: {e}"
  )
- # Trigger reconnection if channel creation fails
+ # Trigger shutdown if channel creation fails
  self._trigger_reconnection()
  return None
  else:
- # Connection is not healthy, trigger reconnection
+ # Connection is not healthy, trigger shutdown
  self._trigger_reconnection()
  return None
  return channel
  except Exception as e:
  logger.error(f"Error accessing channel for queue {queue_name}: {e}")
- # Trigger reconnection on any channel access error
+ # Trigger shutdown on any channel access error
  self._trigger_reconnection()
  return None
 
@@ -691,33 +665,14 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  yield new_channel
  return
  else:
- # Connection is not healthy, wait for reconnection
- if self.reconnection_in_progress:
- try:
- await asyncio.wait_for(
- self.reconnection_event.wait(), timeout=30.0
- )
- # Retry after reconnection
- continue
- except asyncio.TimeoutError:
- logger.warning(
- f"Timeout waiting for reconnection for queue {queue_name}"
- )
-
- # Still no connection, trigger reconnection
- if not self.reconnection_in_progress:
- self._trigger_reconnection()
-
- if attempt < max_retries - 1:
- logger.info(
- f"Retrying channel access for {queue_name} in {retry_delay}s"
- )
- await asyncio.sleep(retry_delay)
- retry_delay *= 2
- else:
- raise RuntimeError(
- f"Cannot get channel for queue {queue_name}: no connection available after {max_retries} attempts"
- )
+ # Connection is not healthy, trigger shutdown
+ logger.error(
+ f"Connection not healthy while getting channel for {queue_name}, triggering shutdown"
+ )
+ self._trigger_reconnection()
+ raise RuntimeError(
+ f"Cannot get channel for queue {queue_name}: connection is not healthy"
+ )
 
  except Exception as e:
  if attempt < max_retries - 1:
@@ -734,7 +689,7 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
 
  async def _monitor_connection_health(self) -> None:
  """
- Monitor connection health and trigger reconnection if needed.
+ Monitor connection health and trigger shutdown if connection is lost.
  This runs as a background task.
  """
  while not self.shutdown_event.is_set():
@@ -746,11 +701,11 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
 
  # Check connection health
  if not await self._is_connection_healthy():
- logger.warning(
- "Connection health check failed, triggering reconnection"
+ logger.error(
+ "Connection health check failed, initiating worker shutdown"
  )
- if not self.reconnection_in_progress:
- self._trigger_reconnection()
+ self.shutdown()
+ break
 
  except asyncio.CancelledError:
  logger.info("Connection health monitoring cancelled")
@@ -778,74 +733,12 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
 
  def _trigger_reconnection(self) -> None:
  """
- Trigger reconnection process.
+ Trigger worker shutdown due to connection loss.
  """
- if not self.reconnection_in_progress and not self.shutdown_event.is_set():
- self.reconnection_in_progress = True
+ if not self.shutdown_event.is_set():
+ logger.error("Connection lost, initiating worker shutdown")
  self.connection_healthy = False
- self.reconnection_event.clear()
-
- # Start reconnection task
- self.reconnection_task = asyncio.create_task(self._handle_reconnection())
- self.reconnection_task.add_done_callback(self._on_reconnection_done)
-
- def _on_reconnection_done(self, task: asyncio.Task[Any]) -> None:
- """
- Handle completion of reconnection task.
- """
- self.reconnection_in_progress = False
- if task.exception():
- logger.error(f"Reconnection task failed: {task.exception()}")
- else:
- logger.info("Reconnection completed successfully")
-
- async def _handle_reconnection(self) -> None:
- """
- Handle the reconnection process with exponential backoff.
- """
- logger.info("Starting reconnection process")
-
- # Close existing connection and channels
- await self._cleanup_connection()
-
- reconnection_config = self.config.reconnection_backoff_config
- attempt = 0
-
- while not self.shutdown_event.is_set():
- try:
- attempt += 1
- logger.info(f"Reconnection attempt {attempt}")
-
- # Establish new connection
- self.connection = await self._establish_connection()
- self.connection_healthy = True
-
- # Re-establish all consumers
- await self._reestablish_consumers()
-
- logger.info("Reconnection successful")
- self.reconnection_event.set()
- return
-
- except Exception as e:
- logger.error(f"Reconnection attempt {attempt} failed: {e}")
-
- if self.shutdown_event.is_set():
- break
-
- # Calculate backoff delay
- delay = reconnection_config.initial_delay * (
- reconnection_config.backoff_factor ** (attempt - 1)
- )
- if reconnection_config.jitter:
- jitter_amount = delay * 0.25
- delay = delay + random.uniform(-jitter_amount, jitter_amount)
- delay = max(delay, 0.1)
-
- delay = min(delay, reconnection_config.max_delay)
-
- logger.info(f"Retrying reconnection in {delay:.2f} seconds")
- await asyncio.sleep(delay)
+ self.shutdown()
 
  async def _cleanup_connection(self) -> None:
  """
@@ -889,32 +782,6 @@ class AioPikaMicroserviceConsumer(MessageBusConsumer):
  self.connection = None
  self.connection_healthy = False
 
- async def _reestablish_consumers(self) -> None:
- """
- Re-establish all consumers after reconnection.
- """
- logger.info("Re-establishing consumers after reconnection")
-
- # Re-establish message handlers
- for handler in self.message_handler_set:
- queue_name = f"{handler.message_type.MESSAGE_TOPIC}.{handler.instance_callable.__module__}.{handler.instance_callable.__qualname__}"
- try:
- await self._setup_message_handler_consumer(handler)
- logger.info(f"Re-established consumer for {queue_name}")
- except Exception as e:
- logger.error(f"Failed to re-establish consumer for {queue_name}: {e}")
-
- # Re-establish scheduled actions
- for scheduled_action in self.scheduled_actions:
- queue_name = f"{scheduled_action.callable.__module__}.{scheduled_action.callable.__qualname__}"
- try:
- await self._setup_scheduled_action_consumer(scheduled_action)
- logger.info(f"Re-established scheduler consumer for {queue_name}")
- except Exception as e:
- logger.error(
- f"Failed to re-establish scheduler consumer for {queue_name}: {e}"
- )
-
 
  def create_message_bus(
  broker_url: str,
@@ -1798,7 +1665,14 @@ class MessageBusWorker:
  loop.add_signal_handler(signal.SIGINT, on_shutdown, loop)
  # Add graceful shutdown handler for SIGTERM as well
  loop.add_signal_handler(signal.SIGTERM, on_shutdown, loop)
- runner.run(self.start_async())
+ try:
+ runner.run(self.start_async())
+ except Exception as e:
+ logger.critical(f"Worker failed to start due to connection error: {e}")
+ # Exit with error code 1 to indicate startup failure
+ import sys
+
+ sys.exit(1)
 
  async def _graceful_shutdown(self) -> None:
  """Handles graceful shutdown process"""
@@ -59,6 +59,7 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
  consume_broadcast_timeout: int = 1,
  consume_send_timeout: int = 1,
  retry_delay: float = 5.0,
+ max_concurrent_tasks: int = 1000,
  ) -> None:
 
  self.redis = conn
@@ -67,6 +68,8 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
 
  self.lock = asyncio.Lock()
  self.tasks: set[asyncio.Task[Any]] = set()
+ self.max_concurrent_tasks = max_concurrent_tasks
+ self.task_semaphore = asyncio.Semaphore(max_concurrent_tasks)
 
  self.consume_broadcast_timeout = consume_broadcast_timeout
  self.consume_send_timeout = consume_send_timeout
@@ -101,16 +104,26 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
  return self.__broadcast_func
 
  async def broadcast(self, message: bytes) -> None:
- await self.redis.publish(
- self.broadcast_pubsub_channel,
- BroadcastMessage.from_message(message).encode(),
- )
+ try:
+ await self.redis.publish(
+ self.broadcast_pubsub_channel,
+ BroadcastMessage.from_message(message).encode(),
+ )
+ except Exception as e:
+ logger.error(
+ f"Failed to publish broadcast message to Redis: {e}", exc_info=True
+ )
+ raise
 
  async def send(self, rooms: list[str], message: bytes) -> None:
- await self.redis.publish(
- self.send_pubsub_channel,
- SendToRoomsMessage.from_message(rooms, message).encode(),
- )
+ try:
+ await self.redis.publish(
+ self.send_pubsub_channel,
+ SendToRoomsMessage.from_message(rooms, message).encode(),
+ )
+ except Exception as e:
+ logger.error(f"Failed to publish send message to Redis: {e}", exc_info=True)
+ raise
 
  def configure(
  self, broadcast: BroadcastFunc, send: SendFunc, shutdown_event: asyncio.Event
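
With the change above, broadcast and send log a failed Redis publish and then re-raise, so the caller decides whether a publish failure is fatal. A hedged usage sketch, assuming a backend variable holding an instance of this class:

    async def notify_all(backend) -> None:
        # Hypothetical caller policy: drop the message instead of crashing,
        # since the backend has already logged the failure with exc_info=True.
        try:
            await backend.broadcast(b"room state changed")
        except Exception:
            pass
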
@@ -129,7 +142,12 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
  self.consume_send(self.send_func, self.shutdown_event)
  )
 
- self.tasks.add(send_task)
+ # Use lock when modifying tasks set to prevent race conditions
+ async def add_task() -> None:
+ async with self.lock:
+ self.tasks.add(send_task)
+
+ asyncio.get_event_loop().create_task(add_task())
  send_task.add_done_callback(self.handle_send_task_done)
 
  def setup_broadcast_consumer(self) -> None:
@@ -138,11 +156,23 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
  self.consume_broadcast(self.broadcast_func, self.shutdown_event)
  )
 
- self.tasks.add(broadcast_task)
+ # Use lock when modifying tasks set to prevent race conditions
+ async def add_task() -> None:
+ async with self.lock:
+ self.tasks.add(broadcast_task)
+
+ asyncio.get_event_loop().create_task(add_task())
 
  broadcast_task.add_done_callback(self.handle_broadcast_task_done)
 
  def handle_broadcast_task_done(self, task: asyncio.Task[Any]) -> None:
+ # Remove task from set safely with lock
+ async def remove_task() -> None:
+ async with self.lock:
+ self.tasks.discard(task)
+
+ asyncio.get_event_loop().create_task(remove_task())
+
  if task.cancelled():
  logger.warning("Broadcast task was cancelled.")
  elif task.exception() is not None:
@@ -162,6 +192,13 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
  )
 
  def handle_send_task_done(self, task: asyncio.Task[Any]) -> None:
+ # Remove task from set safely with lock
+ async def remove_task() -> None:
+ async with self.lock:
+ self.tasks.discard(task)
+
+ asyncio.get_event_loop().create_task(remove_task())
+
  if task.cancelled():
  logger.warning("Send task was cancelled.")
  elif task.exception() is not None:
@@ -204,54 +241,132 @@ class RedisWebSocketConnectionBackend(WebSocketConnectionBackend):
  self, broadcast: BroadcastFunc, shutdown_event: asyncio.Event
  ) -> None:
  logger.info("Starting broadcast consumer...")
- async with self.redis.pubsub() as pubsub:
- await pubsub.subscribe(self.broadcast_pubsub_channel)
-
- while not shutdown_event.is_set():
- message: dict[str, Any] | None = await pubsub.get_message(
- ignore_subscribe_messages=True,
- timeout=self.consume_broadcast_timeout,
+ try:
+ # Validate Redis connection before starting
+ try:
+ await self.redis.ping()
+ logger.info("Redis connection validated for broadcast consumer")
+ except Exception as e:
+ logger.error(f"Redis connection validation failed: {e}", exc_info=True)
+ raise
+
+ async with self.redis.pubsub() as pubsub:
+ await pubsub.subscribe(self.broadcast_pubsub_channel)
+ logger.info(
+ f"Subscribed to broadcast channel: {self.broadcast_pubsub_channel}"
  )
 
- if message is None:
- continue
-
- broadcast_message = BroadcastMessage.decode(message["data"])
-
- async with self.lock:
- task = asyncio.get_event_loop().create_task(
- broadcast(message=broadcast_message.message)
+ while not shutdown_event.is_set():
+ message: dict[str, Any] | None = await pubsub.get_message(
+ ignore_subscribe_messages=True,
+ timeout=self.consume_broadcast_timeout,
  )
 
- self.tasks.add(task)
-
- task.add_done_callback(self.tasks.discard)
+ if message is None:
+ continue
+
+ broadcast_message = BroadcastMessage.decode(message["data"])
+
+ # Use semaphore for backpressure control
+ acquired = False
+ try:
+ await self.task_semaphore.acquire()
+ acquired = True
+
+ async def broadcast_with_cleanup(msg: bytes) -> None:
+ try:
+ await broadcast(message=msg)
+ finally:
+ self.task_semaphore.release()
+
+ async with self.lock:
+ task = asyncio.get_event_loop().create_task(
+ broadcast_with_cleanup(broadcast_message.message)
+ )
+
+ self.tasks.add(task)
+
+ task.add_done_callback(self.tasks.discard)
+ except Exception as e:
+ # Release semaphore if we acquired it but failed to create task
+ if acquired:
+ self.task_semaphore.release()
+ logger.error(
+ f"Error processing broadcast message: {e}", exc_info=True
+ )
+ # Continue processing other messages
+ continue
+ except Exception as e:
+ logger.error(
+ f"Fatal error in broadcast consumer, will retry: {e}", exc_info=True
+ )
+ raise
 
  async def consume_send(self, send: SendFunc, shutdown_event: asyncio.Event) -> None:
  logger.info("Starting send consumer...")
- async with self.redis.pubsub() as pubsub:
- await pubsub.subscribe(self.send_pubsub_channel)
-
- while not shutdown_event.is_set():
-
- message: dict[str, Any] | None = await pubsub.get_message(
- ignore_subscribe_messages=True, timeout=self.consume_send_timeout
- )
-
- if message is None:
- continue
-
- send_message = SendToRoomsMessage.decode(message["data"])
-
- async with self.lock:
-
- task = asyncio.get_event_loop().create_task(
- send(send_message.rooms, send_message.message)
+ try:
+ # Validate Redis connection before starting
+ try:
+ await self.redis.ping()
+ logger.info("Redis connection validated for send consumer")
+ except Exception as e:
+ logger.error(f"Redis connection validation failed: {e}", exc_info=True)
+ raise
+
+ async with self.redis.pubsub() as pubsub:
+ await pubsub.subscribe(self.send_pubsub_channel)
+ logger.info(f"Subscribed to send channel: {self.send_pubsub_channel}")
+
+ while not shutdown_event.is_set():
+ message: dict[str, Any] | None = await pubsub.get_message(
+ ignore_subscribe_messages=True,
+ timeout=self.consume_send_timeout,
  )
 
- self.tasks.add(task)
-
- task.add_done_callback(self.tasks.discard)
+ if message is None:
+ continue
+
+ send_message = SendToRoomsMessage.decode(message["data"])
+
+ # Use semaphore for backpressure control
+ acquired = False
+ try:
+ await self.task_semaphore.acquire()
+ acquired = True
+
+ async def send_with_cleanup(
+ rooms: list[str], msg: bytes
+ ) -> None:
+ try:
+ await send(rooms, msg)
+ finally:
+ self.task_semaphore.release()
+
+ async with self.lock:
+
+ task = asyncio.get_event_loop().create_task(
+ send_with_cleanup(
+ send_message.rooms, send_message.message
+ )
+ )
+
+ self.tasks.add(task)
+
+ task.add_done_callback(self.tasks.discard)
+ except Exception as e:
+ # Release semaphore if we acquired it but failed to create task
+ if acquired:
+ self.task_semaphore.release()
+ logger.error(
+ f"Error processing send message: {e}", exc_info=True
+ )
+ # Continue processing other messages
+ continue
+ except Exception as e:
+ logger.error(
+ f"Fatal error in send consumer, will retry: {e}", exc_info=True
+ )
+ raise
 
  async def shutdown(self) -> None:
  async with self.lock:
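
The consumers above now bound the number of in-flight handler tasks with asyncio.Semaphore(max_concurrent_tasks): a permit is acquired before each task is created and released in a finally block once the handler finishes. A standalone sketch of that backpressure pattern, with illustrative names rather than the package's API:

    import asyncio

    async def handle(sem: asyncio.Semaphore, item: int) -> None:
        try:
            await asyncio.sleep(0.01)  # stand-in for real message handling
        finally:
            sem.release()  # always return the permit, even if handling raises

    async def main() -> None:
        sem = asyncio.Semaphore(10)  # at most 10 handlers in flight
        tasks: set[asyncio.Task[None]] = set()
        for item in range(100):
            await sem.acquire()  # producer blocks once 10 tasks are running
            task = asyncio.create_task(handle(sem, item))
            tasks.add(task)
            task.add_done_callback(tasks.discard)
        await asyncio.gather(*tasks)

    asyncio.run(main())
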
@@ -85,13 +85,24 @@ class WebSocketConnectionManagerImpl(WebSocketConnectionManager):
  await self.backend.broadcast(message)
 
  async def _broadcast_from_backend(self, message: bytes) -> None:
- for websocket in self.all_websockets:
+ # Create a copy of the websockets set to avoid modification during iteration
+ async with self.lock:
+ websockets_to_send = list(self.all_websockets)
+
+ disconnected_websockets: list[WebSocket] = []
+
+ for websocket in websockets_to_send:
  try:
  if websocket.client_state == WebSocketState.CONNECTED:
  await websocket.send_bytes(message)
  except WebSocketDisconnect:
- async with self.lock: # TODO: check if this can cause concurrency slowdown issues
- self.all_websockets.remove(websocket)
+ disconnected_websockets.append(websocket)
+
+ # Clean up disconnected websockets in a single lock acquisition
+ if disconnected_websockets:
+ async with self.lock:
+ for websocket in disconnected_websockets:
+ self.all_websockets.discard(websocket)
 
  async def send(self, rooms: list[str], message: WebSocketMessageBase) -> None:
 
@@ -103,16 +114,28 @@ class WebSocketConnectionManagerImpl(WebSocketConnectionManager):
  )
 
  async def _send_from_backend(self, rooms: list[str], message: bytes) -> None:
+ # Create a copy of room memberships to avoid modification during iteration
+ async with self.lock:
+ room_websockets: dict[str, list[WebSocket]] = {
+ room: list(self.rooms.get(room, set())) for room in rooms
+ }
+
+ disconnected_by_room: dict[str, list[WebSocket]] = {room: [] for room in rooms}
+
+ for room, websockets in room_websockets.items():
+ for websocket in websockets:
+ try:
+ if websocket.client_state == WebSocketState.CONNECTED:
+ await websocket.send_bytes(message)
+ except WebSocketDisconnect:
+ disconnected_by_room[room].append(websocket)
+
+ # Clean up disconnected websockets in a single lock acquisition
  async with self.lock:
- for room in rooms:
- for websocket in self.rooms.get(room, set()):
- try:
- if websocket.client_state == WebSocketState.CONNECTED:
- await websocket.send_bytes(message)
- except WebSocketDisconnect:
- async with self.lock:
- if websocket in self.rooms[room]:
- self.rooms[room].remove(websocket)
+ for room, disconnected_websockets in disconnected_by_room.items():
+ if room in self.rooms:
+ for websocket in disconnected_websockets:
+ self.rooms[room].discard(websocket)
 
  async def join(self, rooms: list[str], websocket: WebSocket) -> None:
  for room in rooms:
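
Both send paths above now follow the same shape: take the lock only long enough to snapshot the current membership, send outside the lock, then take the lock once more to discard whatever disconnected. A condensed sketch of that pattern; lock, connections, and send_one are hypothetical stand-ins for the manager's attributes:

    import asyncio
    from typing import Any, Awaitable, Callable

    async def fan_out(
        lock: asyncio.Lock,
        connections: set[Any],
        send_one: Callable[[Any, bytes], Awaitable[None]],
        message: bytes,
    ) -> None:
        # 1) Snapshot under the lock so concurrent joins/leaves cannot mutate
        #    the set while we iterate.
        async with lock:
            targets = list(connections)

        # 2) Send without holding the lock; record failures instead of
        #    mutating the shared set in place.
        dead = []
        for conn in targets:
            try:
                await send_one(conn, message)
            except ConnectionError:  # stand-in for WebSocketDisconnect
                dead.append(conn)

        # 3) Remove dead connections in a single lock acquisition.
        if dead:
            async with lock:
                for conn in dead:
                    connections.discard(conn)
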
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: jararaca
- Version: 0.3.16
+ Version: 0.3.18
  Summary: A simple and fast API framework for Python
  Home-page: https://github.com/LuscasLeo/jararaca
  Author: Lucas S
@@ -1,6 +1,6 @@
  LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
  README.md,sha256=YmCngjU8llW0l7L3tuXkkfr8qH7V9aBMgfp2jEzeiKg,3517
- pyproject.toml,sha256=Qn_u7qKJbAetHjXlx0gwVX_gnib76NkIQd6zQMYfkro,2832
+ pyproject.toml,sha256=qmLCDQEsY1Me9LPJoJM7-ZQM5qJZKE_uzyOazfgOP4I,2832
  jararaca/__init__.py,sha256=IMnvfDoyNWTGVittF_wq2Uxtv_BY_wLN5Om6C3vUsCw,22302
  jararaca/__main__.py,sha256=-O3vsB5lHdqNFjUtoELDF81IYFtR-DSiiFMzRaiSsv4,67
  jararaca/broker_backend/__init__.py,sha256=GzEIuHR1xzgCJD4FE3harNjoaYzxHMHoEL0_clUaC-k,3528
@@ -17,13 +17,13 @@ jararaca/lifecycle.py,sha256=qKlzLQQioS8QkxNJ_FC_5WbmT77cNbc_S7OcQeOoHkI,1895
  jararaca/messagebus/__init__.py,sha256=5jAqPqdcEMYBfQyfZDWPnplYdrfMyJLMcacf3qLyUhk,56
  jararaca/messagebus/bus_message_controller.py,sha256=Xd_qwnX5jUvgBTCarHR36fvtol9lPTsYp2IIGKyQQaE,1487
  jararaca/messagebus/consumers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jararaca/messagebus/decorators.py,sha256=P5z0BBL4hJfgCJHKbsDhSZmlRwzV4PlCPKvEZ4sguXM,6013
+ jararaca/messagebus/decorators.py,sha256=71ZvZ5slKruAsTaUHzr2f-D8yaUFl9Xxh9chNLaAs9E,6000
  jararaca/messagebus/interceptors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jararaca/messagebus/interceptors/aiopika_publisher_interceptor.py,sha256=_DEHwIH9LYsA26Hu1mo9oHzLZuATgjilU9E3o-ecDjs,6520
  jararaca/messagebus/interceptors/publisher_interceptor.py,sha256=ojy1bRhqMgrkQljcGGS8cd8-8pUjL8ZHjIUkdmaAnNM,1325
  jararaca/messagebus/message.py,sha256=U6cyd2XknX8mtm0333slz5fanky2PFLWCmokAO56vvU,819
  jararaca/messagebus/publisher.py,sha256=JTkxdKbvxvDWT8nK8PVEyyX061vYYbKQMxRHXrZtcEY,2173
- jararaca/messagebus/worker.py,sha256=TdWdKHFUc3AAsqhLZI08Y3uc5iqs0GtDHmEhyqYWObI,73968
+ jararaca/messagebus/worker.py,sha256=Feh7b3z68sj7a94z0lwMowLgU41pFF2PGgefhdH8rw8,68926
  jararaca/microservice.py,sha256=OYCw5C4797X_tVnM_9sEz8BdjbICPHSVsCixsA_FwE4,11419
  jararaca/observability/decorators.py,sha256=MOIr2PttPYYvRwEdfQZEwD5RxKHOTv8UEy9n1YQVoKw,2281
  jararaca/observability/interceptor.py,sha256=U4ZLM0f8j6Q7gMUKKnA85bnvD-Qa0ii79Qa_X8KsXAQ,1498
@@ -47,9 +47,9 @@ jararaca/presentation/websocket/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
  jararaca/presentation/websocket/base_types.py,sha256=AvUeeZ1TFhSiRMcYqZU1HaQNqSrcgTkC5R0ArP5dGmA,146
  jararaca/presentation/websocket/context.py,sha256=A6K5W3kqo9Hgeh1m6JiI7Cdz5SfbXcaICSVX7u1ARZo,1903
  jararaca/presentation/websocket/decorators.py,sha256=ZNd5aoA9UkyfHOt1C8D2Ffy2gQUNDEsusVnQuTgExgs,2157
- jararaca/presentation/websocket/redis.py,sha256=XG_kfr-msgEdfIZkD5JB_GH5lWoEHXwTzpQNVOONvfc,8985
+ jararaca/presentation/websocket/redis.py,sha256=1vykr3mcdSDGpSu1rbb4vGnUZNZEvjRfXlIR7TiSho8,13931
  jararaca/presentation/websocket/types.py,sha256=M8snAMSdaQlKrwEM2qOgF2qrefo5Meio_oOw620Joc8,308
- jararaca/presentation/websocket/websocket_interceptor.py,sha256=JWn_G8Q2WO0-1kmN7-Gv0HkIM6nZ_yjCdGRuXUS8F7A,9191
+ jararaca/presentation/websocket/websocket_interceptor.py,sha256=c5q8sBi82jXidK4m9KJo-OXmwb-nKsW-dK1DfRqJnlc,10124
  jararaca/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jararaca/reflect/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jararaca/reflect/controller_inspect.py,sha256=UtV4pRIOqCoK4ogBTXQE0dyopEQ5LDFhwm-1iJvrkJc,2326
@@ -74,8 +74,8 @@ jararaca/tools/typescript/interface_parser.py,sha256=yOSuOXKOeG0soGFo0fKiZIabu4Y
  jararaca/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jararaca/utils/rabbitmq_utils.py,sha256=ytdAFUyv-OBkaVnxezuJaJoLrmN7giZgtKeet_IsMBs,10918
  jararaca/utils/retry.py,sha256=DzPX_fXUvTqej6BQ8Mt2dvLo9nNlTBm7Kx2pFZ26P2Q,4668
- jararaca-0.3.16.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- jararaca-0.3.16.dist-info/METADATA,sha256=mIIZae49Gn54udR4m7qk3ZwTjx66iSQpgIp9Abm_Ymo,5149
- jararaca-0.3.16.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- jararaca-0.3.16.dist-info/entry_points.txt,sha256=WIh3aIvz8LwUJZIDfs4EeH3VoFyCGEk7cWJurW38q0I,45
- jararaca-0.3.16.dist-info/RECORD,,
+ jararaca-0.3.18.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ jararaca-0.3.18.dist-info/METADATA,sha256=5BT5cExvZh792lstVc-YrVbWM51tGXEG1EE6VD-EH4k,5149
+ jararaca-0.3.18.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ jararaca-0.3.18.dist-info/entry_points.txt,sha256=WIh3aIvz8LwUJZIDfs4EeH3VoFyCGEk7cWJurW38q0I,45
+ jararaca-0.3.18.dist-info/RECORD,,
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "jararaca"
- version = "0.3.16"
+ version = "0.3.18"
  description = "A simple and fast API framework for Python"
  authors = ["Lucas S <me@luscasleo.dev>"]
  readme = "README.md"