buz-2.14.2-py3-none-any.whl → buz-2.14.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py

@@ -1,3 +1,4 @@
+import json
 import traceback
 from abc import abstractmethod
 from asyncio import Lock, Task, create_task, gather, Semaphore, Event as AsyncIOEvent, sleep
@@ -5,6 +6,7 @@ from datetime import timedelta, datetime
 from itertools import cycle
 from logging import Logger
 from typing import AsyncIterator, Optional, Sequence, Type, TypeVar
+from aiohttp import web
 
 from aiokafka import TopicPartition
 from aiokafka.coordinator.assignors.abstract import AbstractPartitionAssignor
@@ -43,6 +45,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         kafka_partition_assignors: tuple[Type[AbstractPartitionAssignor], ...] = (),
         subscribers: Sequence[MetaSubscriber],
         logger: Logger,
+        health_check_port: Optional[int],
         consumer_initial_offset_position: ConsumerInitialOffsetPosition,
         auto_create_topic_configuration: Optional[AutoCreateTopicConfiguration] = None,
         seconds_between_executions_if_there_are_no_tasks_in_the_queue: int = 1,
@@ -55,6 +58,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         self.__kafka_partition_assignors = kafka_partition_assignors
         self.__subscribers = subscribers
         self._logger = logger
+        self.__health_check_port = health_check_port
         self.__consumer_initial_offset_position = consumer_initial_offset_position
         self.__max_records_retrieved_per_poll = 1
         self.__executor_per_consumer_mapper: dict[AIOKafkaConsumer, KafkaEventSubscriberExecutor] = {}
@@ -78,12 +82,26 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         )
         self.__seconds_between_polls_if_there_are_no_new_tasks = seconds_between_polls_if_there_are_no_new_tasks
         self.__polling_tasks_semaphore = Semaphore(max_number_of_concurrent_polling_tasks)
-
         self.__task_execution_mutex = Lock()
 
+    async def configure_health_check_server(self, health_check_port: int) -> web.TCPSite:
+        self._logger.info(f"Starting health check server on port {health_check_port}")
+        app = web.Application()
+        app.router.add_get("/health", lambda request: self.__health_check())
+        runner = web.AppRunner(app)
+        await runner.setup()
+        site = web.TCPSite(runner, "localhost", health_check_port)
+        await site.start()
+        return site
+
     async def run(self) -> None:
         start_time = datetime.now()
         await self.__generate_kafka_consumers()
+        health_check_server: Optional[web.TCPSite] = None
+
+        if self.__health_check_port is not None:
+            health_check_server = await self.configure_health_check_server(self.__health_check_port)
+
         self.__initial_coroutines_created_elapsed_time = datetime.now() - start_time
 
         if len(self.__executor_per_consumer_mapper) == 0:
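The new configure_health_check_server method uses the standard aiohttp programmatic-runner pattern: build a web.Application, register a GET /health route, and expose it through a TCPSite that can later be stopped without blocking the running event loop. A minimal standalone sketch of that same pattern, independent of the consumer class and with an illustrative handler and port of my own choosing, looks like this:

import asyncio
from aiohttp import web


async def main() -> None:
    # Illustrative handler; the real consumer routes /health to its private __health_check method.
    async def health(request: web.Request) -> web.Response:
        return web.json_response({"status": "ok"})

    app = web.Application()
    app.router.add_get("/health", health)

    runner = web.AppRunner(app)
    await runner.setup()                 # prepare the runner before creating sites
    site = web.TCPSite(runner, "localhost", 8080)
    await site.start()                   # serve in the background on the current loop

    await asyncio.sleep(60)              # stand-in for the consumer's polling work
    await site.stop()                    # mirrors what run() does once consumption finishes
    await runner.cleanup()               # release the runner's resources


if __name__ == "__main__":
    asyncio.run(main())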
@@ -94,6 +112,9 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         worker_errors = await self.__run_worker()
         self.__events_processed_elapsed_time = datetime.now() - start_consumption_time
 
+        if health_check_server is not None:
+            await health_check_server.stop()
+
         await self.__handle_graceful_stop(worker_errors)
 
     async def __handle_graceful_stop(self, worker_errors: tuple[Optional[Exception], Optional[Exception]]) -> None:
@@ -270,6 +291,15 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         for kafka_consumer in self.__queue_per_consumer_mapper.keys():
             await kafka_consumer.stop()
 
+    async def __health_check(self) -> web.Response:
+        health_information = {
+            "subscribers": [subscriber.fqn() for subscriber in self.__subscribers],
+            "number_of_subscribers": len(self.__subscribers),
+            "event_processed": self.__events_processed,
+        }
+
+        return web.Response(text=json.dumps(health_information), content_type="application/json")
+
     def __print_statistics(self) -> None:
         self._logger.info("Number of subscribers: %d", len(self.__subscribers))
         self._logger.info(f"Start kafka consumers elapsed time: {self.__start_kafka_consumers_elapsed_time}")
buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py

@@ -51,6 +51,7 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
         seconds_between_polls_if_there_are_tasks_in_the_queue: int = 1,
         seconds_between_polls_if_there_are_no_new_tasks: int = 1,
         max_number_of_concurrent_polling_tasks: int = 20,
+        health_check_port: Optional[int] = None,
     ):
         super().__init__(
             connection_config=connection_config,
@@ -67,6 +68,7 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
             seconds_between_polls_if_there_are_tasks_in_the_queue=seconds_between_polls_if_there_are_tasks_in_the_queue,
             seconds_between_polls_if_there_are_no_new_tasks=seconds_between_polls_if_there_are_no_new_tasks,
             max_number_of_concurrent_polling_tasks=max_number_of_concurrent_polling_tasks,
+            health_check_port=health_check_port,
         )
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_middlewares = consume_middlewares
buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py

@@ -52,6 +52,7 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
         seconds_between_polls_if_there_are_tasks_in_the_queue: int = 1,
         seconds_between_polls_if_there_are_no_new_tasks: int = 1,
         max_number_of_concurrent_polling_tasks: int = 20,
+        health_check_port: Optional[int] = 3123,
     ):
         super().__init__(
             connection_config=connection_config,
@@ -68,6 +69,7 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
             seconds_between_polls_if_there_are_tasks_in_the_queue=seconds_between_polls_if_there_are_tasks_in_the_queue,
             seconds_between_polls_if_there_are_no_new_tasks=seconds_between_polls_if_there_are_no_new_tasks,
             max_number_of_concurrent_polling_tasks=max_number_of_concurrent_polling_tasks,
+            health_check_port=health_check_port,
         )
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_middlewares = consume_middlewares
buz-2.14.2.dist-info/METADATA → buz-2.14.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: buz
-Version: 2.14.2
+Version: 2.14.3
 Summary: Buz is a set of light, simple and extensible implementations of event, command and query buses.
 License: MIT
 Author: Luis Pintado Lozano
@@ -22,6 +22,7 @@ Classifier: Typing :: Typed
 Provides-Extra: aiokafka
 Provides-Extra: kombu
 Provides-Extra: pypendency
+Requires-Dist: aiohttp (>=3.11.13,<4.0.0)
 Requires-Dist: aiokafka[lz4] (==0.12.0) ; extra == "aiokafka"
 Requires-Dist: asgiref (>=3.8.1,<4.0.0) ; extra == "aiokafka"
 Requires-Dist: asyncio (>=3.4.3,<4.0.0) ; extra == "aiokafka"
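The new aiohttp requirement carries no "extra ==" marker, so unlike the aiokafka, kombu, and pypendency extras it is installed unconditionally with the base package. A quick way to confirm which requirements an installed copy of buz declares (assuming buz 2.14.3 is installed in the current environment) is the standard library's importlib.metadata:

from importlib.metadata import requires, version

# Print the installed buz version and any declared aiohttp requirement.
print("buz", version("buz"))
for requirement in requires("buz") or []:
    if requirement.startswith("aiohttp"):
        print(requirement)  # e.g. 'aiohttp (>=3.11.13,<4.0.0)' per the 2.14.3 metadata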
buz-2.14.2.dist-info/RECORD → buz-2.14.3.dist-info/RECORD

@@ -47,9 +47,9 @@ buz/event/exceptions/worker_execution_exception.py,sha256=6mgztvXOCG_9VZ_Jptkk72
 buz/event/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py,sha256=SyLblUVlwWOaNfZzK7vL6Ee4m-85vZVCH0rjOgqVAww,4913
-buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=E9Sy6IDZrywowcO9qIOJF5zjFvnE4CncTiZD3VC-554,13793
-buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=u69_YYDsztq4cZDKQPo4x8FPIx-NRzHJe1SCq0GaCjg,5732
-buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=Si7bqWfsKqOjH6PIxnqtOgO_fSviVJbt2G9avv2DgiM,5675
+buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=ybdytgOGUYN8ql-7wRk-zLYFK4_prZdNeb5uzKXZY7Q,15084
+buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=bArqX8vE_jUOjfIzo1QGROxsachat-5n2vz013utDFA,5830
+buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=S7veewA3hIabW_ILfNoJbWbry1VmUKlhBEo6Ocj7c_k,5773
 buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py,sha256=ymRSvcYVgbVCPgHN6rMBVBHQ5heCSwCDl6EffyqGVX8,4601
 buz/event/infrastructure/buz_kafka/consume_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/consume_strategy/consume_strategy.py,sha256=RqlXe5W2S6rH3FTr--tcxzFJTAVLb-Dhl7m6qjgNz2M,331
@@ -245,7 +245,7 @@ buz/serializer/message_to_json_bytes_serializer.py,sha256=RGZJ64t4t4Pz2FCASZZCv-
 buz/wrapper/__init__.py,sha256=GnRdJFcncn-qp0hzDG9dBHLmTJSbHFVjE_yr-MdW_n4,77
 buz/wrapper/async_to_sync.py,sha256=OfK-vrVUhuN-LLLvekLdMbQYtH0ue5lfbvuasj6ovMI,698
 buz/wrapper/event_loop.py,sha256=pfBJ1g-8A2a3YgW8Gf9Fg0kkewoh3-wgTy2KIFDyfHk,266
-buz-2.14.2.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
-buz-2.14.2.dist-info/METADATA,sha256=LIxzwAWa6gNGKmz32C5UzoI6hMr-g1lc9AWIEoguWno,1617
-buz-2.14.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-buz-2.14.2.dist-info/RECORD,,
+buz-2.14.3.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
+buz-2.14.3.dist-info/METADATA,sha256=mIxvjnK8YV9oyOgKXUnEoF_1OfXv1DuzTguIlFX6z_g,1659
+buz-2.14.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+buz-2.14.3.dist-info/RECORD,,
buz-2.14.2.dist-info/LICENSE → buz-2.14.3.dist-info/LICENSE: file without changes
buz-2.14.2.dist-info/WHEEL → buz-2.14.3.dist-info/WHEEL: file without changes