buz-2.13.0rc1-py3-none-any.whl → buz-2.13.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. buz/event/async_event_bus.py +15 -0
  2. buz/event/dead_letter_queue/dlq_record.py +13 -0
  3. buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py +107 -0
  4. buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py +67 -78
  5. buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py +3 -4
  6. buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py +2 -4
  7. buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py +19 -16
  8. buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py +2 -7
  9. buz/event/infrastructure/kombu/kombu_consumer.py +1 -0
  10. buz/event/middleware/async_publish_middleware.py +13 -0
  11. buz/event/middleware/async_publish_middleware_chain_resolver.py +22 -0
  12. buz/kafka/__init__.py +28 -2
  13. buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py +8 -0
  14. buz/kafka/domain/exceptions/topic_not_found_exception.py +6 -0
  15. buz/kafka/domain/models/__init__.py +0 -0
  16. buz/kafka/domain/models/kafka_supported_compression_type.py +8 -0
  17. buz/kafka/domain/services/__init__.py +0 -0
  18. buz/kafka/domain/services/async_kafka_producer.py +21 -0
  19. buz/kafka/domain/services/kafka_admin_client.py +16 -1
  20. buz/kafka/domain/services/kafka_producer.py +3 -1
  21. buz/kafka/infrastructure/aiokafka/aiokafka_consumer.py +23 -15
  22. buz/kafka/infrastructure/aiokafka/aiokafka_producer.py +98 -0
  23. buz/kafka/infrastructure/cdc/cdc_message.py +3 -1
  24. buz/kafka/infrastructure/deserializers/implementations/cdc/cdc_record_bytes_to_event_deserializer.py +9 -4
  25. buz/kafka/infrastructure/interfaces/__init__.py +0 -0
  26. buz/kafka/infrastructure/interfaces/async_connection_manager.py +11 -0
  27. buz/kafka/infrastructure/interfaces/connection_manager.py +11 -0
  28. buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py +132 -16
  29. buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py +11 -2
  30. buz/kafka/infrastructure/kafka_python/kafka_python_producer.py +51 -22
  31. buz/kafka/infrastructure/serializers/implementations/cdc_record_bytes_to_event_serializer.py +6 -1
  32. buz/queue/in_memory/in_memory_multiqueue_repository.py +0 -20
  33. {buz-2.13.0rc1.dist-info → buz-2.13.1.dist-info}/METADATA +2 -2
  34. {buz-2.13.0rc1.dist-info → buz-2.13.1.dist-info}/RECORD +37 -24
  35. buz/kafka/infrastructure/kafka_python/factories/kafka_python_producer_factory.py +0 -20
  36. /buz/kafka/{infrastructure/kafka_python/factories → domain/exceptions}/__init__.py +0 -0
  37. {buz-2.13.0rc1.dist-info → buz-2.13.1.dist-info}/LICENSE +0 -0
  38. {buz-2.13.0rc1.dist-info → buz-2.13.1.dist-info}/WHEEL +0 -0
buz/event/async_event_bus.py ADDED
@@ -0,0 +1,15 @@
+from abc import ABC, abstractmethod
+from typing import Collection
+
+from buz.event import Event
+from buz.kafka.infrastructure.interfaces.async_connection_manager import AsyncConnectionManager
+
+
+class AsyncEventBus(AsyncConnectionManager, ABC):
+    @abstractmethod
+    async def publish(self, event: Event) -> None:
+        pass
+
+    @abstractmethod
+    async def bulk_publish(self, events: Collection[Event]) -> None:
+        pass
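
Reviewer note: the new `AsyncEventBus` inherits from `AsyncConnectionManager` (file 26 above), which presumably declares async `connect`/`disconnect`, so concrete buses must supply those too. A minimal sketch of a conforming implementation — the in-memory class below is hypothetical, not shipped by buz:

```python
from typing import Collection

from buz.event import Event
from buz.event.async_event_bus import AsyncEventBus


class InMemoryAsyncEventBus(AsyncEventBus):
    """Hypothetical test double showing the full surface of the new ABC."""

    def __init__(self) -> None:
        self.published: list[Event] = []

    async def connect(self) -> None:  # assumed to come from AsyncConnectionManager
        pass

    async def disconnect(self) -> None:  # assumed to come from AsyncConnectionManager
        pass

    async def publish(self, event: Event) -> None:
        self.published.append(event)

    async def bulk_publish(self, events: Collection[Event]) -> None:
        for event in events:
            await self.publish(event)
```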
buz/event/dead_letter_queue/dlq_record.py CHANGED
@@ -8,6 +8,7 @@ DlqRecordId = UUID
 
 @dataclass
 class DlqRecord:
+    __EXCEPTION_MESSAGE_MAX_CHARACTERS: ClassVar[int] = 300
     DATE_TIME_FORMAT: ClassVar[str] = "%Y-%m-%d %H:%M:%S.%f"
 
     id: DlqRecordId
@@ -18,6 +19,18 @@ class DlqRecord:
     exception_message: str
     last_failed_at: datetime
 
+    def __post_init__(self) -> None:
+        self.exception_message = self.__add_ellipsis(self.exception_message)
+
+    def __add_ellipsis(self, message: str) -> str:
+        if len(message) <= self.__EXCEPTION_MESSAGE_MAX_CHARACTERS:
+            return message
+        return message[: self.__EXCEPTION_MESSAGE_MAX_CHARACTERS - 3] + "..."
+
+    def set_exception(self, exception: Exception) -> None:
+        self.exception_type = type(exception).__name__
+        self.exception_message = self.__add_ellipsis(str(exception))
+
     def mark_as_failed(self) -> None:
        self.last_failed_at = datetime.now()
 
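
A standalone illustration (hypothetical values) of the truncation rule `__add_ellipsis` introduces: messages at or under 300 characters pass through, longer ones are cut to 297 characters plus `"..."`, so the stored string never exceeds the cap:

```python
MAX_CHARACTERS = 300  # mirrors __EXCEPTION_MESSAGE_MAX_CHARACTERS above

message = "x" * 500
stored = message if len(message) <= MAX_CHARACTERS else message[: MAX_CHARACTERS - 3] + "..."

assert len(stored) == MAX_CHARACTERS
assert stored.endswith("...")
```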
buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py ADDED
@@ -0,0 +1,107 @@
+from logging import Logger
+from typing import Collection, Optional
+
+from buz.event import Event
+from buz.event.async_event_bus import AsyncEventBus
+from buz.event.exceptions.event_not_published_exception import EventNotPublishedException
+from buz.event.infrastructure.buz_kafka.exceptions.kafka_event_bus_config_not_valid_exception import (
+    KafkaEventBusConfigNotValidException,
+)
+from buz.event.infrastructure.buz_kafka.publish_strategy.publish_strategy import KafkaPublishStrategy
+from buz.event.middleware.async_publish_middleware import AsyncPublishMiddleware
+from buz.event.middleware.async_publish_middleware_chain_resolver import AsyncPublishMiddlewareChainResolver
+from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
+from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
+from buz.kafka.domain.models.create_kafka_topic import CreateKafkaTopic
+from buz.kafka.domain.services.async_kafka_producer import AsyncKafkaProducer
+from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+
+
+class AsyncBuzKafkaEventBus(AsyncEventBus):
+    def __init__(
+        self,
+        *,
+        publish_strategy: KafkaPublishStrategy,
+        producer: AsyncKafkaProducer,
+        logger: Logger,
+        kafka_admin_client: Optional[KafkaAdminClient] = None,
+        publish_middlewares: Optional[list[AsyncPublishMiddleware]] = None,
+        auto_create_topic_configuration: Optional[AutoCreateTopicConfiguration] = None,
+    ):
+        self.__publish_middleware_chain_resolver = AsyncPublishMiddlewareChainResolver(publish_middlewares or [])
+        self.__publish_strategy = publish_strategy
+        self.__producer = producer
+        self.__topics_checked: dict[str, bool] = {}
+        self.__kafka_admin_client = kafka_admin_client
+        self.__auto_create_topic_configuration = auto_create_topic_configuration
+        self.__logger = logger
+        self.__check_kafka_admin_client_is_needed()
+
+    def __check_kafka_admin_client_is_needed(self) -> None:
+        if self.__kafka_admin_client is None and self.__auto_create_topic_configuration is not None:
+            raise KafkaEventBusConfigNotValidException(
+                "A KafkaAdminClient is needed to create topics when 'auto_create_topic_configuration' is set."
+            )
+
+    async def publish(self, event: Event) -> None:
+        await self.__publish_middleware_chain_resolver.resolve(event, self.__perform_publish)
+
+    async def __perform_publish(self, event: Event) -> None:
+        try:
+            topic = self.__publish_strategy.get_topic(event)
+
+            if self.__auto_create_topic_configuration is not None and self.__is_topic_created(topic) is False:
+                try:
+                    self.__logger.info(f"Creating missing topic: {topic}..")
+                    self.__get_kafka_admin_client().create_topics(
+                        topics=[
+                            CreateKafkaTopic(
+                                name=topic,
+                                partitions=self.__auto_create_topic_configuration.partitions,
+                                replication_factor=self.__auto_create_topic_configuration.replication_factor,
+                                configs=self.__auto_create_topic_configuration.configs,
+                            )
+                        ]
+                    )
+                    self.__logger.info(f"Created missing topic: {topic}")
+                    self.__topics_checked[topic] = True
+                except KafkaTopicsAlreadyCreatedException:
+                    pass
+
+            headers = self.__get_event_headers(event)
+            await self.__producer.produce(
+                message=event,
+                headers=headers,
+                topic=topic,
+            )
+        except Exception as exc:
+            raise EventNotPublishedException(event) from exc
+
+    def __get_kafka_admin_client(self) -> KafkaAdminClient:
+        if self.__kafka_admin_client is None:
+            raise KafkaEventBusConfigNotValidException("KafkaAdminClient is not set.")
+        return self.__kafka_admin_client
+
+    def __is_topic_created(self, topic: str) -> bool:
+        is_topic_created = self.__topics_checked.get(topic, None)
+
+        if is_topic_created is not None:
+            return is_topic_created
+
+        is_topic_created = self.__get_kafka_admin_client().is_topic_created(topic)
+        self.__topics_checked[topic] = is_topic_created
+
+        return is_topic_created
+
+    async def bulk_publish(self, events: Collection[Event]) -> None:
+        for event in events:
+            await self.publish(event)
+
+    def __get_event_headers(self, event: Event) -> dict:
+        return {"id": event.id}
+
+    async def connect(self) -> None:
+        await self.__producer.connect()
+
+    async def disconnect(self) -> None:
+        await self.__producer.disconnect()
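
Usage-wise, the new bus is a thin lifecycle wrapper: `connect`/`disconnect` delegate to the `AsyncKafkaProducer`, and `bulk_publish` awaits each event in order. A minimal wiring sketch (the surrounding setup is assumed; only the four methods come from the diff):

```python
from buz.event import Event
from buz.kafka import AsyncBuzKafkaEventBus


async def publish_batch(bus: AsyncBuzKafkaEventBus, events: list[Event]) -> None:
    await bus.connect()  # opens the underlying AsyncKafkaProducer
    try:
        await bus.bulk_publish(events)  # publishes sequentially, failing fast
    finally:
        await bus.disconnect()  # release the producer even if publishing fails
```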
buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py CHANGED
@@ -1,10 +1,10 @@
-from abc import abstractmethod
 import traceback
-from asyncio import Lock, gather, Semaphore, Event as AsyncIOEvent, sleep
+from abc import abstractmethod
+from asyncio import Lock, Task, create_task, gather, Semaphore, Event as AsyncIOEvent, sleep
 from datetime import timedelta, datetime
 from itertools import cycle
 from logging import Logger
-from typing import AsyncIterator, Coroutine, Optional, Sequence, Type, TypeVar
+from typing import AsyncIterator, Optional, Sequence, Type, TypeVar
 
 from aiokafka import TopicPartition
 from aiokafka.coordinator.assignors.abstract import AbstractPartitionAssignor
@@ -17,18 +17,15 @@ from buz.event.infrastructure.buz_kafka.consume_strategy.consume_strategy import
 from buz.event.infrastructure.buz_kafka.kafka_event_subscriber_executor import KafkaEventSubscriberExecutor
 from buz.event.infrastructure.models.consuming_task import ConsumingTask
 from buz.event.meta_subscriber import MetaSubscriber
-from buz.kafka import (
-    KafkaConnectionConfig,
-    ConsumerInitialOffsetPosition,
-)
 from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
+from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
+from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
 from buz.kafka.domain.models.kafka_poll_record import KafkaPollRecord
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
 from buz.kafka.infrastructure.aiokafka.aiokafka_consumer import AIOKafkaConsumer
 from buz.queue.in_memory.in_memory_multiqueue_repository import InMemoryMultiqueueRepository
 from buz.queue.multiqueue_repository import MultiqueueRepository
 
-
 T = TypeVar("T", bound=Event)
 
 
@@ -60,7 +57,6 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         self._logger = logger
         self.__consumer_initial_offset_position = consumer_initial_offset_position
         self.__max_records_retrieved_per_poll = 1
-        self.__subscriber_per_consumer_mapper: dict[AIOKafkaConsumer, MetaSubscriber] = {}
         self.__executor_per_consumer_mapper: dict[AIOKafkaConsumer, KafkaEventSubscriberExecutor] = {}
         self.__queue_per_consumer_mapper: dict[
             AIOKafkaConsumer, MultiqueueRepository[TopicPartition, KafkaPollRecord]
@@ -88,16 +84,13 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
     async def run(self) -> None:
         start_time = datetime.now()
         await self.__generate_kafka_consumers()
+        self.__initial_coroutines_created_elapsed_time = datetime.now() - start_time
 
-        if len(self.__subscriber_per_consumer_mapper) == 0:
+        if len(self.__executor_per_consumer_mapper) == 0:
             self._logger.error("There are no valid subscribers to execute, finalizing consumer")
             return
 
-        self.__create_queue_repository_per_consumer()
-        self.__initial_coroutines_created_elapsed_time = datetime.now() - start_time
-
         start_consumption_time = datetime.now()
-        self._logger.info("Starting to consume events")
         worker_errors = await self.__run_worker()
         self.__events_processed_elapsed_time = datetime.now() - start_consumption_time
 
@@ -112,16 +105,13 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
 
         if self.__exceptions_are_thrown(worker_errors):
             consume_events_exception, polling_task_exception = worker_errors
-            if consume_events_exception:
-                self._logger.error(consume_events_exception)
-            if polling_task_exception:
-                self._logger.error(polling_task_exception)
-
-            raise WorkerExecutionException("The worker was closed by an unexpected exception")
+            raise WorkerExecutionException(
+                "The worker was closed by an unexpected exception"
+            ) from consume_events_exception or polling_task_exception
 
     async def __run_worker(self) -> tuple[Optional[Exception], Optional[Exception]]:
-        consume_events_task = self.__consume_events_task()
-        polling_task = self.__polling_task()
+        consume_events_task = create_task(self.__consume_events_task())
+        polling_task = create_task(self.__polling_task())
 
         try:
             await gather(consume_events_task, polling_task)
@@ -132,9 +122,9 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
            polling_task_exception = await self.__await_exception(polling_task)
            return (consume_events_exception, polling_task_exception)
 
-    async def __await_exception(self, future: Coroutine) -> Optional[Exception]:
+    async def __await_exception(self, task: Task) -> Optional[Exception]:
         try:
-            await future
+            await task
            return None
        except Exception as exception:
            return exception
@@ -144,11 +134,11 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
 
     async def __generate_kafka_consumers(self):
         start_time = datetime.now()
-        tasks = [self.__initialize_kafka_consumer_for_subscriber(subscriber) for subscriber in self.__subscribers]
+        tasks = [self.__generate_kafka_consumer_for_subscriber(subscriber) for subscriber in self.__subscribers]
         await gather(*tasks)
         self.__start_kafka_consumers_elapsed_time = datetime.now() - start_time
 
-    async def __initialize_kafka_consumer_for_subscriber(self, subscriber: MetaSubscriber) -> None:
+    async def __generate_kafka_consumer_for_subscriber(self, subscriber: MetaSubscriber) -> None:
         try:
             executor = await self._create_kafka_consumer_executor(subscriber)
             topics = self.__consume_strategy.get_topics(subscriber)
164
154
  on_partition_revoked=self.__on_partition_revoked,
165
155
  )
166
156
 
167
- self.__subscriber_per_consumer_mapper[kafka_consumer] = subscriber
168
-
169
157
  self.__executor_per_consumer_mapper[kafka_consumer] = executor
170
158
 
171
- await kafka_consumer.init()
159
+ self.__queue_per_consumer_mapper[kafka_consumer] = InMemoryMultiqueueRepository()
160
+
172
161
  except Exception:
173
162
  self._logger.exception(
174
- f"Unexpected error during Kafka subscriber '{subscriber.fqn()}' initialization. Skipping it: {traceback.format_exc()}"
163
+ f"Unexpected error during Kafka subscriber '{subscriber.fqn()}' creation. Skipping it: {traceback.format_exc()}"
175
164
  )
176
165
 
177
166
  @abstractmethod
178
167
  async def _create_kafka_consumer_executor(self, subscriber: MetaSubscriber) -> KafkaEventSubscriberExecutor:
179
168
  pass
180
169
 
181
- def __create_queue_repository_per_consumer(self) -> None:
182
- for kafka_consumer in self.__subscriber_per_consumer_mapper.keys():
183
- self.__queue_per_consumer_mapper[kafka_consumer] = InMemoryMultiqueueRepository()
184
-
185
170
  async def __polling_task(self) -> None:
171
+ self._logger.info("Initializing subscribers")
186
172
  try:
187
- while not self.__should_stop.is_set():
188
- total_size = sum([queue.get_total_size() for queue in self.__queue_per_consumer_mapper.values()])
189
- if total_size >= self.__max_queue_size:
190
- await sleep(self.__seconds_between_polls_if_there_are_tasks_in_the_queue)
191
- continue
192
-
193
- raw_consuming_tasks = await gather(
194
- *[
195
- self.__polling_consuming_tasks(kafka_consumer=consumer)
196
- for consumer, subscriber in self.__subscriber_per_consumer_mapper.items()
197
- ]
198
- )
199
-
200
- poll_results: list[ConsumingTask] = [
201
- consuming_task for consuming_tasks in raw_consuming_tasks for consuming_task in consuming_tasks
202
- ]
203
- if len(poll_results) == 0:
204
- await sleep(self.__seconds_between_polls_if_there_are_no_new_tasks)
173
+ polling_task_per_consumer = [
174
+ create_task(self.__polling_consuming_tasks(consumer))
175
+ for consumer, subscriber in self.__queue_per_consumer_mapper.items()
176
+ ]
205
177
 
206
- for poll_result in poll_results:
207
- queue = self.__queue_per_consumer_mapper[poll_result.consumer]
208
- queue.push(
209
- key=TopicPartition(
210
- topic=poll_result.kafka_poll_record.topic, partition=poll_result.kafka_poll_record.partition
211
- ),
212
- record=poll_result.kafka_poll_record,
213
- )
178
+ await gather(*polling_task_per_consumer)
214
179
 
215
180
  except Exception:
216
181
  self._logger.error(f"Polling task failed with exception: {traceback.format_exc()}")
217
182
  self.__should_stop.set()
218
183
 
219
- return
184
+ async def __polling_consuming_tasks(self, consumer: AIOKafkaConsumer) -> None:
185
+ queue = self.__queue_per_consumer_mapper[consumer]
220
186
 
221
- async def __polling_consuming_tasks(self, kafka_consumer: AIOKafkaConsumer) -> list[ConsumingTask]:
222
- async with self.__polling_tasks_semaphore:
223
- results = await kafka_consumer.poll(
224
- number_of_messages_to_poll=self.__max_records_retrieved_per_poll,
187
+ try:
188
+ self._logger.info(
189
+ f"initializing consumer group: '{consumer.get_consumer_group()}' subscribed to the topics: '{consumer.get_topics()}'"
225
190
  )
226
- return [ConsumingTask(kafka_consumer, result) for result in results]
191
+ await consumer.init()
192
+ self._logger.info(f"initialized '{consumer.get_consumer_group()}'")
193
+ except Exception:
194
+ self._logger.exception(
195
+ f"Unexpected error during Kafka subscriber '{consumer.get_consumer_group()}' initialization. Skipping it: {traceback.format_exc()}"
196
+ )
197
+
198
+ while not self.__should_stop.is_set():
199
+ total_size = sum([queue.get_total_size() for queue in self.__queue_per_consumer_mapper.values()])
200
+ if total_size >= self.__max_queue_size:
201
+ await sleep(self.__seconds_between_polls_if_there_are_tasks_in_the_queue)
202
+ continue
203
+
204
+ async with self.__polling_tasks_semaphore:
205
+ kafka_poll_records = await consumer.poll(
206
+ number_of_messages_to_poll=self.__max_records_retrieved_per_poll,
207
+ )
208
+
209
+ for kafka_poll_record in kafka_poll_records:
210
+ queue.push(
211
+ key=TopicPartition(
212
+ topic=kafka_poll_record.topic,
213
+ partition=kafka_poll_record.partition,
214
+ ),
215
+ record=kafka_poll_record,
216
+ )
217
+
218
+ if len(kafka_poll_records) == 0:
219
+ await sleep(self.__seconds_between_polls_if_there_are_no_new_tasks)
227
220
 
228
221
  async def __consume_events_task(self) -> None:
229
- blocked_tasks_iterator = self.generate_blocked_consuming_tasks_iterator()
222
+ self._logger.info("Initializing consuming task")
223
+ blocked_tasks_iterator = self.__generate_blocked_consuming_tasks_iterator()
230
224
 
231
225
  async for consuming_task in blocked_tasks_iterator:
232
226
  consumer = consuming_task.consumer
233
227
  kafka_poll_record = consuming_task.kafka_poll_record
234
- executor = self.__executor_per_consumer_mapper[consuming_task.consumer]
235
228
 
229
+ executor = self.__executor_per_consumer_mapper[consumer]
236
230
  await executor.consume(kafka_poll_record=kafka_poll_record)
237
-
238
231
  await consumer.commit_poll_record(kafka_poll_record)
239
232
 
240
233
  self.__events_processed += 1
241
234
 
242
235
  # This iterator return a blocked task, that will be blocked for other process (like rebalancing), until the next task will be requested
243
- async def generate_blocked_consuming_tasks_iterator(self) -> AsyncIterator[ConsumingTask]:
236
+ async def __generate_blocked_consuming_tasks_iterator(self) -> AsyncIterator[ConsumingTask]:
244
237
  consumer_queues_cyclic_iterator = cycle(self.__queue_per_consumer_mapper.items())
245
238
  last_consumer, _ = next(consumer_queues_cyclic_iterator)
246
239
 
247
240
  while not self.__should_stop.is_set():
248
- all_queues_are_empty = all(
249
- [queue.is_totally_empty() for queue in self.__queue_per_consumer_mapper.values()]
250
- )
251
-
252
- if all_queues_are_empty:
241
+ if await self.__all_queues_are_empty():
253
242
  await sleep(self.__seconds_between_executions_if_there_are_no_tasks_in_the_queue)
254
243
  continue
255
244
 
256
245
  async with self.__task_execution_mutex:
257
246
  consumer: Optional[AIOKafkaConsumer] = None
258
- kafka_poll_record: Optional[KafkaPollRecord] = None
259
247
 
260
248
  while consumer != last_consumer:
261
249
  consumer, queue = next(consumer_queues_cyclic_iterator)
@@ -266,7 +254,8 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
                         last_consumer = consumer
                         break
 
-        return
+    async def __all_queues_are_empty(self) -> bool:
+        return all([queue.is_totally_empty() for queue in self.__queue_per_consumer_mapper.values()])
 
     async def __on_partition_revoked(self, consumer: AIOKafkaConsumer, topics_partitions: set[TopicPartition]) -> None:
         async with self.__task_execution_mutex:
@@ -278,7 +267,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         self._logger.info("Worker stop requested. Waiting for finalize the current task")
 
     async def __manage_kafka_consumers_stopping(self) -> None:
-        for kafka_consumer in self.__subscriber_per_consumer_mapper.keys():
+        for kafka_consumer in self.__queue_per_consumer_mapper.keys():
            await kafka_consumer.stop()
 
     def __print_statistics(self) -> None:
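
One subtlety in the rewritten error handling above: `raise WorkerExecutionException(...) from consume_events_exception or polling_task_exception` chains the first non-None exception as `__cause__`, replacing the two ad-hoc `logger.error` calls. The mechanism in isolation:

```python
# `a or b` picks the first truthy (i.e. non-None) exception; it becomes __cause__.
consume_events_exception = None
polling_task_exception = ValueError("poll failed")

try:
    raise RuntimeError("The worker was closed by an unexpected exception") from (
        consume_events_exception or polling_task_exception
    )
except RuntimeError as exc:
    assert exc.__cause__ is polling_task_exception
```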
buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py CHANGED
@@ -15,11 +15,10 @@ from buz.event.meta_subscriber import MetaSubscriber
 from buz.event.middleware.async_consume_middleware import AsyncConsumeMiddleware
 from buz.event.strategies.retry.consume_retrier import ConsumeRetrier
 from buz.event.strategies.retry.reject_callback import RejectCallback
-from buz.kafka import (
-    KafkaConnectionConfig,
-    ConsumerInitialOffsetPosition,
-)
+
 from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
+from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
+from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
 from buz.kafka.infrastructure.deserializers.bytes_to_message_deserializer import BytesToMessageDeserializer
 from buz.kafka.infrastructure.deserializers.implementations.json_bytes_to_message_deserializer import (
buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py CHANGED
@@ -15,11 +15,9 @@ from buz.event.meta_subscriber import MetaSubscriber
 from buz.event.middleware.consume_middleware import ConsumeMiddleware
 from buz.event.strategies.retry.consume_retrier import ConsumeRetrier
 from buz.event.strategies.retry.reject_callback import RejectCallback
-from buz.kafka import (
-    KafkaConnectionConfig,
-    ConsumerInitialOffsetPosition,
-)
 from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
+from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
+from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
 from buz.kafka.infrastructure.deserializers.bytes_to_message_deserializer import BytesToMessageDeserializer
 from buz.kafka.infrastructure.deserializers.implementations.json_bytes_to_message_deserializer import (
buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py CHANGED
@@ -11,12 +11,11 @@ from buz.event.middleware import (
     PublishMiddleware,
 )
 from buz.event.middleware.publish_middleware_chain_resolver import PublishMiddlewareChainResolver
-from buz.kafka import (
-    KafkaPythonProducer,
-)
+from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
 from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
 from buz.kafka.domain.models.create_kafka_topic import CreateKafkaTopic
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+from buz.kafka.domain.services.kafka_producer import KafkaProducer
 
 
 class BuzKafkaEventBus(EventBus):
@@ -24,7 +23,7 @@ class BuzKafkaEventBus(EventBus):
         self,
         *,
         publish_strategy: KafkaPublishStrategy,
-        producer: KafkaPythonProducer,
+        producer: KafkaProducer,
         logger: Logger,
         kafka_admin_client: Optional[KafkaAdminClient] = None,
         publish_middlewares: Optional[list[PublishMiddleware]] = None,
@@ -53,18 +52,22 @@ class BuzKafkaEventBus(EventBus):
             topic = self.__publish_strategy.get_topic(event)
 
             if self.__auto_create_topic_configuration is not None and self.__is_topic_created(topic) is False:
-                self.__get_kafka_admin_client().create_topics(
-                    topics=[
-                        CreateKafkaTopic(
-                            name=topic,
-                            partitions=self.__auto_create_topic_configuration.partitions,
-                            replication_factor=self.__auto_create_topic_configuration.replication_factor,
-                            configs=self.__auto_create_topic_configuration.configs,
-                        )
-                    ]
-                )
-                self.__logger.info(f"Created missing topic: {topic}")
-                self.__topics_checked[topic] = True
+                try:
+                    self.__logger.info(f"Creating missing topic: {topic}..")
+                    self.__get_kafka_admin_client().create_topics(
+                        topics=[
+                            CreateKafkaTopic(
+                                name=topic,
+                                partitions=self.__auto_create_topic_configuration.partitions,
+                                replication_factor=self.__auto_create_topic_configuration.replication_factor,
+                                configs=self.__auto_create_topic_configuration.configs,
+                            )
+                        ]
+                    )
+                    self.__logger.info(f"Created missing topic: {topic}")
+                    self.__topics_checked[topic] = True
+                except KafkaTopicsAlreadyCreatedException:
+                    pass
 
             headers = self.__get_event_headers(event)
             self.__producer.produce(
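
The sync bus now mirrors the async one: topic auto-creation is wrapped in `try/except KafkaTopicsAlreadyCreatedException`, which makes it safe when several publishers race to create the same topic. The pattern in isolation (the `ensure_topic` helper is illustrative, not package API):

```python
from buz.kafka import CreateKafkaTopic, KafkaAdminClient, KafkaTopicsAlreadyCreatedException


def ensure_topic(admin_client: KafkaAdminClient, topic: CreateKafkaTopic) -> None:
    try:
        admin_client.create_topics(topics=[topic])
    except KafkaTopicsAlreadyCreatedException:
        pass  # another producer won the race; the topic exists either way
```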
buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py CHANGED
@@ -67,11 +67,6 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
             self.__logger.error(
                 f'The message "{str(kafka_poll_record.value)}" is not valid, it will be consumed but not processed'
             )
-        except Exception as exception:
-            if self.__on_fail_strategy == KafkaOnFailStrategy.CONSUME_ON_FAIL:
-                self.__logger.error(f"Error consuming event: {exception}")
-                return
-            raise exception
 
     def __execution_callback(self, subscriber: Subscriber, message: KafkaConsumerRecord[Event]) -> None:
         self.__consume_middleware_chain_resolver.resolve(
@@ -86,13 +81,13 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
                 return
             except Exception as exception:
                 self.__logger.warning(f"Event {event.id} could not be consumed by the subscriber {subscriber.fqn}")
-                self.__logger.error(exception, exc_info=True)
-
                 if self.__should_retry(event, subscriber) is True:
                     self.__register_retry(event, subscriber)
                     time.sleep(self.__seconds_between_retires)
                     continue
 
+                self.__logger.exception(exception)
+
                 if self.__reject_callback:
                     self.__reject_callback.on_reject(event=event, subscribers=[subscriber], exception=exception)
 
buz/event/infrastructure/kombu/kombu_consumer.py CHANGED
@@ -135,6 +135,7 @@ class KombuConsumer(ConsumerMixin, Consumer):
             message.requeue()
             return
 
+        self.__logger.exception(exception)
         self.__reject_message(message, event, subscribers, exception)
 
     def __reject_message(
buz/event/middleware/async_publish_middleware.py ADDED
@@ -0,0 +1,13 @@
+from abc import abstractmethod
+from typing import Awaitable, Callable
+
+from buz.event import Event
+from buz.middleware import Middleware
+
+AsyncPublishCallable = Callable[[Event], Awaitable[None]]
+
+
+class AsyncPublishMiddleware(Middleware):
+    @abstractmethod
+    async def on_publish(self, event: Event, publish: AsyncPublishCallable) -> None:
+        pass
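
A hypothetical middleware implementing the new interface (not shipped by buz): it wraps the downstream `publish` callable, which is how cross-cutting concerns such as logging or tracing slot into the async publish path:

```python
import logging

from buz.event import Event
from buz.event.middleware.async_publish_middleware import (
    AsyncPublishCallable,
    AsyncPublishMiddleware,
)

logger = logging.getLogger(__name__)


class LoggingAsyncPublishMiddleware(AsyncPublishMiddleware):
    async def on_publish(self, event: Event, publish: AsyncPublishCallable) -> None:
        logger.info("publishing %s", event.id)
        await publish(event)  # hand off to the next middleware, or the bus itself
        logger.info("published %s", event.id)
```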
buz/event/middleware/async_publish_middleware_chain_resolver.py ADDED
@@ -0,0 +1,22 @@
+from buz.event import Event
+from buz.event.middleware.async_publish_middleware import AsyncPublishCallable, AsyncPublishMiddleware
+from buz.middleware import MiddlewareChainBuilder
+
+
+class AsyncPublishMiddlewareChainResolver:
+    def __init__(self, middlewares: list[AsyncPublishMiddleware]):
+        self.__middlewares = middlewares
+        self.__middleware_chain_builder: MiddlewareChainBuilder[
+            AsyncPublishCallable, AsyncPublishMiddleware
+        ] = MiddlewareChainBuilder(middlewares)
+
+    async def resolve(self, event: Event, publish: AsyncPublishCallable) -> None:
+        chain_callable: AsyncPublishCallable = self.__middleware_chain_builder.get_chain_callable(
+            publish, self.__get_middleware_callable
+        )
+        await chain_callable(event)
+
+    def __get_middleware_callable(
+        self, middleware: AsyncPublishMiddleware, publish_callable: AsyncPublishCallable
+    ) -> AsyncPublishCallable:
+        return lambda event: middleware.on_publish(event, publish_callable)
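
And a sketch of how the resolver composes such middlewares around a terminal publish callable (`LoggingAsyncPublishMiddleware` is the hypothetical class from the previous example):

```python
from buz.event import Event
from buz.event.middleware.async_publish_middleware_chain_resolver import (
    AsyncPublishMiddlewareChainResolver,
)


async def terminal_publish(event: Event) -> None:
    ...  # in AsyncBuzKafkaEventBus this role is played by __perform_publish


resolver = AsyncPublishMiddlewareChainResolver([LoggingAsyncPublishMiddleware()])
# Awaiting resolver.resolve(event, terminal_publish) runs the middleware chain,
# with terminal_publish(event) as the innermost call.
```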
buz/kafka/__init__.py CHANGED
@@ -1,18 +1,32 @@
+from buz.kafka.domain.exceptions.not_all_partition_assigned_exception import NotAllPartitionAssignedException
+from buz.kafka.domain.exceptions.not_valid_kafka_message_exception import NotValidKafkaMessageException
 from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
+from buz.kafka.domain.exceptions.topic_not_found_exception import TopicNotFoundException
+from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
 from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
 from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
+from buz.kafka.domain.models.kafka_connection_credentials import KafkaConnectionCredentials
+from buz.kafka.domain.models.kafka_connection_plain_text_credentials import KafkaConnectionPlainTextCredentials
+from buz.kafka.domain.models.kafka_connection_sasl_credentials import KafkaConnectionSaslCredentials
 from buz.kafka.domain.models.kafka_consumer_record import KafkaConsumerRecord
+from buz.kafka.domain.models.kafka_supported_sasl_mechanisms import KafkaSupportedSaslMechanisms
 from buz.kafka.domain.models.kafka_supported_security_protocols import KafkaSupportedSecurityProtocols
 from buz.kafka.domain.models.create_kafka_topic import CreateKafkaTopic
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
 from buz.kafka.domain.services.kafka_admin_test_client import KafkaAdminTestClient
 from buz.kafka.domain.services.kafka_producer import KafkaProducer
-from buz.kafka.infrastructure.kafka_python.factories.kafka_python_producer_factory import KafkaPythonProducerFactory
+from buz.kafka.infrastructure.aiokafka.aiokafka_producer import AIOKafkaProducer
 from buz.kafka.infrastructure.kafka_python.kafka_python_admin_client import KafkaPythonAdminClient
 from buz.kafka.infrastructure.kafka_python.kafka_python_admin_test_client import KafkaPythonAdminTestClient
 from buz.kafka.infrastructure.kafka_python.kafka_python_producer import KafkaPythonProducer
 from buz.kafka.infrastructure.serializers.byte_serializer import ByteSerializer
 from buz.kafka.infrastructure.serializers.implementations.json_byte_serializer import JSONByteSerializer
+from buz.kafka.domain.models.kafka_supported_compression_type import KafkaSupportedCompressionType
+from buz.event.infrastructure.buz_kafka.exceptions.kafka_event_bus_config_not_valid_exception import (
+    KafkaEventBusConfigNotValidException,
+)
+from buz.event.infrastructure.buz_kafka.async_buz_kafka_event_bus import AsyncBuzKafkaEventBus
+from buz.event.infrastructure.buz_kafka.buz_kafka_event_bus import BuzKafkaEventBus
 
 
 __all__ = [
@@ -22,7 +36,6 @@ __all__ = [
     "KafkaAdminTestClient",
     "KafkaPythonAdminClient",
     "KafkaPythonAdminTestClient",
-    "KafkaPythonProducerFactory",
     "KafkaTopicsAlreadyCreatedException",
     "KafkaConsumerRecord",
     "CreateKafkaTopic",
@@ -31,4 +44,17 @@ __all__ = [
     "ByteSerializer",
     "JSONByteSerializer",
     "ConsumerInitialOffsetPosition",
+    "KafkaSupportedCompressionType",
+    "KafkaEventBusConfigNotValidException",
+    "AsyncBuzKafkaEventBus",
+    "BuzKafkaEventBus",
+    "AutoCreateTopicConfiguration",
+    "NotAllPartitionAssignedException",
+    "NotValidKafkaMessageException",
+    "TopicNotFoundException",
+    "KafkaConnectionCredentials",
+    "KafkaConnectionPlainTextCredentials",
+    "KafkaConnectionSaslCredentials",
+    "KafkaSupportedSaslMechanisms",
+    "AIOKafkaProducer",
 ]
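
For consumers of the package, the practical upshot of the `__init__.py` changes: `KafkaPythonProducerFactory` is gone from the root namespace, and these names (among others) are now importable directly — a quick smoke test, assuming buz 2.13.1 is installed:

```python
from buz.kafka import (
    AIOKafkaProducer,
    AsyncBuzKafkaEventBus,
    BuzKafkaEventBus,
    KafkaSupportedCompressionType,
    NotAllPartitionAssignedException,
    TopicNotFoundException,
)
```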
buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py ADDED
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+
+class NotAllPartitionAssignedException(Exception):
+    def __init__(self, topic_name: str) -> None:
+        super().__init__(
+            f'Not all the partition were assigned for the topic "{topic_name}", please disconnect the rest of subscribers'
+        )
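
Finally, a small sketch of what raising the new exception looks like (the topic name is hypothetical):

```python
from buz.kafka import NotAllPartitionAssignedException

try:
    raise NotAllPartitionAssignedException("orders.created")
except NotAllPartitionAssignedException as exc:
    assert 'topic "orders.created"' in str(exc)
```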