buz 2.14.1__py3-none-any.whl → 2.14.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,4 @@
+import json
 import traceback
 from abc import abstractmethod
 from asyncio import Lock, Task, create_task, gather, Semaphore, Event as AsyncIOEvent, sleep
@@ -5,6 +6,7 @@ from datetime import timedelta, datetime
 from itertools import cycle
 from logging import Logger
 from typing import AsyncIterator, Optional, Sequence, Type, TypeVar
+from aiohttp import web
 
 from aiokafka import TopicPartition
 from aiokafka.coordinator.assignors.abstract import AbstractPartitionAssignor
@@ -43,6 +45,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         kafka_partition_assignors: tuple[Type[AbstractPartitionAssignor], ...] = (),
         subscribers: Sequence[MetaSubscriber],
         logger: Logger,
+        health_check_port: Optional[int],
         consumer_initial_offset_position: ConsumerInitialOffsetPosition,
         auto_create_topic_configuration: Optional[AutoCreateTopicConfiguration] = None,
         seconds_between_executions_if_there_are_no_tasks_in_the_queue: int = 1,
@@ -55,6 +58,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         self.__kafka_partition_assignors = kafka_partition_assignors
         self.__subscribers = subscribers
         self._logger = logger
+        self.__health_check_port = health_check_port
         self.__consumer_initial_offset_position = consumer_initial_offset_position
         self.__max_records_retrieved_per_poll = 1
         self.__executor_per_consumer_mapper: dict[AIOKafkaConsumer, KafkaEventSubscriberExecutor] = {}
@@ -78,12 +82,26 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         )
         self.__seconds_between_polls_if_there_are_no_new_tasks = seconds_between_polls_if_there_are_no_new_tasks
         self.__polling_tasks_semaphore = Semaphore(max_number_of_concurrent_polling_tasks)
-
         self.__task_execution_mutex = Lock()
 
+    async def configure_health_check_server(self, health_check_port: int) -> web.TCPSite:
+        self._logger.info(f"Starting health check server on port {health_check_port}")
+        app = web.Application()
+        app.router.add_get("/health", lambda request: self.__health_check())
+        runner = web.AppRunner(app)
+        await runner.setup()
+        site = web.TCPSite(runner, "localhost", health_check_port)
+        await site.start()
+        return site
+
     async def run(self) -> None:
         start_time = datetime.now()
         await self.__generate_kafka_consumers()
+        health_check_server: Optional[web.TCPSite] = None
+
+        if self.__health_check_port is not None:
+            health_check_server = await self.configure_health_check_server(self.__health_check_port)
+
         self.__initial_coroutines_created_elapsed_time = datetime.now() - start_time
 
         if len(self.__executor_per_consumer_mapper) == 0:
@@ -94,6 +112,9 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         worker_errors = await self.__run_worker()
         self.__events_processed_elapsed_time = datetime.now() - start_consumption_time
 
+        if health_check_server is not None:
+            await health_check_server.stop()
+
         await self.__handle_graceful_stop(worker_errors)
 
     async def __handle_graceful_stop(self, worker_errors: tuple[Optional[Exception], Optional[Exception]]) -> None:
@@ -270,6 +291,15 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         for kafka_consumer in self.__queue_per_consumer_mapper.keys():
             await kafka_consumer.stop()
 
+    async def __health_check(self) -> web.Response:
+        health_information = {
+            "subscribers": [subscriber.fqn() for subscriber in self.__subscribers],
+            "number_of_subscribers": len(self.__subscribers),
+            "event_processed": self.__events_processed,
+        }
+
+        return web.Response(text=json.dumps(health_information), content_type="application/json")
+
     def __print_statistics(self) -> None:
         self._logger.info("Number of subscribers: %d", len(self.__subscribers))
         self._logger.info(f"Start kafka consumers elapsed time: {self.__start_kafka_consumers_elapsed_time}")
@@ -4,7 +4,6 @@ from typing import Optional, Sequence, Type, TypeVar
 from aiokafka.coordinator.assignors.abstract import AbstractPartitionAssignor
 
 from buz.event import Event
-
 from buz.event.async_subscriber import AsyncSubscriber
 from buz.event.infrastructure.buz_kafka.base_buz_aiokafka_async_consumer import BaseBuzAIOKafkaAsyncConsumer
 from buz.event.infrastructure.buz_kafka.consume_strategy.consume_strategy import KafkaConsumeStrategy
@@ -15,11 +14,11 @@ from buz.event.meta_subscriber import MetaSubscriber
 from buz.event.middleware.async_consume_middleware import AsyncConsumeMiddleware
 from buz.event.strategies.retry.consume_retrier import ConsumeRetrier
 from buz.event.strategies.retry.reject_callback import RejectCallback
-
 from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
 from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
 from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+from buz.kafka.infrastructure.deserializers.byte_deserializer import ByteDeserializer
 from buz.kafka.infrastructure.deserializers.bytes_to_message_deserializer import BytesToMessageDeserializer
 from buz.kafka.infrastructure.deserializers.implementations.json_bytes_to_message_deserializer import (
     JSONBytesToMessageDeserializer,
@@ -52,6 +51,7 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
         seconds_between_polls_if_there_are_tasks_in_the_queue: int = 1,
         seconds_between_polls_if_there_are_no_new_tasks: int = 1,
         max_number_of_concurrent_polling_tasks: int = 20,
+        health_check_port: Optional[int] = None,
     ):
         super().__init__(
             connection_config=connection_config,
@@ -68,6 +68,7 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
             seconds_between_polls_if_there_are_tasks_in_the_queue=seconds_between_polls_if_there_are_tasks_in_the_queue,
             seconds_between_polls_if_there_are_no_new_tasks=seconds_between_polls_if_there_are_no_new_tasks,
             max_number_of_concurrent_polling_tasks=max_number_of_concurrent_polling_tasks,
+            health_check_port=health_check_port,
         )
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_middlewares = consume_middlewares
@@ -84,7 +85,9 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
                 f"Subscriber {subscriber.__class__.__name__} is not a subclass of Subscriber, probably you are trying to use a synchronous subscriber"
             )
 
-        byte_deserializer = self._deserializers_per_subscriber.get(subscriber) or JSONBytesToMessageDeserializer(
+        byte_deserializer: ByteDeserializer[Event] = self._deserializers_per_subscriber.get(
+            subscriber
+        ) or JSONBytesToMessageDeserializer(
             # todo: it looks like in next python versions the inference engine is powerful enough to ensure this type, so we can remove it when we upgrade the python version of the library
             event_class=subscriber.handles()  # type: ignore
         )
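
The new annotation relies on ByteDeserializer being generic in the message type it produces. The exact definition lives in buz/kafka/infrastructure/deserializers/byte_deserializer.py and is not shown in this diff; a minimal sketch of the shape the annotation implies:

from abc import ABC, abstractmethod
from typing import Generic, TypeVar

T = TypeVar("T")


class ByteDeserializer(ABC, Generic[T]):
    # Sketch only: the real interface in buz may carry extra members.
    @abstractmethod
    def deserialize(self, data: bytes) -> T:
        ...

Annotating byte_deserializer as ByteDeserializer[Event] lets a type checker verify that deserialize() yields an Event regardless of which branch of the `or` supplied the deserializer.
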
@@ -19,6 +19,7 @@ from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTo
 from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
 from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
 from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+from buz.kafka.infrastructure.deserializers.byte_deserializer import ByteDeserializer
 from buz.kafka.infrastructure.deserializers.bytes_to_message_deserializer import BytesToMessageDeserializer
 from buz.kafka.infrastructure.deserializers.implementations.json_bytes_to_message_deserializer import (
     JSONBytesToMessageDeserializer,
@@ -51,6 +52,7 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
         seconds_between_polls_if_there_are_tasks_in_the_queue: int = 1,
         seconds_between_polls_if_there_are_no_new_tasks: int = 1,
         max_number_of_concurrent_polling_tasks: int = 20,
+        health_check_port: Optional[int] = 3123,
     ):
         super().__init__(
             connection_config=connection_config,
@@ -67,6 +69,7 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
             seconds_between_polls_if_there_are_tasks_in_the_queue=seconds_between_polls_if_there_are_tasks_in_the_queue,
             seconds_between_polls_if_there_are_no_new_tasks=seconds_between_polls_if_there_are_no_new_tasks,
             max_number_of_concurrent_polling_tasks=max_number_of_concurrent_polling_tasks,
+            health_check_port=health_check_port,
         )
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_middlewares = consume_middlewares
@@ -83,7 +86,9 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
                 f"Subscriber {subscriber.__class__.__name__} is not a subclass of Subscriber, probably you are trying to use an asynchronous subscriber"
             )
 
-        byte_deserializer = self._deserializers_per_subscriber.get(subscriber) or JSONBytesToMessageDeserializer(
+        byte_deserializer: ByteDeserializer[Event] = self._deserializers_per_subscriber.get(
+            subscriber
+        ) or JSONBytesToMessageDeserializer(
             # todo: it looks like in next python versions the inference engine is powerful enough to ensure this type, so we can remove it when we upgrade the python version of the library
             event_class=subscriber.handles()  # type: ignore
         )
@@ -0,0 +1,12 @@
+class ConsumerRetryException(Exception):
+    def __init__(
+        self,
+        *,
+        event_id: str,
+        subscriber_fqn: str,
+        number_of_executions: int,
+    ) -> None:
+        super().__init__(
+            f"An exception happened during the consumption of the event '{event_id}' by the subscriber '{subscriber_fqn}' "
+            + f"during execution number '{number_of_executions}'. Retrying the consumption..."
+        )
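
The exception is never raised across a boundary in this release; the executors below pass an instance to logger.warning, so its formatted message is effectively its whole contract. A hypothetical instantiation, with illustrative values:

exception = ConsumerRetryException(
    event_id="6f1c0e",  # hypothetical event id
    subscriber_fqn="my_app.subscribers.OrderCreatedSubscriber",  # hypothetical FQN
    number_of_executions=2,
)
print(exception)
# An exception happened during the consumption of the event '6f1c0e' by the
# subscriber 'my_app.subscribers.OrderCreatedSubscriber' during execution
# number '2'. Retrying the consumption...
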
@@ -6,6 +6,7 @@ from typing import Optional, Sequence, cast
 from buz.event import Event
 from buz.event.async_subscriber import AsyncSubscriber
 from buz.event.infrastructure.buz_kafka.consume_strategy.kafka_on_fail_strategy import KafkaOnFailStrategy
+from buz.event.infrastructure.buz_kafka.exceptions.retry_exception import ConsumerRetryException
 from buz.event.infrastructure.buz_kafka.kafka_event_subscriber_executor import KafkaEventSubscriberExecutor
 from buz.event.middleware.async_consume_middleware import AsyncConsumeMiddleware
 from buz.event.middleware.async_consume_middleware_chain_resolver import AsyncConsumeMiddlewareChainResolver
@@ -26,7 +27,7 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
         logger: Logger,
         consume_middlewares: Optional[Sequence[AsyncConsumeMiddleware]] = None,
         seconds_between_retries: float = 5,
-        byte_deserializer: ByteDeserializer,
+        byte_deserializer: ByteDeserializer[Event],
         header_deserializer: KafkaHeaderSerializer,
         on_fail_strategy: KafkaOnFailStrategy,
         consume_retrier: Optional[ConsumeRetrier] = None,
@@ -53,10 +54,17 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
 
         kafka_record_value = cast(bytes, kafka_poll_record.value)
 
+        deserialized_value = self.__byte_deserializer.deserialize(kafka_record_value)
+
+        self.__logger.info(
+            f"consuming the event '{deserialized_value.id}' by the subscriber '{self.__subscriber.fqn()}', "
+            + f"topic: '{kafka_poll_record.topic}', partition: '{kafka_poll_record.partition}', offset: '{kafka_poll_record.offset}'"
+        )
+
         await self.__consumption_callback(
             self.__subscriber,
             KafkaConsumerRecord(
-                value=self.__byte_deserializer.deserialize(kafka_record_value),
+                value=deserialized_value,
                 headers=self.__header_deserializer.deserialize(kafka_poll_record.headers),
             ),
         )
@@ -78,20 +86,28 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
 
     async def __perform_consume(self, event: Event, subscriber: AsyncSubscriber) -> None:
         should_retry = True
+        number_of_executions = 0
         while should_retry is True:
             try:
+                number_of_executions += 1
                 await subscriber.consume(event)
                 return
             except Exception as exception:
-                self.__logger.exception(
-                    f"Event {event.id} could not be consumed by the subscriber, error: {traceback.format_exc()}"
-                )
-
                 if self.__should_retry(event, subscriber) is True:
+                    self.__logger.warning(
+                        ConsumerRetryException(
+                            number_of_executions=number_of_executions,
+                            event_id=event.id,
+                            subscriber_fqn=subscriber.fqn(),
+                        ),
+                        exc_info=exception,
+                    )
                    self.__register_retry(event, subscriber)
                    await sleep(self.__seconds_between_retires)
                    continue
 
+                self.__logger.exception(exception)
+
                if self.__reject_callback:
                    self.__reject_callback.on_reject(event=event, subscribers=[subscriber], exception=exception)
 
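
The restructured loop changes when each log level fires: a ConsumerRetryException is logged as a warning (with exc_info carrying the original traceback) on every failed attempt that will be retried, and logger.exception fires only once retries are exhausted. Condensed to its skeleton as a standalone sketch, with consume and should_retry standing in for the executor's private helpers:

import asyncio
import logging

logger = logging.getLogger(__name__)


async def perform_consume(consume, should_retry, *, seconds_between_retries: float = 5) -> None:
    number_of_executions = 0
    while True:
        try:
            number_of_executions += 1
            await consume()
            return
        except Exception as exception:
            if should_retry():
                # A retry is pending: warn and attach the traceback.
                logger.warning("retrying, attempt %d", number_of_executions, exc_info=exception)
                await asyncio.sleep(seconds_between_retries)
                continue
            # Retries exhausted: log at error level and give up.
            logger.exception(exception)
            return
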
@@ -4,6 +4,7 @@ import time
 from typing import Optional, Sequence, cast
 from buz.event import Event
 from buz.event.infrastructure.buz_kafka.consume_strategy.kafka_on_fail_strategy import KafkaOnFailStrategy
+from buz.event.infrastructure.buz_kafka.exceptions.retry_exception import ConsumerRetryException
 from buz.event.infrastructure.buz_kafka.kafka_event_subscriber_executor import KafkaEventSubscriberExecutor
 from buz.event.middleware.consume_middleware import ConsumeMiddleware
 from buz.event.middleware.consume_middleware_chain_resolver import ConsumeMiddlewareChainResolver
@@ -25,7 +26,7 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
         logger: Logger,
         consume_middlewares: Optional[Sequence[ConsumeMiddleware]] = None,
         seconds_between_retries: float = 5,
-        byte_deserializer: ByteDeserializer,
+        byte_deserializer: ByteDeserializer[Event],
         header_deserializer: KafkaHeaderSerializer,
         on_fail_strategy: KafkaOnFailStrategy,
         consume_retrier: Optional[ConsumeRetrier] = None,
@@ -52,12 +53,19 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
 
         kafka_record_value = cast(bytes, kafka_poll_record.value)
 
+        deserialized_value = self.__byte_deserializer.deserialize(kafka_record_value)
+
+        self.__logger.info(
+            f"consuming the event '{deserialized_value.id}' by the subscriber '{self.__subscriber.fqn()}', "
+            + f"topic: '{kafka_poll_record.topic}', partition: '{kafka_poll_record.partition}', offset: '{kafka_poll_record.offset}'"
+        )
+
         await get_running_loop().run_in_executor(
             None,
             lambda: self.__execution_callback(
                 self.__subscriber,
                 KafkaConsumerRecord(
-                    value=self.__byte_deserializer.deserialize(kafka_record_value),
+                    value=deserialized_value,
                     headers=self.__header_deserializer.deserialize(kafka_poll_record.headers),
                 ),
             ),
@@ -75,13 +83,22 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
 
     def __perform_consume(self, event: Event, subscriber: Subscriber) -> None:
         should_retry = True
+        execution_number = 0
         while should_retry is True:
             try:
+                execution_number += 1
                 subscriber.consume(event)
                 return
             except Exception as exception:
-                self.__logger.warning(f"Event {event.id} could not be consumed by the subscriber {subscriber.fqn}")
                 if self.__should_retry(event, subscriber) is True:
+                    self.__logger.warning(
+                        ConsumerRetryException(
+                            number_of_executions=execution_number,
+                            event_id=event.id,
+                            subscriber_fqn=subscriber.fqn(),
+                        ),
+                        exc_info=exception,
+                    )
                    self.__register_retry(event, subscriber)
                    time.sleep(self.__seconds_between_retires)
                    continue
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Awaitable, Union
+from typing import Awaitable, Type, Union
 
 from buz import Handler
 from buz.event import Event
@@ -9,3 +9,8 @@ class MetaSubscriber(Handler, ABC):
     @abstractmethod
     def consume(self, event: Event) -> Union[None, Awaitable[None]]:
         pass
+
+    @classmethod
+    @abstractmethod
+    def handles(cls) -> Type[Event]:
+        pass
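
Concrete subscribers must now declare which event class they consume, which is what makes the subscriber.handles() call in the consumers above well typed. A hypothetical implementation (the OrderCreated event and the subscriber body are invented for illustration; other abstract members inherited from Handler are omitted):

from typing import Type

from buz.event import Event
from buz.event.meta_subscriber import MetaSubscriber


class OrderCreated(Event):  # hypothetical event class
    ...


class OrderCreatedSubscriber(MetaSubscriber):  # hypothetical subscriber
    def consume(self, event: Event) -> None:
        ...  # business logic goes here

    @classmethod
    def handles(cls) -> Type[Event]:
        return OrderCreated
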
@@ -2,7 +2,12 @@ from __future__ import annotations
 
 
 class NotAllPartitionAssignedException(Exception):
-    def __init__(self, topic_name: str) -> None:
+    def __init__(
+        self,
+        *,
+        topic_name: str,
+        consumer_group: str,
+    ) -> None:
         super().__init__(
-            f'Not all the partition were assigned for the topic "{topic_name}", please disconnect the rest of subscribers'
+            f'Not all the partitions in the consumer group "{consumer_group}" were assigned in the topic "{topic_name}". Please disconnect the rest of subscribers'
         )
@@ -228,7 +228,10 @@ class KafkaPythonAdminClient(KafkaAdminClient):
 
         # We need all the partitions in order to update the offsets
         if len(consumer.assignment()) != len(topic_partitions):
-            raise NotAllPartitionAssignedException(topic)
+            raise NotAllPartitionAssignedException(
+                topic_name=topic,
+                consumer_group=consumer_group,
+            )
 
         # This could produce a race condition, but it is a limitation of kafka admin (we are not able to check if all the partition are assigned using the manual assignment)
         # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/group.py#L430
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: buz
-Version: 2.14.1
+Version: 2.14.3
 Summary: Buz is a set of light, simple and extensible implementations of event, command and query buses.
 License: MIT
 Author: Luis Pintado Lozano
@@ -22,6 +22,7 @@ Classifier: Typing :: Typed
 Provides-Extra: aiokafka
 Provides-Extra: kombu
 Provides-Extra: pypendency
+Requires-Dist: aiohttp (>=3.11.13,<4.0.0)
 Requires-Dist: aiokafka[lz4] (==0.12.0) ; extra == "aiokafka"
 Requires-Dist: asgiref (>=3.8.1,<4.0.0) ; extra == "aiokafka"
 Requires-Dist: asyncio (>=3.4.3,<4.0.0) ; extra == "aiokafka"
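
Worth noting: unlike the aiokafka and asgiref entries around it, the new aiohttp requirement carries no `; extra == "..."` marker, so it appears to be a base dependency installed with every `pip install buz`, presumably because the health check server lives in the core consumer code path.
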
@@ -47,9 +47,9 @@ buz/event/exceptions/worker_execution_exception.py,sha256=6mgztvXOCG_9VZ_Jptkk72
 buz/event/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py,sha256=SyLblUVlwWOaNfZzK7vL6Ee4m-85vZVCH0rjOgqVAww,4913
-buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=E9Sy6IDZrywowcO9qIOJF5zjFvnE4CncTiZD3VC-554,13793
-buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=J_9NhImjlotueksFQ5mJ80Uto3BSgCJvOxJ29pzbW-U,5601
-buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=ECuWXI2Es5W6mEplZ783JCGx6VvvG3VLp8TzobwuH_8,5542
+buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=ybdytgOGUYN8ql-7wRk-zLYFK4_prZdNeb5uzKXZY7Q,15084
+buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=bArqX8vE_jUOjfIzo1QGROxsachat-5n2vz013utDFA,5830
+buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=S7veewA3hIabW_ILfNoJbWbry1VmUKlhBEo6Ocj7c_k,5773
 buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py,sha256=ymRSvcYVgbVCPgHN6rMBVBHQ5heCSwCDl6EffyqGVX8,4601
 buz/event/infrastructure/buz_kafka/consume_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/consume_strategy/consume_strategy.py,sha256=RqlXe5W2S6rH3FTr--tcxzFJTAVLb-Dhl7m6qjgNz2M,331
@@ -57,9 +57,10 @@ buz/event/infrastructure/buz_kafka/consume_strategy/kafka_on_fail_strategy.py,sh
 buz/event/infrastructure/buz_kafka/consume_strategy/topic_and_subscription_group_per_subscriber_kafka_consumer_strategy.py,sha256=LX9mARKCLKyyo4loxegxR_s4qNP1QfabXXYp4Keusts,634
 buz/event/infrastructure/buz_kafka/exceptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/exceptions/kafka_event_bus_config_not_valid_exception.py,sha256=VUKZXA2ygjg21P4DADFl_Tace6RwSXia1MRYvJypxbM,135
-buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py,sha256=ULM5I35hmgUAYJobTYyCgMXYYhDvLNhRd5S_7tMUMog,5073
+buz/event/infrastructure/buz_kafka/exceptions/retry_exception.py,sha256=Fq9kvI3DpFsGD3x2icmQ1fYIsuKZAFqI3tCibAuEtSQ,441
+buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py,sha256=OGPuoFGKkaV0pLyBKqv_kHz-xCRFWcqHmJBGOfmJTfc,5845
 buz/event/infrastructure/buz_kafka/kafka_event_subscriber_executor.py,sha256=EyG2vsFYErWAyqxdXqSwxx5Zi_y0d6i0h05XavJMnxg,254
-buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py,sha256=S24FMC4oEilO1kx5q1f-_p2Jl54ATQhINPwHeU_ZyME,4835
+buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py,sha256=S9ECzWpUQm7YKEtOFuzZMZvRVaiAMyMVfNksJ7Jno9A,5600
 buz/event/infrastructure/buz_kafka/publish_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/publish_strategy/publish_strategy.py,sha256=zIkgMnUU7ueG6QHEubMzdTHOtqdldIbS7k5FDLNmqVk,178
 buz/event/infrastructure/buz_kafka/publish_strategy/topic_per_event_kafka_publish_strategy.py,sha256=aLKj6GyLJNcMbuDA1QBa-RzWKBHEorBuPFkkqo_H60k,405
@@ -79,7 +80,7 @@ buz/event/infrastructure/kombu/retry_strategy/simple_publish_retry_policy.py,sha
 buz/event/infrastructure/models/consuming_task.py,sha256=GJvn6fGTN5ZQJaOuQCX17JP7SInIGvTLTk7DLoqnLQ4,302
 buz/event/infrastructure/queue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/meta_base_subscriber.py,sha256=IP2Siol98OmoeCoOISiaCJxgxJG2SCrhmbAN4t01aWg,698
-buz/event/meta_subscriber.py,sha256=ieCOtOD2JTXizyFxisBZ4-d_4MvCmIW4BksstngV8oI,265
+buz/event/meta_subscriber.py,sha256=JPhhRqHkDOBWhuqtPmseUtAoYde1OmTBViqVbLBhvME,359
 buz/event/middleware/__init__.py,sha256=1_33sdvRejCF4mHuKVkbldeJde6Y2jYtSrB5vMs0Rfo,773
 buz/event/middleware/async_consume_middleware.py,sha256=314z7ZyhvQIvi90kEO0t-FlnHSyRjArk3RqKOdDE6bM,459
 buz/event/middleware/async_consume_middleware_chain_resolver.py,sha256=Hw75JAs5pyZVDi7-nD4I1nbUXjwYpHQW9PctafGS4ks,1193
@@ -136,7 +137,7 @@ buz/event/worker.py,sha256=BL9TXB_kyr0Avql9fIcFm3CDNnXPvZB6O6BxVwjtCdA,942
 buz/handler.py,sha256=cZqV1NDPGVZQgJ3YSBDhOQ1sdJGdUopxi57yQ6fbPvc,272
 buz/kafka/__init__.py,sha256=R3fcyET-SNEAvk_XlBQbHIbQVb63Qiz6lVrif3nDhNU,3435
 buz/kafka/domain/exceptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py,sha256=9zDWoh0SbHLRuCvpfIGcvrmcscKsXpbAPIxr5-z-GYg,296
+buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py,sha256=1Ky6gDh_baD6cGB0MBnjbkkLcw2zQU_kFXPpDZn56z0,400
 buz/kafka/domain/exceptions/not_valid_kafka_message_exception.py,sha256=Dn6I_-eGQnOuu5WW24oKGOdKOu4EdM8ByH3DLAbz5SY,57
 buz/kafka/domain/exceptions/not_valid_partition_number_exception.py,sha256=YZyGbblHk6ON9sBtjRQTDa-nC88i4oe14_VSO8vSTm0,337
 buz/kafka/domain/exceptions/topic_already_created_exception.py,sha256=UrisdveZGa2BB0ko4mS7-5fwy8eGsIu409_grtq1r9k,333
@@ -181,7 +182,7 @@ buz/kafka/infrastructure/interfaces/async_connection_manager.py,sha256=JbaLu5UVV
 buz/kafka/infrastructure/interfaces/connection_manager.py,sha256=EWnvShJHOg8QYe6a3ma0urjKjmVMDBi7q8T2cv_i_MQ,200
 buz/kafka/infrastructure/kafka_python/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/infrastructure/kafka_python/exception/consumer_interrupted_exception.py,sha256=fqhgV7HILdVdv-p1CsOIaaESKY2ZXBtRGYbrVSdPLg0,164
-buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=OwyDCXXMA3-yhBRVW4iRCr9M9DJQ65QBtEKx032z1rs,16140
+buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=_zTOY3ihiXESUlDj0SECEvixt9MMny0xBGzFPix0ZYM,16241
 buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py,sha256=5xP23dQ7FDuy7dIWNw39C3bMVmaUj9ZQhEEJISRv9ec,2986
 buz/kafka/infrastructure/kafka_python/kafka_python_producer.py,sha256=DkqqLSSXHBf4SXXf-IZwwLhxWrGE95Jg4MO_3RDsikU,3594
 buz/kafka/infrastructure/kafka_python/translators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -244,7 +245,7 @@ buz/serializer/message_to_json_bytes_serializer.py,sha256=RGZJ64t4t4Pz2FCASZZCv-
 buz/wrapper/__init__.py,sha256=GnRdJFcncn-qp0hzDG9dBHLmTJSbHFVjE_yr-MdW_n4,77
 buz/wrapper/async_to_sync.py,sha256=OfK-vrVUhuN-LLLvekLdMbQYtH0ue5lfbvuasj6ovMI,698
 buz/wrapper/event_loop.py,sha256=pfBJ1g-8A2a3YgW8Gf9Fg0kkewoh3-wgTy2KIFDyfHk,266
-buz-2.14.1.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
-buz-2.14.1.dist-info/METADATA,sha256=WL8Cp60N7_OJhpYuwyBLADYJ5cVohFC29oKmBRDri00,1617
-buz-2.14.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-buz-2.14.1.dist-info/RECORD,,
+buz-2.14.3.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
+buz-2.14.3.dist-info/METADATA,sha256=mIxvjnK8YV9oyOgKXUnEoF_1OfXv1DuzTguIlFX6z_g,1659
+buz-2.14.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+buz-2.14.3.dist-info/RECORD,,