buz 2.14.0__py3-none-any.whl → 2.14.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,7 +4,6 @@ from typing import Optional, Sequence, Type, TypeVar
  from aiokafka.coordinator.assignors.abstract import AbstractPartitionAssignor

  from buz.event import Event
-
  from buz.event.async_subscriber import AsyncSubscriber
  from buz.event.infrastructure.buz_kafka.base_buz_aiokafka_async_consumer import BaseBuzAIOKafkaAsyncConsumer
  from buz.event.infrastructure.buz_kafka.consume_strategy.consume_strategy import KafkaConsumeStrategy
@@ -15,11 +14,11 @@ from buz.event.meta_subscriber import MetaSubscriber
  from buz.event.middleware.async_consume_middleware import AsyncConsumeMiddleware
  from buz.event.strategies.retry.consume_retrier import ConsumeRetrier
  from buz.event.strategies.retry.reject_callback import RejectCallback
-
  from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTopicConfiguration
  from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
  from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
  from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+ from buz.kafka.infrastructure.deserializers.byte_deserializer import ByteDeserializer
  from buz.kafka.infrastructure.deserializers.bytes_to_message_deserializer import BytesToMessageDeserializer
  from buz.kafka.infrastructure.deserializers.implementations.json_bytes_to_message_deserializer import (
  JSONBytesToMessageDeserializer,
@@ -84,7 +83,9 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
  f"Subscriber {subscriber.__class__.__name__} is not a subclass of Subscriber, probably you are trying to use a synchronous subscriber"
  )

- byte_deserializer = self._deserializers_per_subscriber.get(subscriber) or JSONBytesToMessageDeserializer(
+ byte_deserializer: ByteDeserializer[Event] = self._deserializers_per_subscriber.get(
+ subscriber
+ ) or JSONBytesToMessageDeserializer(
  # todo: it looks like in next python versions the inference engine is powerful enough to ensure this type, so we can remove it when we upgrade the python version of the library
  event_class=subscriber.handles() # type: ignore
  )
@@ -19,6 +19,7 @@ from buz.kafka.domain.models.auto_create_topic_configuration import AutoCreateTo
  from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
  from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
  from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+ from buz.kafka.infrastructure.deserializers.byte_deserializer import ByteDeserializer
  from buz.kafka.infrastructure.deserializers.bytes_to_message_deserializer import BytesToMessageDeserializer
  from buz.kafka.infrastructure.deserializers.implementations.json_bytes_to_message_deserializer import (
  JSONBytesToMessageDeserializer,
@@ -83,7 +84,9 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
  f"Subscriber {subscriber.__class__.__name__} is not a subclass of Subscriber, probably you are trying to use an asynchronous subscriber"
  )

- byte_deserializer = self._deserializers_per_subscriber.get(subscriber) or JSONBytesToMessageDeserializer(
+ byte_deserializer: ByteDeserializer[Event] = self._deserializers_per_subscriber.get(
+ subscriber
+ ) or JSONBytesToMessageDeserializer(
  # todo: it looks like in next python versions the inference engine is powerful enough to ensure this type, so we can remove it when we upgrade the python version of the library
  event_class=subscriber.handles() # type: ignore
  )
@@ -0,0 +1,12 @@
+ class ConsumerRetryException(Exception):
+ def __init__(
+ self,
+ *,
+ event_id: str,
+ subscriber_fqn: str,
+ number_of_executions: int,
+ ) -> None:
+ super().__init__(
+ f"An exception happened during the consumption of the event '{event_id}' by the subscriber '{subscriber_fqn}' "
+ + f"during execution number '{number_of_executions}'. Retrying the consumption..."
+ )
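The executors further down in this diff do not raise this exception; they pass an instance of it to logger.warning() together with exc_info. Python's logging module calls str() on a non-string message, so the text built in __init__ above becomes the log line. A minimal sketch of that pattern, assuming a plain logging.Logger; the event id and subscriber FQN are placeholder values, not taken from the package:

    import logging

    logger = logging.getLogger("sketch")

    try:
        raise ValueError("subscriber failed")  # stand-in for the real consumption error
    except Exception as exception:
        # The exception instance is used as the log message; logging str()s it.
        logger.warning(
            ConsumerRetryException(
                event_id="00000000-0000-0000-0000-000000000000",
                subscriber_fqn="example.subscriber",
                number_of_executions=1,
            ),
            exc_info=exception,
        )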
@@ -6,6 +6,7 @@ from typing import Optional, Sequence, cast
  from buz.event import Event
  from buz.event.async_subscriber import AsyncSubscriber
  from buz.event.infrastructure.buz_kafka.consume_strategy.kafka_on_fail_strategy import KafkaOnFailStrategy
+ from buz.event.infrastructure.buz_kafka.exceptions.retry_exception import ConsumerRetryException
  from buz.event.infrastructure.buz_kafka.kafka_event_subscriber_executor import KafkaEventSubscriberExecutor
  from buz.event.middleware.async_consume_middleware import AsyncConsumeMiddleware
  from buz.event.middleware.async_consume_middleware_chain_resolver import AsyncConsumeMiddlewareChainResolver
@@ -26,7 +27,7 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
  logger: Logger,
  consume_middlewares: Optional[Sequence[AsyncConsumeMiddleware]] = None,
  seconds_between_retries: float = 5,
- byte_deserializer: ByteDeserializer,
+ byte_deserializer: ByteDeserializer[Event],
  header_deserializer: KafkaHeaderSerializer,
  on_fail_strategy: KafkaOnFailStrategy,
  consume_retrier: Optional[ConsumeRetrier] = None,
@@ -53,10 +54,17 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):

  kafka_record_value = cast(bytes, kafka_poll_record.value)

+ deserialized_value = self.__byte_deserializer.deserialize(kafka_record_value)
+
+ self.__logger.info(
+ f"consuming the event '{deserialized_value.id}' by the subscriber '{self.__subscriber.fqn()}', "
+ + f"topic: '{kafka_poll_record.topic}', partition: '{kafka_poll_record.partition}', offset: '{kafka_poll_record.offset}'"
+ )
+
  await self.__consumption_callback(
  self.__subscriber,
  KafkaConsumerRecord(
- value=self.__byte_deserializer.deserialize(kafka_record_value),
+ value=deserialized_value,
  headers=self.__header_deserializer.deserialize(kafka_poll_record.headers),
  ),
  )
@@ -78,20 +86,28 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):

  async def __perform_consume(self, event: Event, subscriber: AsyncSubscriber) -> None:
  should_retry = True
+ number_of_executions = 0
  while should_retry is True:
  try:
+ number_of_executions += 1
  await subscriber.consume(event)
  return
  except Exception as exception:
- self.__logger.exception(
- f"Event {event.id} could not be consumed by the subscriber, error: {traceback.format_exc()}"
- )
-
  if self.__should_retry(event, subscriber) is True:
+ self.__logger.warning(
+ ConsumerRetryException(
+ number_of_executions=number_of_executions,
+ event_id=event.id,
+ subscriber_fqn=subscriber.fqn(),
+ ),
+ exc_info=exception,
+ )
  self.__register_retry(event, subscriber)
  await sleep(self.__seconds_between_retires)
  continue

+ self.__logger.exception(exception)
+
  if self.__reject_callback:
  self.__reject_callback.on_reject(event=event, subscribers=[subscriber], exception=exception)

@@ -4,6 +4,7 @@ import time
  from typing import Optional, Sequence, cast
  from buz.event import Event
  from buz.event.infrastructure.buz_kafka.consume_strategy.kafka_on_fail_strategy import KafkaOnFailStrategy
+ from buz.event.infrastructure.buz_kafka.exceptions.retry_exception import ConsumerRetryException
  from buz.event.infrastructure.buz_kafka.kafka_event_subscriber_executor import KafkaEventSubscriberExecutor
  from buz.event.middleware.consume_middleware import ConsumeMiddleware
  from buz.event.middleware.consume_middleware_chain_resolver import ConsumeMiddlewareChainResolver
@@ -25,7 +26,7 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
  logger: Logger,
  consume_middlewares: Optional[Sequence[ConsumeMiddleware]] = None,
  seconds_between_retries: float = 5,
- byte_deserializer: ByteDeserializer,
+ byte_deserializer: ByteDeserializer[Event],
  header_deserializer: KafkaHeaderSerializer,
  on_fail_strategy: KafkaOnFailStrategy,
  consume_retrier: Optional[ConsumeRetrier] = None,
@@ -52,12 +53,19 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):

  kafka_record_value = cast(bytes, kafka_poll_record.value)

+ deserialized_value = self.__byte_deserializer.deserialize(kafka_record_value)
+
+ self.__logger.info(
+ f"consuming the event '{deserialized_value.id}' by the subscriber '{self.__subscriber.fqn()}', "
+ + f"topic: '{kafka_poll_record.topic}', partition: '{kafka_poll_record.partition}', offset: '{kafka_poll_record.offset}'"
+ )
+
  await get_running_loop().run_in_executor(
  None,
  lambda: self.__execution_callback(
  self.__subscriber,
  KafkaConsumerRecord(
- value=self.__byte_deserializer.deserialize(kafka_record_value),
+ value=deserialized_value,
  headers=self.__header_deserializer.deserialize(kafka_poll_record.headers),
  ),
  ),
@@ -75,13 +83,22 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):

  def __perform_consume(self, event: Event, subscriber: Subscriber) -> None:
  should_retry = True
+ execution_number = 0
  while should_retry is True:
  try:
+ execution_number += 1
  subscriber.consume(event)
  return
  except Exception as exception:
- self.__logger.warning(f"Event {event.id} could not be consumed by the subscriber {subscriber.fqn}")
  if self.__should_retry(event, subscriber) is True:
+ self.__logger.warning(
+ ConsumerRetryException(
+ number_of_executions=execution_number,
+ event_id=event.id,
+ subscriber_fqn=subscriber.fqn(),
+ ),
+ exc_info=exception,
+ )
  self.__register_retry(event, subscriber)
  time.sleep(self.__seconds_between_retires)
  continue
@@ -1,5 +1,5 @@
  from abc import ABC, abstractmethod
- from typing import Awaitable, Union
+ from typing import Awaitable, Type, Union

  from buz import Handler
  from buz.event import Event
@@ -9,3 +9,8 @@ class MetaSubscriber(Handler, ABC):
  @abstractmethod
  def consume(self, event: Event) -> Union[None, Awaitable[None]]:
  pass
+
+ @classmethod
+ @abstractmethod
+ def handles(cls) -> Type[Event]:
+ pass
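The new handles() abstract classmethod is what lets the consumers above fall back to JSONBytesToMessageDeserializer(event_class=subscriber.handles()) when no explicit deserializer is configured for a subscriber. A rough sketch of a conforming subscriber, assuming a hypothetical OrderCreated event class and omitting any other abstract members that Handler or the concrete subscriber base classes may require:

    from typing import Type

    from buz.event import Event


    class OrderCreated(Event):  # hypothetical event type
        pass


    class OrderCreatedSubscriber(MetaSubscriber):  # hypothetical subscriber
        @classmethod
        def handles(cls) -> Type[Event]:
            # Tells the consumer which event class this subscriber expects.
            return OrderCreated

        def consume(self, event: Event) -> None:
            pass  # handle the event here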
@@ -2,7 +2,12 @@ from __future__ import annotations


  class NotAllPartitionAssignedException(Exception):
- def __init__(self, topic_name: str) -> None:
+ def __init__(
+ self,
+ *,
+ topic_name: str,
+ consumer_group: str,
+ ) -> None:
  super().__init__(
- f'Not all the partition were assigned for the topic "{topic_name}", please disconnect the rest of subscribers'
+ f'Not all the partitions in the consumer group "{consumer_group}" were assigned in the topic "{topic_name}". Please disconnect the rest of subscribers'
  )
@@ -0,0 +1,10 @@
+ class NotValidPartitionNumberException(Exception):
+ def __init__(
+ self,
+ *,
+ partition_number: int,
+ min_partition_number: int,
+ ) -> None:
+ super().__init__(
+ f'"{partition_number}" is not a valid partition number, the minimum partition number is "{min_partition_number}"'
+ )
@@ -40,6 +40,10 @@ class KafkaAdminClient(ConnectionManager, ABC):
  ) -> set[str]:
  pass

+ @abstractmethod
+ def get_number_of_partitions(self, topic: str) -> int:
+ pass
+
  # This function moves the following offset from the provided date
  # if there are no messages with a date greater than the provided offset
  # the offset will be moved to the end
@@ -52,3 +56,12 @@ class KafkaAdminClient(ConnectionManager, ABC):
  target_datetime: datetime,
  ) -> None:
  pass
+
+ @abstractmethod
+ def increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
+ self,
+ *,
+ topic: str,
+ new_number_of_partitions: int,
+ ) -> None:
+ pass
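Together, the two new abstract methods expose partition growth plus consumer-group rewinding through the admin interface; the KafkaPythonAdminClient changes later in this diff provide the concrete implementation. A hedged usage sketch (the topic name, partition count, and the already-configured client are placeholders, not values from the package):

    admin_client: KafkaAdminClient = ...  # e.g. a configured KafkaPythonAdminClient

    current_partitions = admin_client.get_number_of_partitions("orders")  # "orders" is a placeholder topic
    admin_client.increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
        topic="orders",
        new_number_of_partitions=current_partitions + 2,
    )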
@@ -8,10 +8,12 @@ from typing import Any, Callable, Optional, Sequence, cast
  from cachetools import TTLCache
  from kafka import KafkaClient, KafkaConsumer
  from kafka.admin import KafkaAdminClient as KafkaPythonLibraryAdminClient, NewTopic
+ from kafka.admin.new_partitions import NewPartitions
  from kafka.errors import TopicAlreadyExistsError
  from kafka.structs import TopicPartition, OffsetAndTimestamp

  from buz.kafka.domain.exceptions.not_all_partition_assigned_exception import NotAllPartitionAssignedException
+ from buz.kafka.domain.exceptions.not_valid_partition_number_exception import NotValidPartitionNumberException
  from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
  from buz.kafka.domain.exceptions.topic_not_found_exception import TopicNotFoundException
  from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
@@ -150,10 +152,10 @@ class KafkaPythonAdminClient(KafkaAdminClient):
  group_ids=subscription_groups,
  )

- def get_subscription_groups(
+ def get_cluster_consumer_groups(
  self,
  ) -> set[str]:
- return set(self._get_kafka_admin().list_consumer_groups())
+ return set([consumer_group_tuple[0] for consumer_group_tuple in self._get_kafka_admin().list_consumer_groups()])

  def _wait_for_cluster_update(self) -> None:
  future = self._get_kafka_client().cluster.request_update()
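On the rename above: kafka-python's KafkaAdminClient.list_consumer_groups() returns (group_id, protocol_type) tuples rather than bare group ids, which is why the new implementation keeps only the first element of each tuple. A small illustration of the same extraction, assuming an already-connected kafka-python admin client; the group names are placeholders:

    raw_groups = kafka_admin.list_consumer_groups()  # e.g. [("billing", "consumer"), ("orders", "consumer")]
    group_ids = {group_id for group_id, _protocol_type in raw_groups}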
@@ -166,6 +168,46 @@ class KafkaPythonAdminClient(KafkaAdminClient):
  topic: str,
  target_datetime: datetime,
  ) -> None:
+ (consumer, topic_partitions) = self.__get_consumer_with_all_partitions_assigned(
+ consumer_group=consumer_group,
+ topic=topic,
+ )
+
+ offsets_for_date = self.__get_first_offset_after_date(
+ consumer=consumer,
+ topic_partitions=topic_partitions,
+ target_datetime=target_datetime,
+ )
+
+ try:
+ end_offsets = consumer.end_offsets(topic_partitions)
+
+ if end_offsets is None or len(end_offsets.keys()) != len(topic_partitions):
+ raise Exception(f'There was an error extracting the end offsets of the topic "{topic}"')
+
+ for topic_partition in topic_partitions:
+ offset_and_timestamp = offsets_for_date.get(topic_partition)
+ if offset_and_timestamp:
+ self._logger.info(f'moving "{topic_partition}" to the offset "{offset_and_timestamp.offset}"')
+ consumer.seek(topic_partition, offset_and_timestamp.offset)
+ else:
+ self._logger.info(
+ f'moving "{topic_partition}" to the end of the topic because there are no messages later than "{target_datetime}"'
+ )
+ consumer.seek(topic_partition, end_offsets[topic_partition])
+
+ consumer.commit()
+ except Exception as exception:
+ consumer.close()
+ raise exception
+
+ consumer.close()
+
+ def __get_consumer_with_all_partitions_assigned(
+ self,
+ consumer_group: str,
+ topic: str,
+ ) -> tuple[KafkaConsumer, Sequence[TopicPartition]]:
  consumer = KafkaConsumer(
  group_id=consumer_group,
  enable_auto_commit=False,
@@ -175,44 +217,32 @@ class KafkaPythonAdminClient(KafkaAdminClient):
  **self._config_in_library_format,
  )

- partitions = consumer.partitions_for_topic(topic)
-
- if partitions is None:
- raise TopicNotFoundException(topic)
+ try:
+ partitions = self.get_number_of_partitions(topic)

- topic_partitions = [TopicPartition(topic, p) for p in partitions]
- consumer.subscribe(topics=[topic])
+ topic_partitions = [TopicPartition(topic=topic, partition=partition) for partition in range(partitions)]

- self.__force_partition_assignment(consumer)
+ consumer.subscribe(topic)

- # We need all the partitions in order to update the offsets
- if len(consumer.assignment()) != len(topic_partitions):
- raise NotAllPartitionAssignedException(topic)
+ self.__force_partition_assignment(consumer)

- offsets_for_date = self.__get_first_offset_after_date(
- consumer=consumer,
- topic_partitions=topic_partitions,
- target_datetime=target_datetime,
- )
-
- end_offsets = consumer.end_offsets(topic_partitions)
-
- if end_offsets is None or len(end_offsets.keys()) != len(topic_partitions):
- raise Exception(f'There was an error extracting the end offsets of the topic "{topic}"')
-
- for topic_partition in topic_partitions:
- offset_and_timestamp = offsets_for_date.get(topic_partition)
- if offset_and_timestamp:
- self._logger.info(f'moving "{topic_partition}" to the offset "{offset_and_timestamp.offset}"')
- consumer.seek(topic_partition, offset_and_timestamp.offset)
- else:
- self._logger.info(
- f'moving "{topic_partition}" to the end of the topic because there are no messages later than "{target_datetime}"'
+ # We need all the partitions in order to update the offsets
+ if len(consumer.assignment()) != len(topic_partitions):
+ raise NotAllPartitionAssignedException(
+ topic_name=topic,
+ consumer_group=consumer_group,
  )
- consumer.seek(topic_partition, end_offsets[topic_partition])

- consumer.commit()
- consumer.close()
+ # This could produce a race condition, but it is a limitation of kafka admin (we are not able to check if all the partition are assigned using the manual assignment)
+ # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/group.py#L430
+ consumer.unsubscribe()
+ consumer.assign(topic_partitions)
+ self.__force_partition_assignment(consumer)
+
+ return (consumer, topic_partitions)
+ except Exception as exception:
+ consumer.close()
+ raise exception

  def __get_first_offset_after_date(
  self,
@@ -235,3 +265,138 @@ class KafkaPythonAdminClient(KafkaAdminClient):
  # We are not to commit the new offset, but we need to execute a polling in order to start the partition assignment
  def __force_partition_assignment(self, consumer: KafkaConsumer) -> None:
  consumer.poll(max_records=1, timeout_ms=0)
+
+ def increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
+ self,
+ *,
+ topic: str,
+ new_number_of_partitions: int,
+ ) -> None:
+ self._logger.info(
+ f'Increasing topic "{topic}" partitions: Verifying the new number of partitions "{new_number_of_partitions}"'
+ )
+
+ previous_partitions_number = self.get_number_of_partitions(topic)
+ topic_partitions = [
+ TopicPartition(topic=topic, partition=partition) for partition in range(previous_partitions_number)
+ ]
+
+ if previous_partitions_number >= new_number_of_partitions:
+ raise NotValidPartitionNumberException(
+ partition_number=new_number_of_partitions,
+ min_partition_number=len(topic_partitions),
+ )
+
+ self._logger.info(f'Increasing topic "{topic}" partitions: Extracting related consumer groups')
+ related_consumer_groups = self.__get_consumer_groups_related_to_a_topic(topic_partitions)
+
+ self._logger.info(
+ f'Increasing topic "{topic}" partitions: The following consumer groups will be updated:"{related_consumer_groups}"'
+ )
+
+ consumers_to_update: list[KafkaConsumer] = []
+ new_partitions_consumer: Optional[KafkaConsumer] = None
+
+ try:
+ for consumer_group in related_consumer_groups:
+ self._logger.info(
+ f'Increasing topic "{topic}" partitions: Requesting the assignment of the partitions of the group "{consumer_group}"'
+ )
+ (consumer_with_all_partitions, _) = self.__get_consumer_with_all_partitions_assigned(
+ consumer_group=consumer_group,
+ topic=topic,
+ )
+ consumers_to_update.append(consumer_with_all_partitions)
+
+ self._logger.info(
+ f'Increasing topic "{topic}" partitions: Incrementing the partition to "{new_number_of_partitions}"'
+ )
+
+ self._get_kafka_admin().create_partitions(
+ {
+ topic: NewPartitions(total_count=new_number_of_partitions),
+ }
+ )
+
+ new_partitions = [
+ TopicPartition(
+ topic=topic,
+ partition=partition_index,
+ )
+ for partition_index in range(previous_partitions_number, new_number_of_partitions)
+ ]
+
+ for consumer_group in related_consumer_groups:
+ self._logger.info(
+ f'Increasing topic "{topic}" partitions: Moving the offset of the consumer group "{consumer_group}" to the beginning of the new partitions'
+ )
+ # We need to create a new consumer because kafka-python has a limitation that does not allow to assign specific partitions to a consumer subscribed to an entire topic
+ new_partitions_consumer = KafkaConsumer(
+ group_id=consumer_group,
+ enable_auto_commit=False,
+ auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+ ConsumerInitialOffsetPosition.BEGINNING
+ ),
+ **self._config_in_library_format,
+ )
+ new_partitions_consumer.assign(new_partitions)
+ for new_partition in new_partitions:
+ new_partitions_consumer.seek(new_partition, 0)
+ new_partitions_consumer.commit()
+ new_partitions_consumer.close()
+
+ self._logger.info(f'Increasing topic "{topic}" partitions: Process complete')
+
+ except Exception as exception:
+ for consumer_with_all_partitions in consumers_to_update:
+ consumer_with_all_partitions.close()
+
+ if new_partitions_consumer is not None:
+ new_partitions_consumer.close()
+
+ self._logger.error(f'Increasing topic "{topic}" partitions: unexpected error {exception}')
+ raise exception
+
+ return
+
+ def get_number_of_partitions(self, topic: str) -> int:
+ consumer = KafkaConsumer(
+ enable_auto_commit=False,
+ auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+ ConsumerInitialOffsetPosition.BEGINNING
+ ),
+ **self._config_in_library_format,
+ )
+
+ try:
+ partitions = consumer.partitions_for_topic(topic)
+ if partitions is None:
+ raise TopicNotFoundException(topic_name=topic)
+
+ return len(partitions)
+ except Exception as exception:
+ consumer.close()
+ raise exception
+
+ # The purpose of this function is to get all the consumer groups that are consuming from the topic
+ # It is a heavy tasks because we need to get the offset of all the partitions of the topic
+ def __get_consumer_groups_related_to_a_topic(self, topic_partitions: Sequence[TopicPartition]) -> set[str]:
+ cluster_consumer_groups = self.get_cluster_consumer_groups()
+
+ related_consumer_groups: set[str] = set()
+
+ for consumer_group in cluster_consumer_groups:
+ partitions_offsets = list(
+ self._get_kafka_admin()
+ .list_consumer_group_offsets(consumer_group, partitions=topic_partitions)
+ .values()
+ )
+
+ partitions_with_valid_offsets = [partition for partition in partitions_offsets if partition.offset != -1]
+
+ if len(partitions_with_valid_offsets) == 0:
+ continue
+
+ related_consumer_groups.add(consumer_group)
+
+ return related_consumer_groups
@@ -88,5 +88,5 @@ class KafkaPythonAdminTestClient(KafkaPythonAdminClient, KafkaAdminTestClient):
  self,
  ) -> None:
  self.delete_topics(topics=self.get_topics())
- self.delete_subscription_groups(subscription_groups=self.get_subscription_groups())
+ self.delete_subscription_groups(subscription_groups=self.get_cluster_consumer_groups())
  self._wait_for_cluster_update()
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: buz
- Version: 2.14.0
+ Version: 2.14.2
  Summary: Buz is a set of light, simple and extensible implementations of event, command and query buses.
  License: MIT
  Author: Luis Pintado Lozano
@@ -48,8 +48,8 @@ buz/event/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  buz/event/infrastructure/buz_kafka/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py,sha256=SyLblUVlwWOaNfZzK7vL6Ee4m-85vZVCH0rjOgqVAww,4913
  buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=E9Sy6IDZrywowcO9qIOJF5zjFvnE4CncTiZD3VC-554,13793
- buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=J_9NhImjlotueksFQ5mJ80Uto3BSgCJvOxJ29pzbW-U,5601
- buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=ECuWXI2Es5W6mEplZ783JCGx6VvvG3VLp8TzobwuH_8,5542
+ buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=u69_YYDsztq4cZDKQPo4x8FPIx-NRzHJe1SCq0GaCjg,5732
+ buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=Si7bqWfsKqOjH6PIxnqtOgO_fSviVJbt2G9avv2DgiM,5675
  buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py,sha256=ymRSvcYVgbVCPgHN6rMBVBHQ5heCSwCDl6EffyqGVX8,4601
  buz/event/infrastructure/buz_kafka/consume_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/event/infrastructure/buz_kafka/consume_strategy/consume_strategy.py,sha256=RqlXe5W2S6rH3FTr--tcxzFJTAVLb-Dhl7m6qjgNz2M,331
@@ -57,9 +57,10 @@ buz/event/infrastructure/buz_kafka/consume_strategy/kafka_on_fail_strategy.py,sh
  buz/event/infrastructure/buz_kafka/consume_strategy/topic_and_subscription_group_per_subscriber_kafka_consumer_strategy.py,sha256=LX9mARKCLKyyo4loxegxR_s4qNP1QfabXXYp4Keusts,634
  buz/event/infrastructure/buz_kafka/exceptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/event/infrastructure/buz_kafka/exceptions/kafka_event_bus_config_not_valid_exception.py,sha256=VUKZXA2ygjg21P4DADFl_Tace6RwSXia1MRYvJypxbM,135
- buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py,sha256=ULM5I35hmgUAYJobTYyCgMXYYhDvLNhRd5S_7tMUMog,5073
+ buz/event/infrastructure/buz_kafka/exceptions/retry_exception.py,sha256=Fq9kvI3DpFsGD3x2icmQ1fYIsuKZAFqI3tCibAuEtSQ,441
+ buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py,sha256=OGPuoFGKkaV0pLyBKqv_kHz-xCRFWcqHmJBGOfmJTfc,5845
  buz/event/infrastructure/buz_kafka/kafka_event_subscriber_executor.py,sha256=EyG2vsFYErWAyqxdXqSwxx5Zi_y0d6i0h05XavJMnxg,254
- buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py,sha256=S24FMC4oEilO1kx5q1f-_p2Jl54ATQhINPwHeU_ZyME,4835
+ buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py,sha256=S9ECzWpUQm7YKEtOFuzZMZvRVaiAMyMVfNksJ7Jno9A,5600
  buz/event/infrastructure/buz_kafka/publish_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/event/infrastructure/buz_kafka/publish_strategy/publish_strategy.py,sha256=zIkgMnUU7ueG6QHEubMzdTHOtqdldIbS7k5FDLNmqVk,178
  buz/event/infrastructure/buz_kafka/publish_strategy/topic_per_event_kafka_publish_strategy.py,sha256=aLKj6GyLJNcMbuDA1QBa-RzWKBHEorBuPFkkqo_H60k,405
@@ -79,7 +80,7 @@ buz/event/infrastructure/kombu/retry_strategy/simple_publish_retry_policy.py,sha
  buz/event/infrastructure/models/consuming_task.py,sha256=GJvn6fGTN5ZQJaOuQCX17JP7SInIGvTLTk7DLoqnLQ4,302
  buz/event/infrastructure/queue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/event/meta_base_subscriber.py,sha256=IP2Siol98OmoeCoOISiaCJxgxJG2SCrhmbAN4t01aWg,698
- buz/event/meta_subscriber.py,sha256=ieCOtOD2JTXizyFxisBZ4-d_4MvCmIW4BksstngV8oI,265
+ buz/event/meta_subscriber.py,sha256=JPhhRqHkDOBWhuqtPmseUtAoYde1OmTBViqVbLBhvME,359
  buz/event/middleware/__init__.py,sha256=1_33sdvRejCF4mHuKVkbldeJde6Y2jYtSrB5vMs0Rfo,773
  buz/event/middleware/async_consume_middleware.py,sha256=314z7ZyhvQIvi90kEO0t-FlnHSyRjArk3RqKOdDE6bM,459
  buz/event/middleware/async_consume_middleware_chain_resolver.py,sha256=Hw75JAs5pyZVDi7-nD4I1nbUXjwYpHQW9PctafGS4ks,1193
@@ -136,8 +137,9 @@ buz/event/worker.py,sha256=BL9TXB_kyr0Avql9fIcFm3CDNnXPvZB6O6BxVwjtCdA,942
  buz/handler.py,sha256=cZqV1NDPGVZQgJ3YSBDhOQ1sdJGdUopxi57yQ6fbPvc,272
  buz/kafka/__init__.py,sha256=R3fcyET-SNEAvk_XlBQbHIbQVb63Qiz6lVrif3nDhNU,3435
  buz/kafka/domain/exceptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py,sha256=9zDWoh0SbHLRuCvpfIGcvrmcscKsXpbAPIxr5-z-GYg,296
+ buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py,sha256=1Ky6gDh_baD6cGB0MBnjbkkLcw2zQU_kFXPpDZn56z0,400
  buz/kafka/domain/exceptions/not_valid_kafka_message_exception.py,sha256=Dn6I_-eGQnOuu5WW24oKGOdKOu4EdM8ByH3DLAbz5SY,57
+ buz/kafka/domain/exceptions/not_valid_partition_number_exception.py,sha256=YZyGbblHk6ON9sBtjRQTDa-nC88i4oe14_VSO8vSTm0,337
  buz/kafka/domain/exceptions/topic_already_created_exception.py,sha256=UrisdveZGa2BB0ko4mS7-5fwy8eGsIu409_grtq1r9k,333
  buz/kafka/domain/exceptions/topic_not_found_exception.py,sha256=kLuqGqfsb6YTCe5UCKpMwBm_QAnU9Udfb8bWajPoA8k,201
  buz/kafka/domain/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -155,7 +157,7 @@ buz/kafka/domain/models/kafka_supported_sasl_mechanisms.py,sha256=ASyDaFgseQRcUJ
  buz/kafka/domain/models/kafka_supported_security_protocols.py,sha256=ffY2-9sOj4XIkJTSQVkqeOb4KnuqEYXISDarfDN8r9Q,161
  buz/kafka/domain/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/kafka/domain/services/async_kafka_producer.py,sha256=gSq3WwEVux_gp3EKDAMN1WsM027uklB58E-WnKpyhPs,533
- buz/kafka/domain/services/kafka_admin_client.py,sha256=XE6H-4JWsjygKjdtLtGMX5ELhWkJcpR7ai9CO2kB98Y,1222
+ buz/kafka/domain/services/kafka_admin_client.py,sha256=Kh_w-qWEY8rsrlYjnJT1FrJLVZrO3l2LeRAWFyc_nOg,1558
  buz/kafka/domain/services/kafka_admin_test_client.py,sha256=91l_vFIo1yhJLQQCC_OmeXZ5F429zP7Hx5g4FNllpfE,1625
  buz/kafka/domain/services/kafka_producer.py,sha256=8bLTV328orrPHcARzkc6no4vyJzrArVtCsjmSRXDjos,506
  buz/kafka/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -180,8 +182,8 @@ buz/kafka/infrastructure/interfaces/async_connection_manager.py,sha256=JbaLu5UVV
  buz/kafka/infrastructure/interfaces/connection_manager.py,sha256=EWnvShJHOg8QYe6a3ma0urjKjmVMDBi7q8T2cv_i_MQ,200
  buz/kafka/infrastructure/kafka_python/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/kafka/infrastructure/kafka_python/exception/consumer_interrupted_exception.py,sha256=fqhgV7HILdVdv-p1CsOIaaESKY2ZXBtRGYbrVSdPLg0,164
- buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=7HbwkaoedXbxZ4LcGWytU2q_GTdLm_c1ziyGTBoKgF0,9038
- buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py,sha256=wLhnrHzyFJ9ETWNUfd-dmwm_CwZyiImaAP97cEdRgzE,2982
+ buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=_zTOY3ihiXESUlDj0SECEvixt9MMny0xBGzFPix0ZYM,16241
+ buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py,sha256=5xP23dQ7FDuy7dIWNw39C3bMVmaUj9ZQhEEJISRv9ec,2986
  buz/kafka/infrastructure/kafka_python/kafka_python_producer.py,sha256=DkqqLSSXHBf4SXXf-IZwwLhxWrGE95Jg4MO_3RDsikU,3594
  buz/kafka/infrastructure/kafka_python/translators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  buz/kafka/infrastructure/kafka_python/translators/consumer_initial_offset_position_translator.py,sha256=hJ48_eyMcnbFL_Y5TOiMbGXrQSryuKk9CvP59MdqNOY,620
@@ -243,7 +245,7 @@ buz/serializer/message_to_json_bytes_serializer.py,sha256=RGZJ64t4t4Pz2FCASZZCv-
  buz/wrapper/__init__.py,sha256=GnRdJFcncn-qp0hzDG9dBHLmTJSbHFVjE_yr-MdW_n4,77
  buz/wrapper/async_to_sync.py,sha256=OfK-vrVUhuN-LLLvekLdMbQYtH0ue5lfbvuasj6ovMI,698
  buz/wrapper/event_loop.py,sha256=pfBJ1g-8A2a3YgW8Gf9Fg0kkewoh3-wgTy2KIFDyfHk,266
- buz-2.14.0.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
- buz-2.14.0.dist-info/METADATA,sha256=f8tigTI_Ee1HBkPHTPfsYHjkOyuBZwlCX6dTeDpjzCQ,1617
- buz-2.14.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- buz-2.14.0.dist-info/RECORD,,
+ buz-2.14.2.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
+ buz-2.14.2.dist-info/METADATA,sha256=LIxzwAWa6gNGKmz32C5UzoI6hMr-g1lc9AWIEoguWno,1617
+ buz-2.14.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ buz-2.14.2.dist-info/RECORD,,