buz 2.18.0-py3-none-any.whl → 2.20.0-py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -66,6 +66,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         heartbeat_interval_ms: Optional[int] = None,
         wait_for_connection_to_cluster_ms: Optional[int] = None,
         worker_instance_id: Optional[str] = None,
+        milliseconds_between_retries: int = 5000,
     ):
         self.__connection_config = connection_config
         self.__consume_strategy = consume_strategy
@@ -106,6 +107,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
         )
         self.__wait_for_connection_to_cluster_ms: Optional[int] = wait_for_connection_to_cluster_ms
         self.__worker_instance_id: Optional[str] = worker_instance_id
+        self.__milliseconds_between_retries: int = milliseconds_between_retries
         self.__polling_tasks_semaphore = Semaphore(self.__max_number_of_concurrent_polling_tasks)
         self.__consumer_and_partition_mutex: dict[str, Lock] = defaultdict(Lock)
         self.__is_worked_initialized = False
@@ -167,6 +169,7 @@ class BaseBuzAIOKafkaAsyncConsumer(AsyncConsumer):
             f" - Seconds between polls if there are no new tasks: {self.__seconds_between_polls_if_there_are_no_new_tasks}\n"
             f" - Max number of concurrent polling tasks: {self.__max_number_of_concurrent_polling_tasks}\n"
             f" - Wait for connection to cluster ms: {self.__wait_for_connection_to_cluster_ms}\n"
+            f" - Milliseconds between retries: {self.__milliseconds_between_retries}ms ({self.__milliseconds_between_retries / 1000.0}s)\n"
             f" - Health check port: {self.__health_check_port}\n"
             f" - Number of subscribers: {len(self.__subscribers)}",
         )
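For reference, the added startup-log fragment evaluated with the default value of 5000 looks as follows; this snippet only illustrates the new f-string and is not library code.

milliseconds_between_retries = 5000  # default value of the new constructor argument
print(f" - Milliseconds between retries: {milliseconds_between_retries}ms ({milliseconds_between_retries / 1000.0}s)")
# prints: " - Milliseconds between retries: 5000ms (5.0s)"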
@@ -63,6 +63,7 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
         heartbeat_interval_ms: Optional[int] = None,
         health_check_port: Optional[int] = None,
         wait_for_connection_to_cluster_ms: Optional[int] = None,
+        milliseconds_between_retries: int = 5000,
     ):
         super().__init__(
             connection_config=connection_config,
@@ -85,12 +86,14 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
             health_check_port=health_check_port,
             wait_for_connection_to_cluster_ms=wait_for_connection_to_cluster_ms,
             worker_instance_id=worker_instance_id,
+            milliseconds_between_retries=milliseconds_between_retries,
         )
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_middlewares = consume_middlewares
         self.__consume_retrier = consume_retrier
         self.__reject_callback = reject_callback
         self._deserializers_per_subscriber = deserializers_per_subscriber
+        self.__milliseconds_between_retries = milliseconds_between_retries

     async def _create_kafka_consumer_executor(
         self,
@@ -115,12 +118,13 @@ class BuzAIOKafkaAsyncConsumer(BaseBuzAIOKafkaAsyncConsumer):
         )

         return KafkaEventAsyncSubscriberExecutor(
+            subscriber=subscriber,
             logger=self._logger,
+            consume_middlewares=self.__consume_middlewares,
+            milliseconds_between_retries=self.__milliseconds_between_retries,
             byte_deserializer=byte_deserializer,
             header_deserializer=KafkaHeaderSerializer(),
             on_fail_strategy=self.__on_fail_strategy,
-            subscriber=subscriber,
-            consume_middlewares=self.__consume_middlewares,
             consume_retrier=self.__consume_retrier,
             reject_callback=self.__reject_callback,
             cdc_payload_deserializer=cdc_payload_deserializer,
@@ -64,6 +64,7 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
         heartbeat_interval_ms: Optional[int] = None,
         health_check_port: Optional[int] = None,
         wait_for_connection_to_cluster_ms: Optional[int] = None,
+        milliseconds_between_retries: int = 5000,
     ):
         super().__init__(
             connection_config=connection_config,
@@ -86,12 +87,14 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
             heartbeat_interval_ms=heartbeat_interval_ms,
             wait_for_connection_to_cluster_ms=wait_for_connection_to_cluster_ms,
             worker_instance_id=worker_instance_id,
+            milliseconds_between_retries=milliseconds_between_retries,
         )
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_middlewares = consume_middlewares
         self.__consume_retrier = consume_retrier
         self.__reject_callback = reject_callback
         self._deserializers_per_subscriber = deserializers_per_subscriber
+        self.__milliseconds_between_retries = milliseconds_between_retries

     async def _create_kafka_consumer_executor(
         self,
@@ -116,12 +119,13 @@ class BuzAIOKafkaMultiThreadedConsumer(BaseBuzAIOKafkaAsyncConsumer):
         )

         return KafkaEventSyncSubscriberExecutor(
+            subscriber=subscriber,
             logger=self._logger,
+            consume_middlewares=self.__consume_middlewares,
+            milliseconds_between_retries=self.__milliseconds_between_retries,
             byte_deserializer=byte_deserializer,
             header_deserializer=KafkaHeaderSerializer(),
             on_fail_strategy=self.__on_fail_strategy,
-            subscriber=subscriber,
-            consume_middlewares=self.__consume_middlewares,
             consume_retrier=self.__consume_retrier,
             reject_callback=self.__reject_callback,
             cdc_payload_deserializer=cdc_payload_deserializer,
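From the caller's side, the only change to these consumer constructors is the extra keyword argument. A minimal wiring sketch, assuming an application that already builds one of these consumers; the placeholder dict stands in for the existing keyword arguments (connection config, subscribers, strategies, and so on), which are not spelled out in this diff.

from buz.event.infrastructure.buz_kafka.buz_aiokafka_async_consumer import BuzAIOKafkaAsyncConsumer

existing_consumer_kwargs: dict = {}  # placeholder: whatever keyword arguments you already pass today

consumer = BuzAIOKafkaAsyncConsumer(
    **existing_consumer_kwargs,
    milliseconds_between_retries=10_000,  # new keyword; defaults to 5000 (i.e. 5 seconds) when omitted
)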
@@ -37,7 +37,7 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
         subscriber: AsyncSubscriber,
         logger: Logger,
         consume_middlewares: Optional[Sequence[AsyncConsumeMiddleware]] = None,
-        seconds_between_retries: float = 5,
+        milliseconds_between_retries: int = 5000,
         byte_deserializer: ByteDeserializer[Event],
         header_deserializer: KafkaHeaderSerializer,
         on_fail_strategy: KafkaOnFailStrategy,
@@ -48,7 +48,8 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
         self.__subscriber = subscriber
         self.__logger = logger
         self.__consume_middleware_chain_resolver = AsyncConsumeMiddlewareChainResolver(consume_middlewares or [])
-        self.__seconds_between_retires = seconds_between_retries
+        self.__milliseconds_between_retries = milliseconds_between_retries
+        self.__seconds_between_retries = milliseconds_between_retries / 1000.0
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_retrier = consume_retrier
         self.__reject_callback = reject_callback
@@ -158,7 +159,7 @@ class KafkaEventAsyncSubscriberExecutor(KafkaEventSubscriberExecutor):
                 ),
                 exc_info=exception,
             )
-            await sleep(self.__seconds_between_retires)
+            await sleep(self.__seconds_between_retries)
             continue

         self.__logger.exception(exception)
@@ -38,7 +38,7 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
         subscriber: Subscriber,
         logger: Logger,
         consume_middlewares: Optional[Sequence[ConsumeMiddleware]] = None,
-        seconds_between_retries: float = 5,
+        milliseconds_between_retries: int = 5000,
         byte_deserializer: ByteDeserializer[Event],
         header_deserializer: KafkaHeaderSerializer,
         on_fail_strategy: KafkaOnFailStrategy,
@@ -49,7 +49,8 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
         self.__subscriber = subscriber
         self.__logger = logger
         self.__consume_middleware_chain_resolver = ConsumeMiddlewareChainResolver(consume_middlewares or [])
-        self.__seconds_between_retires = seconds_between_retries
+        self.__milliseconds_between_retries = milliseconds_between_retries
+        self.__seconds_between_retries = milliseconds_between_retries / 1000.0
         self.__on_fail_strategy = on_fail_strategy
         self.__consume_retrier = consume_retrier
         self.__reject_callback = reject_callback
@@ -161,7 +162,7 @@ class KafkaEventSyncSubscriberExecutor(KafkaEventSubscriberExecutor):
                 ),
                 exc_info=exception,
             )
-            time.sleep(self.__seconds_between_retires)
+            time.sleep(self.__seconds_between_retries)
             continue

         self.__logger.exception(exception)
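Both executors now take the delay in milliseconds and convert it once, at construction time, to the seconds value their sleep calls use (this also fixes the misspelled attribute name "retires"). A minimal sketch of that conversion, with names mirroring the diff; the helper functions and the surrounding consume/retry loop are only illustrative.

import asyncio
import time

milliseconds_between_retries = 5000  # constructor argument (default)
seconds_between_retries = milliseconds_between_retries / 1000.0  # 5.0, computed once in __init__

async def wait_between_retries_async() -> None:
    # KafkaEventAsyncSubscriberExecutor path: non-blocking wait between consume retries
    await asyncio.sleep(seconds_between_retries)

def wait_between_retries_sync() -> None:
    # KafkaEventSyncSubscriberExecutor path: blocking wait between consume retries
    time.sleep(seconds_between_retries)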
@@ -0,0 +1,6 @@
+from __future__ import annotations
+
+
+class ConsumerGroupNotFoundException(Exception):
+    def __init__(self, consumer_group: str) -> None:
+        super().__init__(f'The consumer group "{consumer_group}" has not been found')
@@ -65,3 +65,11 @@ class KafkaAdminClient(ConnectionManager, ABC):
         new_number_of_partitions: int,
     ) -> None:
         pass
+
+    @abstractmethod
+    def get_consumer_group_offsets(self, *, consumer_group: str, topic: str) -> dict[int, int]:
+        """
+        Get the committed offsets for a consumer group on a specific topic.
+        Returns a dictionary mapping partition numbers to their committed offsets.
+        """
+        pass
@@ -12,6 +12,7 @@ from kafka.admin.new_partitions import NewPartitions
 from kafka.errors import TopicAlreadyExistsError
 from kafka.structs import TopicPartition, OffsetAndTimestamp

+from buz.kafka.domain.exceptions.consumer_group_not_found_exception import ConsumerGroupNotFoundException
 from buz.kafka.domain.exceptions.not_all_partition_assigned_exception import NotAllPartitionAssignedException
 from buz.kafka.domain.exceptions.not_valid_partition_number_exception import NotValidPartitionNumberException
 from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
@@ -400,3 +401,28 @@ class KafkaPythonAdminClient(KafkaAdminClient):
                 related_consumer_groups.add(consumer_group)

         return related_consumer_groups
+
+    def get_consumer_group_offsets(self, *, consumer_group: str, topic: str) -> dict[int, int]:
+        self._logger.info(f'Getting consumer group offsets for group "{consumer_group}" on topic "{topic}"')
+
+        if not self.is_topic_created(topic):
+            raise TopicNotFoundException(topic_name=topic)
+
+        cluster_consumer_groups = self.get_cluster_consumer_groups()
+        if consumer_group not in cluster_consumer_groups:
+            raise ConsumerGroupNotFoundException(consumer_group=consumer_group)
+
+        partitions = self.get_number_of_partitions(topic)
+        topic_partitions = [TopicPartition(topic=topic, partition=partition) for partition in range(partitions)]
+
+        offsets_response = self._get_kafka_admin().list_consumer_group_offsets(
+            consumer_group, partitions=topic_partitions
+        )
+
+        # Build the result dictionary, filtering out partitions with no committed offset (-1)
+        result: dict[int, int] = {}
+        for topic_partition, offset_and_metadata in offsets_response.items():
+            if offset_and_metadata.offset >= 0:
+                result[topic_partition.partition] = offset_and_metadata.offset
+
+        return result
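A minimal usage sketch for the new admin-client method, assuming an already constructed and connected KafkaPythonAdminClient (its construction is outside this diff). It shows the returned partition-to-offset mapping and the consumer-group error case introduced above; a missing topic raises TopicNotFoundException in the same way.

from buz.kafka.domain.exceptions.consumer_group_not_found_exception import ConsumerGroupNotFoundException

def print_committed_offsets(admin_client, consumer_group: str, topic: str) -> None:
    # admin_client is assumed to be a KafkaPythonAdminClient (or any KafkaAdminClient implementation)
    try:
        offsets = admin_client.get_consumer_group_offsets(consumer_group=consumer_group, topic=topic)
    except ConsumerGroupNotFoundException:
        print(f'Consumer group "{consumer_group}" was not found in the cluster')
        return
    # Partitions with no committed offset are omitted from the mapping
    for partition, offset in sorted(offsets.items()):
        print(f"partition {partition}: committed offset {offset}")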
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: buz
-Version: 2.18.0
+Version: 2.20.0
 Summary: Buz is a set of light, simple and extensible implementations of event, command and query buses.
 License: MIT
 Author: Luis Pintado Lozano
@@ -47,9 +47,9 @@ buz/event/exceptions/worker_execution_exception.py,sha256=6mgztvXOCG_9VZ_Jptkk72
 buz/event/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py,sha256=SyLblUVlwWOaNfZzK7vL6Ee4m-85vZVCH0rjOgqVAww,4913
-buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=guVuZUQjxR-1ECuHjJ7Z7DTtIs2E7BSmazuP73dxgcY,21013
-buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=qDICqM8vkNSRgNzi1SZ8QnBMH1u41aWkomXzKUAUjSw,7029
-buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=D059qyCJ7QBvCIXK8BaS8xCtjHjX_ityHadccttVXNk,6972
+buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py,sha256=7ZhaKaFXBpD3HVkuQMpAJvY8lfy7__1wxftLIwCmnMQ,21284
+buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py,sha256=GmmuAZboDkrpNOLF8cE_F0t4I7ZnMiGsiGw4SYIvKGc,7303
+buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py,sha256=ZRLRoBRomqrXAiePSMn4gePF59AWPn6VQpQui1UVnyM,7246
 buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py,sha256=ymRSvcYVgbVCPgHN6rMBVBHQ5heCSwCDl6EffyqGVX8,4601
 buz/event/infrastructure/buz_kafka/consume_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/event/infrastructure/buz_kafka/consume_strategy/consume_strategy.py,sha256=RqlXe5W2S6rH3FTr--tcxzFJTAVLb-Dhl7m6qjgNz2M,331
@@ -59,9 +59,9 @@ buz/event/infrastructure/buz_kafka/exceptions/__init__.py,sha256=47DEQpj8HBSa-_T
 buz/event/infrastructure/buz_kafka/exceptions/kafka_event_bus_config_not_valid_exception.py,sha256=VUKZXA2ygjg21P4DADFl_Tace6RwSXia1MRYvJypxbM,135
 buz/event/infrastructure/buz_kafka/exceptions/max_consumer_retry_exception.py,sha256=5O33uUC8FLILY1C13tQwkfsLSXrmbe0vMaUfBmOuXdU,264
 buz/event/infrastructure/buz_kafka/exceptions/retry_exception.py,sha256=Fq9kvI3DpFsGD3x2icmQ1fYIsuKZAFqI3tCibAuEtSQ,441
-buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py,sha256=jHCj9qVraoRFOc8cnrsavjQ_bbdYOHFsQXKA7ccitPs,8738
+buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py,sha256=JeKEoudfYNdUzuf8YOXHq-DY6wWHbUglE8W9aoETNp4,8833
 buz/event/infrastructure/buz_kafka/kafka_event_subscriber_executor.py,sha256=EyG2vsFYErWAyqxdXqSwxx5Zi_y0d6i0h05XavJMnxg,254
-buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py,sha256=bFHd-xsGaPsy0W80byJNcbf6Tj_OTdsBk7DN8fZklz8,8843
+buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py,sha256=UEiQabnIuFSyds_g7JPQHzWIWa_Jha7fEtlMnKaKLEY,8938
 buz/event/infrastructure/buz_kafka/models/cdc_process_context.py,sha256=4843xU1x42XdF8iwocawrBuz2uVt9bOEeDTG5ghxmik,246
 buz/event/infrastructure/buz_kafka/models/kafka_delivery_context.py,sha256=Kvi1Pq9EvR_UQ6e-DbvB2l3m7rTvq2k4UmEZuHUg-qU,259
 buz/event/infrastructure/buz_kafka/publish_strategy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -148,6 +148,7 @@ buz/event/worker.py,sha256=BL9TXB_kyr0Avql9fIcFm3CDNnXPvZB6O6BxVwjtCdA,942
 buz/handler.py,sha256=W6jSTo5BNV9u9QKBaEMhLIa3tgQocd6oYEJf5K4EfEU,358
 buz/kafka/__init__.py,sha256=R3fcyET-SNEAvk_XlBQbHIbQVb63Qiz6lVrif3nDhNU,3435
 buz/kafka/domain/exceptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+buz/kafka/domain/exceptions/consumer_group_not_found_exception.py,sha256=8My8lM7DZ7JARfHY1MmuW5BWOB1YadiKhnc3cSTPlaI,225
 buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py,sha256=1Ky6gDh_baD6cGB0MBnjbkkLcw2zQU_kFXPpDZn56z0,400
 buz/kafka/domain/exceptions/not_valid_kafka_message_exception.py,sha256=Dn6I_-eGQnOuu5WW24oKGOdKOu4EdM8ByH3DLAbz5SY,57
 buz/kafka/domain/exceptions/not_valid_partition_number_exception.py,sha256=YZyGbblHk6ON9sBtjRQTDa-nC88i4oe14_VSO8vSTm0,337
@@ -168,7 +169,7 @@ buz/kafka/domain/models/kafka_supported_sasl_mechanisms.py,sha256=ASyDaFgseQRcUJ
 buz/kafka/domain/models/kafka_supported_security_protocols.py,sha256=ffY2-9sOj4XIkJTSQVkqeOb4KnuqEYXISDarfDN8r9Q,161
 buz/kafka/domain/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/domain/services/async_kafka_producer.py,sha256=gSq3WwEVux_gp3EKDAMN1WsM027uklB58E-WnKpyhPs,533
-buz/kafka/domain/services/kafka_admin_client.py,sha256=Kh_w-qWEY8rsrlYjnJT1FrJLVZrO3l2LeRAWFyc_nOg,1558
+buz/kafka/domain/services/kafka_admin_client.py,sha256=gjLs7BZwRjBd6qEoL2bNUdpHGPkh51dw1-kmC6I2YHw,1871
 buz/kafka/domain/services/kafka_admin_test_client.py,sha256=91l_vFIo1yhJLQQCC_OmeXZ5F429zP7Hx5g4FNllpfE,1625
 buz/kafka/domain/services/kafka_producer.py,sha256=8bLTV328orrPHcARzkc6no4vyJzrArVtCsjmSRXDjos,506
 buz/kafka/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -198,7 +199,7 @@ buz/kafka/infrastructure/interfaces/async_connection_manager.py,sha256=JbaLu5UVV
 buz/kafka/infrastructure/interfaces/connection_manager.py,sha256=EWnvShJHOg8QYe6a3ma0urjKjmVMDBi7q8T2cv_i_MQ,200
 buz/kafka/infrastructure/kafka_python/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/infrastructure/kafka_python/exception/consumer_interrupted_exception.py,sha256=fqhgV7HILdVdv-p1CsOIaaESKY2ZXBtRGYbrVSdPLg0,164
-buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=_zTOY3ihiXESUlDj0SECEvixt9MMny0xBGzFPix0ZYM,16241
+buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=FRO_zmGdOkiu5wFYmO7v-hLASxR0XT45uDqjHgmg_F4,17544
 buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py,sha256=5xP23dQ7FDuy7dIWNw39C3bMVmaUj9ZQhEEJISRv9ec,2986
 buz/kafka/infrastructure/kafka_python/kafka_python_producer.py,sha256=DkqqLSSXHBf4SXXf-IZwwLhxWrGE95Jg4MO_3RDsikU,3594
 buz/kafka/infrastructure/kafka_python/translators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -262,7 +263,7 @@ buz/serializer/message_to_json_bytes_serializer.py,sha256=RGZJ64t4t4Pz2FCASZZCv-
 buz/wrapper/__init__.py,sha256=GnRdJFcncn-qp0hzDG9dBHLmTJSbHFVjE_yr-MdW_n4,77
 buz/wrapper/async_to_sync.py,sha256=OfK-vrVUhuN-LLLvekLdMbQYtH0ue5lfbvuasj6ovMI,698
 buz/wrapper/event_loop.py,sha256=pfBJ1g-8A2a3YgW8Gf9Fg0kkewoh3-wgTy2KIFDyfHk,266
-buz-2.18.0.dist-info/LICENSE,sha256=jcLgcIIVaBqaZNwe0kzGWSU99YgwMcI0IGv142wkYSM,1062
-buz-2.18.0.dist-info/METADATA,sha256=L0TUDYrh4I11KrZf6vgj88eHmWm62AP3t_7yAr812Hc,12580
-buz-2.18.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-buz-2.18.0.dist-info/RECORD,,
+buz-2.20.0.dist-info/LICENSE,sha256=jcLgcIIVaBqaZNwe0kzGWSU99YgwMcI0IGv142wkYSM,1062
+buz-2.20.0.dist-info/METADATA,sha256=G6e8B1zuAsgMDYnD6b4yJY3oS5bgRBzWdTq-Qrk-KPU,12580
+buz-2.20.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+buz-2.20.0.dist-info/RECORD,,