buz 2.14.0__py3-none-any.whl → 2.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- buz/kafka/domain/exceptions/not_valid_partition_number_exception.py +10 -0
- buz/kafka/domain/services/kafka_admin_client.py +13 -0
- buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py +196 -34
- buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py +1 -1
- {buz-2.14.0.dist-info → buz-2.14.1.dist-info}/METADATA +1 -1
- {buz-2.14.0.dist-info → buz-2.14.1.dist-info}/RECORD +8 -7
- {buz-2.14.0.dist-info → buz-2.14.1.dist-info}/LICENSE +0 -0
- {buz-2.14.0.dist-info → buz-2.14.1.dist-info}/WHEEL +0 -0
buz/kafka/domain/exceptions/not_valid_partition_number_exception.py

@@ -0,0 +1,10 @@
+class NotValidPartitionNumberException(Exception):
+    def __init__(
+        self,
+        *,
+        partition_number: int,
+        min_partition_number: int,
+    ) -> None:
+        super().__init__(
+            f'"{partition_number}" is not a valid partition number, the minimum partition number is "{min_partition_number}"'
+        )
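The message interpolates both keyword-only arguments, so the exception is self-describing when logged. A minimal sketch of how it behaves (the values here are made up for illustration):

from buz.kafka.domain.exceptions.not_valid_partition_number_exception import NotValidPartitionNumberException

# Hypothetical values, for illustration only
try:
    raise NotValidPartitionNumberException(partition_number=2, min_partition_number=5)
except NotValidPartitionNumberException as error:
    print(error)  # "2" is not a valid partition number, the minimum partition number is "5"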
buz/kafka/domain/services/kafka_admin_client.py

@@ -40,6 +40,10 @@ class KafkaAdminClient(ConnectionManager, ABC):
     ) -> set[str]:
         pass
 
+    @abstractmethod
+    def get_number_of_partitions(self, topic: str) -> int:
+        pass
+
     # This function moves the following offset from the provided date
     # if there are no messages with a date greater than the provided offset
     # the offset will be moved to the end
@@ -52,3 +56,12 @@ class KafkaAdminClient(ConnectionManager, ABC):
         target_datetime: datetime,
     ) -> None:
         pass
+
+    @abstractmethod
+    def increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
+        self,
+        *,
+        topic: str,
+        new_number_of_partitions: int,
+    ) -> None:
+        pass
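Together, the two new abstract methods let callers inspect a topic's partition count and grow it while keeping the committed offsets of its consumer groups consistent. A minimal sketch of driving the expanded interface, assuming `admin` is any already-connected concrete KafkaAdminClient; the topic name and target count are made up:

# `admin`, "orders" and 12 are assumptions for illustration
current = admin.get_number_of_partitions("orders")

if current < 12:
    admin.increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
        topic="orders",
        new_number_of_partitions=12,
    )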
buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py

@@ -8,10 +8,12 @@ from typing import Any, Callable, Optional, Sequence, cast
 from cachetools import TTLCache
 from kafka import KafkaClient, KafkaConsumer
 from kafka.admin import KafkaAdminClient as KafkaPythonLibraryAdminClient, NewTopic
+from kafka.admin.new_partitions import NewPartitions
 from kafka.errors import TopicAlreadyExistsError
 from kafka.structs import TopicPartition, OffsetAndTimestamp
 
 from buz.kafka.domain.exceptions.not_all_partition_assigned_exception import NotAllPartitionAssignedException
+from buz.kafka.domain.exceptions.not_valid_partition_number_exception import NotValidPartitionNumberException
 from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
 from buz.kafka.domain.exceptions.topic_not_found_exception import TopicNotFoundException
 from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
@@ -150,10 +152,10 @@ class KafkaPythonAdminClient(KafkaAdminClient):
             group_ids=subscription_groups,
         )
 
-    def
+    def get_cluster_consumer_groups(
         self,
     ) -> set[str]:
-        return set(self._get_kafka_admin().list_consumer_groups())
+        return set([consumer_group_tuple[0] for consumer_group_tuple in self._get_kafka_admin().list_consumer_groups()])
 
     def _wait_for_cluster_update(self) -> None:
         future = self._get_kafka_client().cluster.request_update()
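This fixes a type bug: kafka-python's list_consumer_groups() yields (group_id, protocol_type) tuples, so the old set contained tuples rather than the group-name strings the declared set[str] return type promises. The rewritten line keeps only the group id; an equivalent comprehension, shown only to make the tuple shape explicit:

# kafka-python returns tuples such as ("my-consumer-group", "consumer"),
# so only the first element (the group id) is kept
groups: set[str] = {group_id for group_id, _protocol_type in admin_client.list_consumer_groups()}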
@@ -166,6 +168,46 @@ class KafkaPythonAdminClient(KafkaAdminClient):
         topic: str,
         target_datetime: datetime,
     ) -> None:
+        (consumer, topic_partitions) = self.__get_consumer_with_all_partitions_assigned(
+            consumer_group=consumer_group,
+            topic=topic,
+        )
+
+        offsets_for_date = self.__get_first_offset_after_date(
+            consumer=consumer,
+            topic_partitions=topic_partitions,
+            target_datetime=target_datetime,
+        )
+
+        try:
+            end_offsets = consumer.end_offsets(topic_partitions)
+
+            if end_offsets is None or len(end_offsets.keys()) != len(topic_partitions):
+                raise Exception(f'There was an error extracting the end offsets of the topic "{topic}"')
+
+            for topic_partition in topic_partitions:
+                offset_and_timestamp = offsets_for_date.get(topic_partition)
+                if offset_and_timestamp:
+                    self._logger.info(f'moving "{topic_partition}" to the offset "{offset_and_timestamp.offset}"')
+                    consumer.seek(topic_partition, offset_and_timestamp.offset)
+                else:
+                    self._logger.info(
+                        f'moving "{topic_partition}" to the end of the topic because there are no messages later than "{target_datetime}"'
+                    )
+                    consumer.seek(topic_partition, end_offsets[topic_partition])
+
+            consumer.commit()
+        except Exception as exception:
+            consumer.close()
+            raise exception
+
+        consumer.close()
+
+    def __get_consumer_with_all_partitions_assigned(
+        self,
+        consumer_group: str,
+        topic: str,
+    ) -> tuple[KafkaConsumer, Sequence[TopicPartition]]:
         consumer = KafkaConsumer(
             group_id=consumer_group,
             enable_auto_commit=False,
@@ -175,44 +217,29 @@ class KafkaPythonAdminClient(KafkaAdminClient):
             **self._config_in_library_format,
         )
 
-
+        try:
+            partitions = self.get_number_of_partitions(topic)
 
-
-        raise TopicNotFoundException(topic)
+            topic_partitions = [TopicPartition(topic=topic, partition=partition) for partition in range(partitions)]
 
-
-        consumer.subscribe(topics=[topic])
+            consumer.subscribe(topic)
 
-
+            self.__force_partition_assignment(consumer)
 
-
-
-
+            # We need all the partitions in order to update the offsets
+            if len(consumer.assignment()) != len(topic_partitions):
+                raise NotAllPartitionAssignedException(topic)
 
-
-        consumer
-
-
-
-
-        end_offsets = consumer.end_offsets(topic_partitions)
-
-        if end_offsets is None or len(end_offsets.keys()) != len(topic_partitions):
-            raise Exception(f'There was an error extracting the end offsets of the topic "{topic}"')
-
-        for topic_partition in topic_partitions:
-            offset_and_timestamp = offsets_for_date.get(topic_partition)
-            if offset_and_timestamp:
-                self._logger.info(f'moving "{topic_partition}" to the offset "{offset_and_timestamp.offset}"')
-                consumer.seek(topic_partition, offset_and_timestamp.offset)
-            else:
-                self._logger.info(
-                    f'moving "{topic_partition}" to the end of the topic because there are no messages later than "{target_datetime}"'
-                )
-                consumer.seek(topic_partition, end_offsets[topic_partition])
+            # This could produce a race condition, but it is a limitation of kafka admin (we are not able to check if all the partition are assigned using the manual assignment)
+            # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/group.py#L430
+            consumer.unsubscribe()
+            consumer.assign(topic_partitions)
+            self.__force_partition_assignment(consumer)
 
-
-
+            return (consumer, topic_partitions)
+        except Exception as exception:
+            consumer.close()
+            raise exception
 
     def __get_first_offset_after_date(
         self,
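The refactor extracts the consumer setup into __get_consumer_with_all_partitions_assigned and wraps it in try/except so the consumer is always closed on failure. The __get_first_offset_after_date helper is unchanged and not shown in this diff; with kafka-python such a lookup is typically built on KafkaConsumer.offsets_for_times, which maps each partition to the first offset at or after a timestamp, or None when every message is older, exactly the case where the code above falls back to end_offsets. A sketch under that assumption, with a made-up target date:

from datetime import datetime, timezone

# Assumption: the unshown __get_first_offset_after_date helper wraps offsets_for_times;
# kafka-python expects the timestamps in milliseconds
target_datetime = datetime(2024, 1, 1, tzinfo=timezone.utc)
timestamps_ms = {tp: int(target_datetime.timestamp() * 1000) for tp in topic_partitions}
offsets_for_date = consumer.offsets_for_times(timestamps_ms)  # {TopicPartition: OffsetAndTimestamp | None}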
@@ -235,3 +262,138 @@ class KafkaPythonAdminClient(KafkaAdminClient):
     # We are not to commit the new offset, but we need to execute a polling in order to start the partition assignment
     def __force_partition_assignment(self, consumer: KafkaConsumer) -> None:
         consumer.poll(max_records=1, timeout_ms=0)
+
+    def increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
+        self,
+        *,
+        topic: str,
+        new_number_of_partitions: int,
+    ) -> None:
+        self._logger.info(
+            f'Increasing topic "{topic}" partitions: Verifying the new number of partitions "{new_number_of_partitions}"'
+        )
+
+        previous_partitions_number = self.get_number_of_partitions(topic)
+        topic_partitions = [
+            TopicPartition(topic=topic, partition=partition) for partition in range(previous_partitions_number)
+        ]
+
+        if previous_partitions_number >= new_number_of_partitions:
+            raise NotValidPartitionNumberException(
+                partition_number=new_number_of_partitions,
+                min_partition_number=len(topic_partitions),
+            )
+
+        self._logger.info(f'Increasing topic "{topic}" partitions: Extracting related consumer groups')
+        related_consumer_groups = self.__get_consumer_groups_related_to_a_topic(topic_partitions)
+
+        self._logger.info(
+            f'Increasing topic "{topic}" partitions: The following consumer groups will be updated:"{related_consumer_groups}"'
+        )
+
+        consumers_to_update: list[KafkaConsumer] = []
+        new_partitions_consumer: Optional[KafkaConsumer] = None
+
+        try:
+            for consumer_group in related_consumer_groups:
+                self._logger.info(
+                    f'Increasing topic "{topic}" partitions: Requesting the assignment of the partitions of the group "{consumer_group}"'
+                )
+                (consumer_with_all_partitions, _) = self.__get_consumer_with_all_partitions_assigned(
+                    consumer_group=consumer_group,
+                    topic=topic,
+                )
+                consumers_to_update.append(consumer_with_all_partitions)
+
+            self._logger.info(
+                f'Increasing topic "{topic}" partitions: Incrementing the partition to "{new_number_of_partitions}"'
+            )
+
+            self._get_kafka_admin().create_partitions(
+                {
+                    topic: NewPartitions(total_count=new_number_of_partitions),
+                }
+            )
+
+            new_partitions = [
+                TopicPartition(
+                    topic=topic,
+                    partition=partition_index,
+                )
+                for partition_index in range(previous_partitions_number, new_number_of_partitions)
+            ]
+
+            for consumer_group in related_consumer_groups:
+                self._logger.info(
+                    f'Increasing topic "{topic}" partitions: Moving the offset of the consumer group "{consumer_group}" to the beginning of the new partitions'
+                )
+                # We need to create a new consumer because kafka-python has a limitation that does not allow to assign specific partitions to a consumer subscribed to an entire topic
+                new_partitions_consumer = KafkaConsumer(
+                    group_id=consumer_group,
+                    enable_auto_commit=False,
+                    auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+                        ConsumerInitialOffsetPosition.BEGINNING
+                    ),
+                    **self._config_in_library_format,
+                )
+                new_partitions_consumer.assign(new_partitions)
+                for new_partition in new_partitions:
+                    new_partitions_consumer.seek(new_partition, 0)
+                new_partitions_consumer.commit()
+                new_partitions_consumer.close()
+
+            self._logger.info(f'Increasing topic "{topic}" partitions: Process complete')
+
+        except Exception as exception:
+            for consumer_with_all_partitions in consumers_to_update:
+                consumer_with_all_partitions.close()
+
+            if new_partitions_consumer is not None:
+                new_partitions_consumer.close()
+
+            self._logger.error(f'Increasing topic "{topic}" partitions: unexpected error {exception}')
+            raise exception
+
+        return
+
+    def get_number_of_partitions(self, topic: str) -> int:
+        consumer = KafkaConsumer(
+            enable_auto_commit=False,
+            auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+                ConsumerInitialOffsetPosition.BEGINNING
+            ),
+            **self._config_in_library_format,
+        )
+
+        try:
+            partitions = consumer.partitions_for_topic(topic)
+            if partitions is None:
+                raise TopicNotFoundException(topic_name=topic)
+
+            return len(partitions)
+        except Exception as exception:
+            consumer.close()
+            raise exception
+
+    # The purpose of this function is to get all the consumer groups that are consuming from the topic
+    # It is a heavy tasks because we need to get the offset of all the partitions of the topic
+    def __get_consumer_groups_related_to_a_topic(self, topic_partitions: Sequence[TopicPartition]) -> set[str]:
+        cluster_consumer_groups = self.get_cluster_consumer_groups()
+
+        related_consumer_groups: set[str] = set()
+
+        for consumer_group in cluster_consumer_groups:
+            partitions_offsets = list(
+                self._get_kafka_admin()
+                .list_consumer_group_offsets(consumer_group, partitions=topic_partitions)
+                .values()
+            )
+
+            partitions_with_valid_offsets = [partition for partition in partitions_offsets if partition.offset != -1]
+
+            if len(partitions_with_valid_offsets) == 0:
+                continue
+
+            related_consumer_groups.add(consumer_group)
+
+        return related_consumer_groups
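The "related consumer group" check above hinges on a kafka-python convention: list_consumer_group_offsets returns a {TopicPartition: OffsetAndMetadata} mapping in which an offset of -1 marks a partition the group has never committed on. A condensed restatement of the same test for a single group; `admin_client` stands for the kafka-python admin client and "projections" is a made-up group id:

# A group is "related" to the topic if it has committed on at least one partition
offsets = admin_client.list_consumer_group_offsets("projections", partitions=topic_partitions)
is_related = any(metadata.offset != -1 for metadata in offsets.values())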
buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py

@@ -88,5 +88,5 @@ class KafkaPythonAdminTestClient(KafkaPythonAdminClient, KafkaAdminTestClient):
         self,
     ) -> None:
         self.delete_topics(topics=self.get_topics())
-        self.delete_subscription_groups(subscription_groups=self.
+        self.delete_subscription_groups(subscription_groups=self.get_cluster_consumer_groups())
         self._wait_for_cluster_update()
{buz-2.14.0.dist-info → buz-2.14.1.dist-info}/RECORD

@@ -138,6 +138,7 @@ buz/kafka/__init__.py,sha256=R3fcyET-SNEAvk_XlBQbHIbQVb63Qiz6lVrif3nDhNU,3435
 buz/kafka/domain/exceptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py,sha256=9zDWoh0SbHLRuCvpfIGcvrmcscKsXpbAPIxr5-z-GYg,296
 buz/kafka/domain/exceptions/not_valid_kafka_message_exception.py,sha256=Dn6I_-eGQnOuu5WW24oKGOdKOu4EdM8ByH3DLAbz5SY,57
+buz/kafka/domain/exceptions/not_valid_partition_number_exception.py,sha256=YZyGbblHk6ON9sBtjRQTDa-nC88i4oe14_VSO8vSTm0,337
 buz/kafka/domain/exceptions/topic_already_created_exception.py,sha256=UrisdveZGa2BB0ko4mS7-5fwy8eGsIu409_grtq1r9k,333
 buz/kafka/domain/exceptions/topic_not_found_exception.py,sha256=kLuqGqfsb6YTCe5UCKpMwBm_QAnU9Udfb8bWajPoA8k,201
 buz/kafka/domain/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -155,7 +156,7 @@ buz/kafka/domain/models/kafka_supported_sasl_mechanisms.py,sha256=ASyDaFgseQRcUJ
 buz/kafka/domain/models/kafka_supported_security_protocols.py,sha256=ffY2-9sOj4XIkJTSQVkqeOb4KnuqEYXISDarfDN8r9Q,161
 buz/kafka/domain/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/domain/services/async_kafka_producer.py,sha256=gSq3WwEVux_gp3EKDAMN1WsM027uklB58E-WnKpyhPs,533
-buz/kafka/domain/services/kafka_admin_client.py,sha256=
+buz/kafka/domain/services/kafka_admin_client.py,sha256=Kh_w-qWEY8rsrlYjnJT1FrJLVZrO3l2LeRAWFyc_nOg,1558
 buz/kafka/domain/services/kafka_admin_test_client.py,sha256=91l_vFIo1yhJLQQCC_OmeXZ5F429zP7Hx5g4FNllpfE,1625
 buz/kafka/domain/services/kafka_producer.py,sha256=8bLTV328orrPHcARzkc6no4vyJzrArVtCsjmSRXDjos,506
 buz/kafka/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -180,8 +181,8 @@ buz/kafka/infrastructure/interfaces/async_connection_manager.py,sha256=JbaLu5UVV
 buz/kafka/infrastructure/interfaces/connection_manager.py,sha256=EWnvShJHOg8QYe6a3ma0urjKjmVMDBi7q8T2cv_i_MQ,200
 buz/kafka/infrastructure/kafka_python/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/infrastructure/kafka_python/exception/consumer_interrupted_exception.py,sha256=fqhgV7HILdVdv-p1CsOIaaESKY2ZXBtRGYbrVSdPLg0,164
-buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=
-buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py,sha256=
+buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py,sha256=OwyDCXXMA3-yhBRVW4iRCr9M9DJQ65QBtEKx032z1rs,16140
+buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py,sha256=5xP23dQ7FDuy7dIWNw39C3bMVmaUj9ZQhEEJISRv9ec,2986
 buz/kafka/infrastructure/kafka_python/kafka_python_producer.py,sha256=DkqqLSSXHBf4SXXf-IZwwLhxWrGE95Jg4MO_3RDsikU,3594
 buz/kafka/infrastructure/kafka_python/translators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 buz/kafka/infrastructure/kafka_python/translators/consumer_initial_offset_position_translator.py,sha256=hJ48_eyMcnbFL_Y5TOiMbGXrQSryuKk9CvP59MdqNOY,620
@@ -243,7 +244,7 @@ buz/serializer/message_to_json_bytes_serializer.py,sha256=RGZJ64t4t4Pz2FCASZZCv-
 buz/wrapper/__init__.py,sha256=GnRdJFcncn-qp0hzDG9dBHLmTJSbHFVjE_yr-MdW_n4,77
 buz/wrapper/async_to_sync.py,sha256=OfK-vrVUhuN-LLLvekLdMbQYtH0ue5lfbvuasj6ovMI,698
 buz/wrapper/event_loop.py,sha256=pfBJ1g-8A2a3YgW8Gf9Fg0kkewoh3-wgTy2KIFDyfHk,266
-buz-2.14.
-buz-2.14.
-buz-2.14.
-buz-2.14.
+buz-2.14.1.dist-info/LICENSE,sha256=Jytu2S-2SPEgsB0y6BF-_LUxIWY7402fl0JSh36TLZE,1062
+buz-2.14.1.dist-info/METADATA,sha256=WL8Cp60N7_OJhpYuwyBLADYJ5cVohFC29oKmBRDri00,1617
+buz-2.14.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+buz-2.14.1.dist-info/RECORD,,
{buz-2.14.0.dist-info → buz-2.14.1.dist-info}/LICENSE: File without changes
{buz-2.14.0.dist-info → buz-2.14.1.dist-info}/WHEEL: File without changes