buz 2.14.0.tar.gz → 2.14.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {buz-2.14.0 → buz-2.14.1}/PKG-INFO +1 -1
- {buz-2.14.0 → buz-2.14.1}/pyproject.toml +1 -1
- buz-2.14.1/src/buz/kafka/domain/exceptions/not_valid_partition_number_exception.py +10 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/services/kafka_admin_client.py +13 -0
- buz-2.14.1/src/buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py +399 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py +1 -1
- buz-2.14.0/src/buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py +0 -237
- {buz-2.14.0 → buz-2.14.1}/LICENSE +0 -0
- {buz-2.14.0 → buz-2.14.1}/README.md +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/base_command_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/command_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/command_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/middleware/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/middleware/base_handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/middleware/handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/middleware/handle_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/self_process/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/asynchronous/self_process/self_process_command_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/command.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/more_than_one_command_handler_related_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/base_command_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/command_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/command_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/middleware/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/middleware/base_handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/middleware/handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/middleware/handle_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/self_process/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/self_process/self_process_command_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/synced_async/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/command/synchronous/synced_async/synced_async_command_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/async_consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/async_event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/async_subscriber.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/async_worker.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/base_async_subscriber.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/base_subscriber.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/dead_letter_queue/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/dead_letter_queue/dlq_criteria.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/dead_letter_queue/dlq_record.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/dead_letter_queue/dlq_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/event.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/exceptions/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/exceptions/event_not_published_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/exceptions/event_restore_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/exceptions/subscribers_not_found_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/exceptions/term_signal_interruption_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/exceptions/worker_execution_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/async_buz_kafka_event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/base_buz_aiokafka_async_consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/buz_aiokafka_async_consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/buz_aiokafka_multi_threaded_consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/buz_kafka_event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/consume_strategy/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/consume_strategy/consume_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/consume_strategy/kafka_on_fail_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/consume_strategy/topic_and_subscription_group_per_subscriber_kafka_consumer_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/exceptions/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/exceptions/kafka_event_bus_config_not_valid_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/kafka_event_async_subscriber_executor.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/kafka_event_subscriber_executor.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/kafka_event_sync_subscriber_executor.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/publish_strategy/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/publish_strategy/publish_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/buz_kafka/publish_strategy/topic_per_event_kafka_publish_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/allowed_kombu_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/consume_strategy/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/consume_strategy/consume_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/consume_strategy/queue_per_subscriber_consume_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/kombu_consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/kombu_event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/publish_strategy/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/publish_strategy/fanout_exchange_per_event_publish_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/publish_strategy/publish_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/retry_strategy/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/retry_strategy/publish_retry_policy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/kombu/retry_strategy/simple_publish_retry_policy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/models/consuming_task.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/infrastructure/queue/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/meta_base_subscriber.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/meta_subscriber.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/async_consume_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/async_consume_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/async_publish_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/async_publish_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/base_consume_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/base_publish_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/consume_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/consume_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/exceptions/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/exceptions/event_already_in_progress_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/publish_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/middleware/publish_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/execution_strategy/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/execution_strategy/async_execution_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/execution_strategy/async_self_process_execution_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/execution_strategy/cyclic_iterator_execution_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/execution_strategy/execution_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/execution_strategy/self_process_execution_strategy.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/consume_retrier.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/consumed_event_retry.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/consumed_event_retry_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/max_retries_consume_retrier.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/max_retries_negative_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/strategies/retry/reject_callback.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/subscriber.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/sync/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/sync/sync_event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/event_to_outbox_record_translator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/fqn_to_event_mapper.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_criteria/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_criteria/deliverable_records_outbox_criteria_factory.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_criteria/outbox_criteria.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_criteria/outbox_criteria_factory.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_criteria/outbox_sorting_criteria.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_finder/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_finder/outbox_record_stream_finder.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_finder/polling_outbox_record_stream_finder.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_to_event_translator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_validation/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_validation/abstract_outbox_record_validator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_validation/outbox_record_size_not_allowed_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_validation/outbox_record_validation_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_validation/outbox_record_validator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_record_validation/size_outbox_record_validator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/outbox_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/transactional_outbox_event_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/transactional_outbox/transactional_outbox_worker.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/event/worker.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/exceptions/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/exceptions/not_all_partition_assigned_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/exceptions/not_valid_kafka_message_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/exceptions/topic_already_created_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/exceptions/topic_not_found_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/auto_create_topic_configuration.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/consumer_initial_offset_position.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/create_kafka_topic.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_connection_config.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_connection_credentials.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_connection_plain_text_credentials.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_connection_sasl_credentials.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_consumer_record.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_poll_record.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_supported_compression_type.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_supported_sasl_mechanisms.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/models/kafka_supported_security_protocols.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/services/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/services/async_kafka_producer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/services/kafka_admin_test_client.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/services/kafka_producer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/aiokafka_consumer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/aiokafka_producer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/rebalance/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/rebalance/kafka_callback_rebalancer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/translators/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/aiokafka/translators/consumer_initial_offset_position_translator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/cdc/cdc_message.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/byte_deserializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/bytes_to_message_deserializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/implementations/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/implementations/cdc/cdc_record_bytes_to_event_deserializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/implementations/cdc/not_valid_cdc_message_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/implementations/json_byte_deserializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/deserializers/implementations/json_bytes_to_message_deserializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/interfaces/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/interfaces/async_connection_manager.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/interfaces/connection_manager.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/exception/consumer_interrupted_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/kafka_python_producer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/translators/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/translators/consumer_initial_offset_position_translator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/serializers/byte_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/serializers/implementations/cdc_record_bytes_to_event_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/serializers/implementations/json_byte_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/serializers/kafka_header_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/handler_fqn_not_found_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/locator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/message_fqn_not_found_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/pypendency/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/pypendency/container_locator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/pypendency/container_locator_resolution_configuration.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/pypendency/handler_not_found_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/pypendency/handler_not_registered_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/sync/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/sync/handler_already_registered_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/sync/handler_not_registered_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/locator/sync/instance_locator.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/message.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/middleware/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/middleware/middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/middleware/middleware_chain_builder.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/py.typed +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/base_query_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/middleware/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/middleware/base_handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/middleware/handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/middleware/handle_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/query_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/query_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/self_process/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/asynchronous/self_process/self_process_query_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/more_than_one_query_handler_related_exception.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/query.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/query_response.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/base_query_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/middleware/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/middleware/base_handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/middleware/handle_middleware.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/middleware/handle_middleware_chain_resolver.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/query_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/query_handler.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/self_process/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/self_process/self_process_query_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/synced_async/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/query/synchronous/synced_async/synced_async_query_bus.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/queue/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/queue/in_memory/in_memory_multiqueue_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/queue/in_memory/in_memory_queue_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/queue/multiqueue_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/queue/queue_repository.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/serializer/message_to_bytes_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/serializer/message_to_json_bytes_serializer.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/wrapper/__init__.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/wrapper/async_to_sync.py +0 -0
- {buz-2.14.0 → buz-2.14.1}/src/buz/wrapper/event_loop.py +0 -0
{buz-2.14.0 → buz-2.14.1}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "buz"
-version = "2.14.0"
+version = "2.14.1"
 description = "Buz is a set of light, simple and extensible implementations of event, command and query buses."
 readme = "README.md"
 authors = ["Luis Pintado Lozano <luis.pintado.lozano@gmail.com>", "Gerardo Parra <gprauxiliar@gmail.com>"]
buz-2.14.1/src/buz/kafka/domain/exceptions/not_valid_partition_number_exception.py

@@ -0,0 +1,10 @@
+class NotValidPartitionNumberException(Exception):
+    def __init__(
+        self,
+        *,
+        partition_number: int,
+        min_partition_number: int,
+    ) -> None:
+        super().__init__(
+            f'"{partition_number}" is not a valid partition number, the minimum partition number is "{min_partition_number}"'
+        )
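As a quick reference for the new exception's behavior, here is a minimal sketch (not part of the package; the values are made up) showing the message it produces:

```python
from buz.kafka.domain.exceptions.not_valid_partition_number_exception import (
    NotValidPartitionNumberException,
)

# Hypothetical values, purely for illustration.
try:
    raise NotValidPartitionNumberException(partition_number=3, min_partition_number=6)
except NotValidPartitionNumberException as exception:
    print(exception)
    # "3" is not a valid partition number, the minimum partition number is "6"
```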
{buz-2.14.0 → buz-2.14.1}/src/buz/kafka/domain/services/kafka_admin_client.py

@@ -40,6 +40,10 @@ class KafkaAdminClient(ConnectionManager, ABC):
     ) -> set[str]:
         pass
 
+    @abstractmethod
+    def get_number_of_partitions(self, topic: str) -> int:
+        pass
+
     # This function moves the following offset from the provided date
     # if there are no messages with a date greater than the provided offset
     # the offset will be moved to the end
@@ -52,3 +56,12 @@ class KafkaAdminClient(ConnectionManager, ABC):
         target_datetime: datetime,
     ) -> None:
         pass
+
+    @abstractmethod
+    def increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
+        self,
+        *,
+        topic: str,
+        new_number_of_partitions: int,
+    ) -> None:
+        pass
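The concrete kafka-python implementation of these two methods follows. As an orientation, here is a minimal usage sketch, assuming a `KafkaConnectionConfig` built by a hypothetical helper and a hypothetical topic name — neither appears in this diff:

```python
import logging

from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
from buz.kafka.infrastructure.kafka_python.kafka_python_admin_client import KafkaPythonAdminClient

# Assumption: make_connection_config() is a hypothetical helper; the
# KafkaConnectionConfig constructor arguments are not shown in this diff.
connection_config: KafkaConnectionConfig = make_connection_config()

admin_client = KafkaPythonAdminClient(
    logger=logging.getLogger(__name__),
    connection_config=connection_config,
    cache_ttl_seconds=60,
)
admin_client.connect()

# New in 2.14.1: read the current partition count of a topic ("orders" is hypothetical).
current_partitions = admin_client.get_number_of_partitions("orders")

# New in 2.14.1: grow the topic and rewind every consumer group that reads from it
# to the beginning of the newly created partitions.
admin_client.increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
    topic="orders",
    new_number_of_partitions=current_partitions + 2,
)

admin_client.disconnect()
```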
buz-2.14.1/src/buz/kafka/infrastructure/kafka_python/kafka_python_admin_client.py

@@ -0,0 +1,399 @@
+from __future__ import annotations
+
+from datetime import datetime
+from logging import Logger
+import re
+from typing import Any, Callable, Optional, Sequence, cast
+
+from cachetools import TTLCache
+from kafka import KafkaClient, KafkaConsumer
+from kafka.admin import KafkaAdminClient as KafkaPythonLibraryAdminClient, NewTopic
+from kafka.admin.new_partitions import NewPartitions
+from kafka.errors import TopicAlreadyExistsError
+from kafka.structs import TopicPartition, OffsetAndTimestamp
+
+from buz.kafka.domain.exceptions.not_all_partition_assigned_exception import NotAllPartitionAssignedException
+from buz.kafka.domain.exceptions.not_valid_partition_number_exception import NotValidPartitionNumberException
+from buz.kafka.domain.exceptions.topic_already_created_exception import KafkaTopicsAlreadyCreatedException
+from buz.kafka.domain.exceptions.topic_not_found_exception import TopicNotFoundException
+from buz.kafka.domain.models.consumer_initial_offset_position import ConsumerInitialOffsetPosition
+from buz.kafka.domain.models.create_kafka_topic import CreateKafkaTopic
+from buz.kafka.domain.models.kafka_connection_config import KafkaConnectionConfig
+from buz.kafka.domain.services.kafka_admin_client import KafkaAdminClient
+
+from buz.kafka.infrastructure.kafka_python.translators.consumer_initial_offset_position_translator import (
+    KafkaPythonConsumerInitialOffsetPositionTranslator,
+)
+
+INTERNAL_KAFKA_TOPICS = {"__consumer_offsets", "_schema"}
+TOPIC_CACHE_KEY = "topics"
+
+
+class KafkaPythonAdminClient(KafkaAdminClient):
+    __PYTHON_KAFKA_DUPLICATED_TOPIC_ERROR_CODE = 36
+
+    _kafka_admin: Optional[KafkaPythonLibraryAdminClient] = None
+    _kafka_client: Optional[KafkaClient] = None
+
+    def __init__(
+        self,
+        *,
+        logger: Logger,
+        connection_config: KafkaConnectionConfig,
+        cache_ttl_seconds: int = 0,
+    ):
+        self._logger = logger
+        self.__connection_config = connection_config
+        self._config_in_library_format = self.__get_kafka_config_in_library_format(self.__connection_config)
+        self.__ttl_cache: TTLCache[str, Any] = TTLCache(maxsize=1, ttl=cache_ttl_seconds)
+
+    def __get_kafka_config_in_library_format(self, config: KafkaConnectionConfig) -> dict:
+        return {
+            "client_id": config.client_id,
+            "bootstrap_servers": config.bootstrap_servers,
+            "security_protocol": config.credentials.security_protocol.value,
+            "sasl_mechanism": config.credentials.sasl_mechanism.value if config.credentials.sasl_mechanism else None,
+            "sasl_plain_username": config.credentials.user,
+            "sasl_plain_password": config.credentials.password,
+        }
+
+    def connect(self):
+        self._get_kafka_admin()
+        self._get_kafka_client()
+
+    def disconnect(self):
+        if self._kafka_admin is not None:
+            self._kafka_admin.close()
+            self._kafka_admin = None
+        if self._kafka_client is not None:
+            self._kafka_client.close()
+            self._kafka_client = None
+
+    def _get_kafka_admin(self) -> KafkaPythonLibraryAdminClient:
+        if not self._kafka_admin:
+            self._kafka_admin = KafkaPythonLibraryAdminClient(**self._config_in_library_format)
+        return self._kafka_admin
+
+    def _get_kafka_client(self) -> KafkaClient:
+        if not self._kafka_client:
+            self._kafka_client = KafkaClient(**self._config_in_library_format)
+        return self._kafka_client
+
+    def create_topics(
+        self,
+        *,
+        topics: Sequence[CreateKafkaTopic],
+    ) -> None:
+        new_topics = [
+            NewTopic(
+                name=topic.name,
+                num_partitions=topic.partitions,
+                replication_factor=topic.replication_factor,
+                topic_configs=topic.configs,
+            )
+            for topic in topics
+        ]
+
+        try:
+            self._get_kafka_admin().create_topics(new_topics=new_topics)
+        except TopicAlreadyExistsError as error:
+            topic_names = self.__get_list_of_kafka_topics_from_topic_already_exists_error(error)
+            raise KafkaTopicsAlreadyCreatedException(topic_names=topic_names)
+
+    def __get_list_of_kafka_topics_from_topic_already_exists_error(self, error: TopicAlreadyExistsError) -> list[str]:
+        message = str(error)
+        response_message = re.search(r"topic_errors=\[.*?]", message)
+        topic_messages = re.findall(
+            r"topic='[^']*', error_code=" + str(self.__PYTHON_KAFKA_DUPLICATED_TOPIC_ERROR_CODE), response_message[0]  # type: ignore
+        )
+
+        return [re.search("'.*'", topic_message)[0].strip("'") for topic_message in topic_messages]  # type: ignore
+
+    def is_topic_created(
+        self,
+        topic: str,
+    ) -> bool:
+        return topic in self.get_topics()
+
+    def get_topics(
+        self,
+    ) -> set[str]:
+        return self.__resolve_cached_property(
+            TOPIC_CACHE_KEY, lambda: set(self._get_kafka_admin().list_topics()) - INTERNAL_KAFKA_TOPICS
+        )
+
+    def __resolve_cached_property(self, property_key: str, callback: Callable) -> Any:
+        value = self.__ttl_cache.get(property_key)
+        if value is not None:
+            return value
+        value = callback()
+        self.__ttl_cache[property_key] = value
+        return value
+
+    def delete_topics(
+        self,
+        *,
+        topics: set[str],
+    ) -> None:
+        self._get_kafka_admin().delete_topics(
+            topics=topics,
+        )
+        self.__remove_cache_property(TOPIC_CACHE_KEY)
+
+    def __remove_cache_property(self, property_key: str) -> None:
+        self.__ttl_cache.pop(property_key, None)
+
+    def delete_subscription_groups(
+        self,
+        *,
+        subscription_groups: set[str],
+    ) -> None:
+        self._get_kafka_admin().delete_consumer_groups(
+            group_ids=subscription_groups,
+        )
+
+    def get_cluster_consumer_groups(
+        self,
+    ) -> set[str]:
+        return set([consumer_group_tuple[0] for consumer_group_tuple in self._get_kafka_admin().list_consumer_groups()])
+
+    def _wait_for_cluster_update(self) -> None:
+        future = self._get_kafka_client().cluster.request_update()
+        self._get_kafka_client().poll(future=future)
+
+    def move_offsets_to_datetime(
+        self,
+        *,
+        consumer_group: str,
+        topic: str,
+        target_datetime: datetime,
+    ) -> None:
+        (consumer, topic_partitions) = self.__get_consumer_with_all_partitions_assigned(
+            consumer_group=consumer_group,
+            topic=topic,
+        )
+
+        offsets_for_date = self.__get_first_offset_after_date(
+            consumer=consumer,
+            topic_partitions=topic_partitions,
+            target_datetime=target_datetime,
+        )
+
+        try:
+            end_offsets = consumer.end_offsets(topic_partitions)
+
+            if end_offsets is None or len(end_offsets.keys()) != len(topic_partitions):
+                raise Exception(f'There was an error extracting the end offsets of the topic "{topic}"')
+
+            for topic_partition in topic_partitions:
+                offset_and_timestamp = offsets_for_date.get(topic_partition)
+                if offset_and_timestamp:
+                    self._logger.info(f'moving "{topic_partition}" to the offset "{offset_and_timestamp.offset}"')
+                    consumer.seek(topic_partition, offset_and_timestamp.offset)
+                else:
+                    self._logger.info(
+                        f'moving "{topic_partition}" to the end of the topic because there are no messages later than "{target_datetime}"'
+                    )
+                    consumer.seek(topic_partition, end_offsets[topic_partition])
+
+            consumer.commit()
+        except Exception as exception:
+            consumer.close()
+            raise exception
+
+        consumer.close()
+
+    def __get_consumer_with_all_partitions_assigned(
+        self,
+        consumer_group: str,
+        topic: str,
+    ) -> tuple[KafkaConsumer, Sequence[TopicPartition]]:
+        consumer = KafkaConsumer(
+            group_id=consumer_group,
+            enable_auto_commit=False,
+            auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+                ConsumerInitialOffsetPosition.BEGINNING
+            ),
+            **self._config_in_library_format,
+        )
+
+        try:
+            partitions = self.get_number_of_partitions(topic)
+
+            topic_partitions = [TopicPartition(topic=topic, partition=partition) for partition in range(partitions)]
+
+            consumer.subscribe(topic)
+
+            self.__force_partition_assignment(consumer)
+
+            # We need all the partitions in order to update the offsets
+            if len(consumer.assignment()) != len(topic_partitions):
+                raise NotAllPartitionAssignedException(topic)
+
+            # This could produce a race condition, but it is a limitation of kafka admin (we are not able to check if all the partition are assigned using the manual assignment)
+            # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/group.py#L430
+            consumer.unsubscribe()
+            consumer.assign(topic_partitions)
+            self.__force_partition_assignment(consumer)
+
+            return (consumer, topic_partitions)
+        except Exception as exception:
+            consumer.close()
+            raise exception
+
+    def __get_first_offset_after_date(
+        self,
+        *,
+        consumer: KafkaConsumer,
+        topic_partitions: Sequence[TopicPartition],
+        target_datetime: datetime,
+    ) -> dict[TopicPartition, Optional[OffsetAndTimestamp]]:
+        offset_for_times: dict[TopicPartition, Optional[int]] = {}
+        timestamp_ms = int(target_datetime.timestamp() * 1000)
+
+        for topic_partition in topic_partitions:
+            offset_for_times[topic_partition] = timestamp_ms
+
+        return cast(
+            dict[TopicPartition, Optional[OffsetAndTimestamp]],
+            consumer.offsets_for_times(offset_for_times),
+        )
+
+    # We are not to commit the new offset, but we need to execute a polling in order to start the partition assignment
+    def __force_partition_assignment(self, consumer: KafkaConsumer) -> None:
+        consumer.poll(max_records=1, timeout_ms=0)
+
+    def increase_topic_partitions_and_set_offset_of_related_consumer_groups_to_the_beginning_of_the_new_ones(
+        self,
+        *,
+        topic: str,
+        new_number_of_partitions: int,
+    ) -> None:
+        self._logger.info(
+            f'Increasing topic "{topic}" partitions: Verifying the new number of partitions "{new_number_of_partitions}"'
+        )
+
+        previous_partitions_number = self.get_number_of_partitions(topic)
+        topic_partitions = [
+            TopicPartition(topic=topic, partition=partition) for partition in range(previous_partitions_number)
+        ]
+
+        if previous_partitions_number >= new_number_of_partitions:
+            raise NotValidPartitionNumberException(
+                partition_number=new_number_of_partitions,
+                min_partition_number=len(topic_partitions),
+            )
+
+        self._logger.info(f'Increasing topic "{topic}" partitions: Extracting related consumer groups')
+        related_consumer_groups = self.__get_consumer_groups_related_to_a_topic(topic_partitions)
+
+        self._logger.info(
+            f'Increasing topic "{topic}" partitions: The following consumer groups will be updated:"{related_consumer_groups}"'
+        )
+
+        consumers_to_update: list[KafkaConsumer] = []
+        new_partitions_consumer: Optional[KafkaConsumer] = None
+
+        try:
+            for consumer_group in related_consumer_groups:
+                self._logger.info(
+                    f'Increasing topic "{topic}" partitions: Requesting the assignment of the partitions of the group "{consumer_group}"'
+                )
+                (consumer_with_all_partitions, _) = self.__get_consumer_with_all_partitions_assigned(
+                    consumer_group=consumer_group,
+                    topic=topic,
+                )
+                consumers_to_update.append(consumer_with_all_partitions)
+
+            self._logger.info(
+                f'Increasing topic "{topic}" partitions: Incrementing the partition to "{new_number_of_partitions}"'
+            )
+
+            self._get_kafka_admin().create_partitions(
+                {
+                    topic: NewPartitions(total_count=new_number_of_partitions),
+                }
+            )
+
+            new_partitions = [
+                TopicPartition(
+                    topic=topic,
+                    partition=partition_index,
+                )
+                for partition_index in range(previous_partitions_number, new_number_of_partitions)
+            ]
+
+            for consumer_group in related_consumer_groups:
+                self._logger.info(
+                    f'Increasing topic "{topic}" partitions: Moving the offset of the consumer group "{consumer_group}" to the beginning of the new partitions'
+                )
+                # We need to create a new consumer because kafka-python has a limitation that does not allow to assign specific partitions to a consumer subscribed to an entire topic
+                new_partitions_consumer = KafkaConsumer(
+                    group_id=consumer_group,
+                    enable_auto_commit=False,
+                    auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+                        ConsumerInitialOffsetPosition.BEGINNING
+                    ),
+                    **self._config_in_library_format,
+                )
+                new_partitions_consumer.assign(new_partitions)
+                for new_partition in new_partitions:
+                    new_partitions_consumer.seek(new_partition, 0)
+                new_partitions_consumer.commit()
+                new_partitions_consumer.close()
+
+            self._logger.info(f'Increasing topic "{topic}" partitions: Process complete')
+
+        except Exception as exception:
+            for consumer_with_all_partitions in consumers_to_update:
+                consumer_with_all_partitions.close()
+
+            if new_partitions_consumer is not None:
+                new_partitions_consumer.close()
+
+            self._logger.error(f'Increasing topic "{topic}" partitions: unexpected error {exception}')
+            raise exception
+
+        return
+
+    def get_number_of_partitions(self, topic: str) -> int:
+        consumer = KafkaConsumer(
+            enable_auto_commit=False,
+            auto_offset_reset=KafkaPythonConsumerInitialOffsetPositionTranslator.to_kafka_supported_format(
+                ConsumerInitialOffsetPosition.BEGINNING
+            ),
+            **self._config_in_library_format,
+        )
+
+        try:
+            partitions = consumer.partitions_for_topic(topic)
+            if partitions is None:
+                raise TopicNotFoundException(topic_name=topic)
+
+            return len(partitions)
+        except Exception as exception:
+            consumer.close()
+            raise exception
+
+    # The purpose of this function is to get all the consumer groups that are consuming from the topic
+    # It is a heavy tasks because we need to get the offset of all the partitions of the topic
+    def __get_consumer_groups_related_to_a_topic(self, topic_partitions: Sequence[TopicPartition]) -> set[str]:
+        cluster_consumer_groups = self.get_cluster_consumer_groups()
+
+        related_consumer_groups: set[str] = set()
+
+        for consumer_group in cluster_consumer_groups:
+            partitions_offsets = list(
+                self._get_kafka_admin()
+                .list_consumer_group_offsets(consumer_group, partitions=topic_partitions)
+                .values()
+            )
+
+            partitions_with_valid_offsets = [partition for partition in partitions_offsets if partition.offset != -1]
+
+            if len(partitions_with_valid_offsets) == 0:
+                continue
+
+            related_consumer_groups.add(consumer_group)
+
+        return related_consumer_groups
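One detail worth noting in the implementation above: `move_offsets_to_datetime` delegates the datetime-to-offset lookup to kafka-python's `KafkaConsumer.offsets_for_times`, which maps each partition to the earliest offset whose timestamp is at or after the requested one, or `None` when no newer message exists (the client then seeks to the end of that partition). A minimal standalone sketch of that call, with a made-up broker address and topic name:

```python
from datetime import datetime, timezone

from kafka import KafkaConsumer
from kafka.structs import TopicPartition

# Hypothetical broker and topic, purely for illustration.
consumer = KafkaConsumer(bootstrap_servers="localhost:9092")
partition = TopicPartition(topic="orders", partition=0)

# offsets_for_times expects timestamps in milliseconds since the epoch.
timestamp_ms = int(datetime(2024, 1, 1, tzinfo=timezone.utc).timestamp() * 1000)
result = consumer.offsets_for_times({partition: timestamp_ms})

# result[partition] is an OffsetAndTimestamp, or None if every message in the
# partition is older than the requested datetime.
print(result[partition])
consumer.close()
```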
{buz-2.14.0 → buz-2.14.1}/src/buz/kafka/infrastructure/kafka_python/kafka_python_admin_test_client.py

@@ -88,5 +88,5 @@ class KafkaPythonAdminTestClient(KafkaPythonAdminClient, KafkaAdminTestClient):
         self,
     ) -> None:
         self.delete_topics(topics=self.get_topics())
-        self.delete_subscription_groups(subscription_groups=self.
+        self.delete_subscription_groups(subscription_groups=self.get_cluster_consumer_groups())
         self._wait_for_cluster_update()