karafka 1.4.4 → 2.1.10
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/FUNDING.yml +1 -3
- data/.github/workflows/ci.yml +117 -36
- data/.rspec +4 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +611 -578
- data/CONTRIBUTING.md +10 -19
- data/Gemfile +7 -0
- data/Gemfile.lock +59 -100
- data/LICENSE +17 -0
- data/LICENSE-COMM +89 -0
- data/LICENSE-LGPL +165 -0
- data/README.md +64 -66
- data/bin/benchmarks +85 -0
- data/bin/create_token +22 -0
- data/bin/integrations +297 -0
- data/bin/karafka +4 -12
- data/bin/rspecs +6 -0
- data/bin/scenario +29 -0
- data/bin/stress_many +13 -0
- data/bin/stress_one +13 -0
- data/bin/verify_license_integrity +37 -0
- data/certs/cert_chain.pem +26 -0
- data/certs/karafka-pro.pem +11 -0
- data/config/locales/errors.yml +84 -0
- data/config/locales/pro_errors.yml +39 -0
- data/docker-compose.yml +13 -3
- data/karafka.gemspec +27 -22
- data/lib/active_job/karafka.rb +17 -0
- data/lib/active_job/queue_adapters/karafka_adapter.rb +32 -0
- data/lib/karafka/active_job/consumer.rb +49 -0
- data/lib/karafka/active_job/current_attributes/loading.rb +36 -0
- data/lib/karafka/active_job/current_attributes/persistence.rb +28 -0
- data/lib/karafka/active_job/current_attributes.rb +42 -0
- data/lib/karafka/active_job/dispatcher.rb +69 -0
- data/lib/karafka/active_job/job_extensions.rb +34 -0
- data/lib/karafka/active_job/job_options_contract.rb +32 -0
- data/lib/karafka/admin.rb +286 -0
- data/lib/karafka/app.rb +47 -23
- data/lib/karafka/base_consumer.rb +247 -29
- data/lib/karafka/cli/base.rb +24 -4
- data/lib/karafka/cli/console.rb +13 -8
- data/lib/karafka/cli/info.rb +45 -10
- data/lib/karafka/cli/install.rb +22 -12
- data/lib/karafka/cli/server.rb +63 -41
- data/lib/karafka/cli/topics.rb +146 -0
- data/lib/karafka/cli.rb +4 -11
- data/lib/karafka/connection/client.rb +502 -89
- data/lib/karafka/connection/consumer_group_coordinator.rb +48 -0
- data/lib/karafka/connection/listener.rb +294 -38
- data/lib/karafka/connection/listeners_batch.rb +40 -0
- data/lib/karafka/connection/messages_buffer.rb +84 -0
- data/lib/karafka/connection/pauses_manager.rb +46 -0
- data/lib/karafka/connection/proxy.rb +92 -0
- data/lib/karafka/connection/raw_messages_buffer.rb +101 -0
- data/lib/karafka/connection/rebalance_manager.rb +90 -0
- data/lib/karafka/contracts/base.rb +17 -0
- data/lib/karafka/contracts/config.rb +88 -11
- data/lib/karafka/contracts/consumer_group.rb +32 -187
- data/lib/karafka/contracts/server_cli_options.rb +80 -19
- data/lib/karafka/contracts/topic.rb +65 -0
- data/lib/karafka/contracts.rb +1 -1
- data/lib/karafka/embedded.rb +36 -0
- data/lib/karafka/env.rb +46 -0
- data/lib/karafka/errors.rb +26 -21
- data/lib/karafka/helpers/async.rb +33 -0
- data/lib/karafka/helpers/colorize.rb +26 -0
- data/lib/karafka/helpers/multi_delegator.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/error.rb +39 -0
- data/lib/karafka/instrumentation/callbacks/statistics.rb +51 -0
- data/lib/karafka/instrumentation/logger.rb +5 -9
- data/lib/karafka/instrumentation/logger_listener.rb +299 -0
- data/lib/karafka/instrumentation/monitor.rb +13 -61
- data/lib/karafka/instrumentation/notifications.rb +75 -0
- data/lib/karafka/instrumentation/proctitle_listener.rb +7 -16
- data/lib/karafka/instrumentation/vendors/datadog/dashboard.json +1 -0
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +153 -0
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +264 -0
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +176 -0
- data/lib/karafka/licenser.rb +78 -0
- data/lib/karafka/messages/batch_metadata.rb +52 -0
- data/lib/karafka/messages/builders/batch_metadata.rb +40 -0
- data/lib/karafka/messages/builders/message.rb +36 -0
- data/lib/karafka/messages/builders/messages.rb +36 -0
- data/lib/karafka/{params/params.rb → messages/message.rb} +20 -13
- data/lib/karafka/messages/messages.rb +71 -0
- data/lib/karafka/{params → messages}/metadata.rb +4 -6
- data/lib/karafka/messages/parser.rb +14 -0
- data/lib/karafka/messages/seek.rb +12 -0
- data/lib/karafka/patches/rdkafka/bindings.rb +139 -0
- data/lib/karafka/pro/active_job/consumer.rb +47 -0
- data/lib/karafka/pro/active_job/dispatcher.rb +86 -0
- data/lib/karafka/pro/active_job/job_options_contract.rb +45 -0
- data/lib/karafka/pro/encryption/cipher.rb +58 -0
- data/lib/karafka/pro/encryption/contracts/config.rb +79 -0
- data/lib/karafka/pro/encryption/errors.rb +24 -0
- data/lib/karafka/pro/encryption/messages/middleware.rb +46 -0
- data/lib/karafka/pro/encryption/messages/parser.rb +56 -0
- data/lib/karafka/pro/encryption/setup/config.rb +48 -0
- data/lib/karafka/pro/encryption.rb +47 -0
- data/lib/karafka/pro/iterator/expander.rb +95 -0
- data/lib/karafka/pro/iterator/tpl_builder.rb +155 -0
- data/lib/karafka/pro/iterator.rb +170 -0
- data/lib/karafka/pro/loader.rb +102 -0
- data/lib/karafka/pro/performance_tracker.rb +84 -0
- data/lib/karafka/pro/processing/collapser.rb +62 -0
- data/lib/karafka/pro/processing/coordinator.rb +148 -0
- data/lib/karafka/pro/processing/filters/base.rb +61 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +70 -0
- data/lib/karafka/pro/processing/filters/expirer.rb +51 -0
- data/lib/karafka/pro/processing/filters/throttler.rb +84 -0
- data/lib/karafka/pro/processing/filters/virtual_limiter.rb +52 -0
- data/lib/karafka/pro/processing/filters_applier.rb +105 -0
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +39 -0
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +37 -0
- data/lib/karafka/pro/processing/jobs_builder.rb +50 -0
- data/lib/karafka/pro/processing/partitioner.rb +69 -0
- data/lib/karafka/pro/processing/scheduler.rb +75 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +70 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +76 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +72 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +76 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +66 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +70 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +64 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +69 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +38 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +66 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +38 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +58 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +37 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +82 -0
- data/lib/karafka/pro/processing/strategies/aj/mom.rb +36 -0
- data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +52 -0
- data/lib/karafka/pro/processing/strategies/base.rb +26 -0
- data/lib/karafka/pro/processing/strategies/default.rb +105 -0
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +131 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +61 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +75 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +71 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +43 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +41 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +69 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +41 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +64 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +65 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +36 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +39 -0
- data/lib/karafka/pro/processing/strategies/dlq/mom.rb +68 -0
- data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +37 -0
- data/lib/karafka/pro/processing/strategies/dlq/vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/ftr/default.rb +111 -0
- data/lib/karafka/pro/processing/strategies/ftr/vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +87 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +69 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +67 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +39 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +82 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +38 -0
- data/lib/karafka/pro/processing/strategies/lrj/vp.rb +36 -0
- data/lib/karafka/pro/processing/strategies/mom/default.rb +46 -0
- data/lib/karafka/pro/processing/strategies/mom/ftr.rb +53 -0
- data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +37 -0
- data/lib/karafka/pro/processing/strategies/mom/vp.rb +35 -0
- data/lib/karafka/pro/processing/strategies/vp/default.rb +104 -0
- data/lib/karafka/pro/processing/strategies.rb +22 -0
- data/lib/karafka/pro/processing/strategy_selector.rb +84 -0
- data/lib/karafka/pro/processing/virtual_offset_manager.rb +147 -0
- data/lib/karafka/pro/routing/features/base.rb +24 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue/contract.rb +50 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue.rb +27 -0
- data/lib/karafka/pro/routing/features/delaying/config.rb +27 -0
- data/lib/karafka/pro/routing/features/delaying/contract.rb +38 -0
- data/lib/karafka/pro/routing/features/delaying/topic.rb +59 -0
- data/lib/karafka/pro/routing/features/delaying.rb +29 -0
- data/lib/karafka/pro/routing/features/expiring/config.rb +27 -0
- data/lib/karafka/pro/routing/features/expiring/contract.rb +38 -0
- data/lib/karafka/pro/routing/features/expiring/topic.rb +59 -0
- data/lib/karafka/pro/routing/features/expiring.rb +27 -0
- data/lib/karafka/pro/routing/features/filtering/config.rb +40 -0
- data/lib/karafka/pro/routing/features/filtering/contract.rb +41 -0
- data/lib/karafka/pro/routing/features/filtering/topic.rb +51 -0
- data/lib/karafka/pro/routing/features/filtering.rb +27 -0
- data/lib/karafka/pro/routing/features/long_running_job/config.rb +28 -0
- data/lib/karafka/pro/routing/features/long_running_job/contract.rb +37 -0
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +42 -0
- data/lib/karafka/pro/routing/features/long_running_job.rb +28 -0
- data/lib/karafka/pro/routing/features/pausing/contract.rb +48 -0
- data/lib/karafka/pro/routing/features/pausing/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/pausing.rb +25 -0
- data/lib/karafka/pro/routing/features/throttling/config.rb +32 -0
- data/lib/karafka/pro/routing/features/throttling/contract.rb +41 -0
- data/lib/karafka/pro/routing/features/throttling/topic.rb +69 -0
- data/lib/karafka/pro/routing/features/throttling.rb +30 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +30 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/contract.rb +52 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +56 -0
- data/lib/karafka/pro/routing/features/virtual_partitions.rb +27 -0
- data/lib/karafka/pro.rb +13 -0
- data/lib/karafka/process.rb +24 -8
- data/lib/karafka/processing/coordinator.rb +181 -0
- data/lib/karafka/processing/coordinators_buffer.rb +62 -0
- data/lib/karafka/processing/executor.rb +148 -0
- data/lib/karafka/processing/executors_buffer.rb +72 -0
- data/lib/karafka/processing/jobs/base.rb +55 -0
- data/lib/karafka/processing/jobs/consume.rb +45 -0
- data/lib/karafka/processing/jobs/idle.rb +24 -0
- data/lib/karafka/processing/jobs/revoked.rb +22 -0
- data/lib/karafka/processing/jobs/shutdown.rb +23 -0
- data/lib/karafka/processing/jobs_builder.rb +28 -0
- data/lib/karafka/processing/jobs_queue.rb +150 -0
- data/lib/karafka/processing/partitioner.rb +24 -0
- data/lib/karafka/processing/result.rb +42 -0
- data/lib/karafka/processing/scheduler.rb +22 -0
- data/lib/karafka/processing/strategies/aj_dlq_mom.rb +44 -0
- data/lib/karafka/processing/strategies/aj_mom.rb +21 -0
- data/lib/karafka/processing/strategies/base.rb +52 -0
- data/lib/karafka/processing/strategies/default.rb +158 -0
- data/lib/karafka/processing/strategies/dlq.rb +88 -0
- data/lib/karafka/processing/strategies/dlq_mom.rb +49 -0
- data/lib/karafka/processing/strategies/mom.rb +29 -0
- data/lib/karafka/processing/strategy_selector.rb +47 -0
- data/lib/karafka/processing/worker.rb +93 -0
- data/lib/karafka/processing/workers_batch.rb +27 -0
- data/lib/karafka/railtie.rb +125 -0
- data/lib/karafka/routing/activity_manager.rb +84 -0
- data/lib/karafka/routing/builder.rb +34 -23
- data/lib/karafka/routing/consumer_group.rb +47 -21
- data/lib/karafka/routing/consumer_mapper.rb +1 -12
- data/lib/karafka/routing/features/active_job/builder.rb +33 -0
- data/lib/karafka/routing/features/active_job/config.rb +15 -0
- data/lib/karafka/routing/features/active_job/contract.rb +41 -0
- data/lib/karafka/routing/features/active_job/topic.rb +33 -0
- data/lib/karafka/routing/features/active_job.rb +13 -0
- data/lib/karafka/routing/features/base/expander.rb +53 -0
- data/lib/karafka/routing/features/base.rb +34 -0
- data/lib/karafka/routing/features/dead_letter_queue/config.rb +19 -0
- data/lib/karafka/routing/features/dead_letter_queue/contract.rb +42 -0
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +41 -0
- data/lib/karafka/routing/features/dead_letter_queue.rb +16 -0
- data/lib/karafka/routing/features/declaratives/config.rb +18 -0
- data/lib/karafka/routing/features/declaratives/contract.rb +30 -0
- data/lib/karafka/routing/features/declaratives/topic.rb +44 -0
- data/lib/karafka/routing/features/declaratives.rb +14 -0
- data/lib/karafka/routing/features/manual_offset_management/config.rb +15 -0
- data/lib/karafka/routing/features/manual_offset_management/contract.rb +24 -0
- data/lib/karafka/routing/features/manual_offset_management/topic.rb +35 -0
- data/lib/karafka/routing/features/manual_offset_management.rb +18 -0
- data/lib/karafka/routing/proxy.rb +18 -20
- data/lib/karafka/routing/router.rb +28 -3
- data/lib/karafka/routing/subscription_group.rb +91 -0
- data/lib/karafka/routing/subscription_groups_builder.rb +58 -0
- data/lib/karafka/routing/topic.rb +77 -24
- data/lib/karafka/routing/topics.rb +46 -0
- data/lib/karafka/runner.rb +52 -0
- data/lib/karafka/serialization/json/deserializer.rb +7 -15
- data/lib/karafka/server.rb +108 -37
- data/lib/karafka/setup/attributes_map.rb +347 -0
- data/lib/karafka/setup/config.rb +183 -179
- data/lib/karafka/status.rb +54 -7
- data/lib/karafka/templates/example_consumer.rb.erb +16 -0
- data/lib/karafka/templates/karafka.rb.erb +34 -56
- data/lib/karafka/time_trackers/base.rb +14 -0
- data/lib/karafka/time_trackers/pause.rb +122 -0
- data/lib/karafka/time_trackers/poll.rb +69 -0
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +90 -16
- data/renovate.json +6 -0
- data.tar.gz.sig +0 -0
- metadata +290 -172
- metadata.gz.sig +0 -0
- data/MIT-LICENCE +0 -18
- data/certs/mensfeld.pem +0 -25
- data/config/errors.yml +0 -41
- data/lib/karafka/assignment_strategies/round_robin.rb +0 -13
- data/lib/karafka/attributes_map.rb +0 -63
- data/lib/karafka/backends/inline.rb +0 -16
- data/lib/karafka/base_responder.rb +0 -226
- data/lib/karafka/cli/flow.rb +0 -48
- data/lib/karafka/cli/missingno.rb +0 -19
- data/lib/karafka/code_reloader.rb +0 -67
- data/lib/karafka/connection/api_adapter.rb +0 -159
- data/lib/karafka/connection/batch_delegator.rb +0 -55
- data/lib/karafka/connection/builder.rb +0 -23
- data/lib/karafka/connection/message_delegator.rb +0 -36
- data/lib/karafka/consumers/batch_metadata.rb +0 -10
- data/lib/karafka/consumers/callbacks.rb +0 -71
- data/lib/karafka/consumers/includer.rb +0 -64
- data/lib/karafka/consumers/responders.rb +0 -24
- data/lib/karafka/consumers/single_params.rb +0 -15
- data/lib/karafka/contracts/consumer_group_topic.rb +0 -19
- data/lib/karafka/contracts/responder_usage.rb +0 -54
- data/lib/karafka/fetcher.rb +0 -42
- data/lib/karafka/helpers/class_matcher.rb +0 -88
- data/lib/karafka/helpers/config_retriever.rb +0 -46
- data/lib/karafka/helpers/inflector.rb +0 -26
- data/lib/karafka/instrumentation/stdout_listener.rb +0 -140
- data/lib/karafka/params/batch_metadata.rb +0 -26
- data/lib/karafka/params/builders/batch_metadata.rb +0 -30
- data/lib/karafka/params/builders/params.rb +0 -38
- data/lib/karafka/params/builders/params_batch.rb +0 -25
- data/lib/karafka/params/params_batch.rb +0 -60
- data/lib/karafka/patches/ruby_kafka.rb +0 -47
- data/lib/karafka/persistence/client.rb +0 -29
- data/lib/karafka/persistence/consumers.rb +0 -45
- data/lib/karafka/persistence/topics.rb +0 -48
- data/lib/karafka/responders/builder.rb +0 -36
- data/lib/karafka/responders/topic.rb +0 -55
- data/lib/karafka/routing/topic_mapper.rb +0 -53
- data/lib/karafka/serialization/json/serializer.rb +0 -31
- data/lib/karafka/setup/configurators/water_drop.rb +0 -36
- data/lib/karafka/templates/application_responder.rb.erb +0 -11
@@ -1,118 +1,531 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
module Karafka
|
4
|
+
# Namespace for Kafka connection related logic
|
4
5
|
module Connection
|
5
|
-
#
|
6
|
-
#
|
6
|
+
# An abstraction layer on top of the rdkafka consumer.
|
7
|
+
#
|
8
|
+
# It is threadsafe and provides some security measures so we won't end up operating on a
|
9
|
+
# closed consumer instance as it causes Ruby VM process to crash.
|
7
10
|
class Client
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
|
12
|
-
|
13
|
-
|
14
|
-
]
|
15
|
-
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
#
|
21
|
-
#
|
22
|
-
#
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
30
|
-
|
31
|
-
#
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
11
|
+
attr_reader :rebalance_manager
|
12
|
+
|
13
|
+
# @return [String] underlying consumer name
|
14
|
+
# @note Consumer name may change in case we regenerate it
|
15
|
+
attr_reader :name
|
16
|
+
|
17
|
+
# @return [String] id of the client
|
18
|
+
attr_reader :id
|
19
|
+
|
20
|
+
# How many times should we retry polling in case of a failure
|
21
|
+
MAX_POLL_RETRIES = 20
|
22
|
+
|
23
|
+
# 1 minute of max wait for the first rebalance before a forceful attempt
|
24
|
+
# This applies only to a case when a short-lived Karafka instance with a client would be
|
25
|
+
# closed before first rebalance. Mitigates a librdkafka bug.
|
26
|
+
COOPERATIVE_STICKY_MAX_WAIT = 60_000
|
27
|
+
|
28
|
+
# We want to make sure we never close several clients in the same moment to prevent
|
29
|
+
# potential race conditions and other issues
|
30
|
+
SHUTDOWN_MUTEX = Mutex.new
|
31
|
+
|
32
|
+
private_constant :MAX_POLL_RETRIES, :SHUTDOWN_MUTEX, :COOPERATIVE_STICKY_MAX_WAIT
|
33
|
+
|
34
|
+
# Creates a new consumer instance.
|
35
|
+
#
|
36
|
+
# @param subscription_group [Karafka::Routing::SubscriptionGroup] subscription group
|
37
|
+
# with all the configuration details needed for us to create a client
|
38
|
+
# @return [Karafka::Connection::Client]
|
39
|
+
def initialize(subscription_group)
|
40
|
+
@id = SecureRandom.hex(6)
|
41
|
+
# Name is set when we build consumer
|
42
|
+
@name = ''
|
43
|
+
@closed = false
|
44
|
+
@subscription_group = subscription_group
|
45
|
+
@buffer = RawMessagesBuffer.new
|
46
|
+
@rebalance_manager = RebalanceManager.new
|
47
|
+
@kafka = build_consumer
|
48
|
+
# There are few operations that can happen in parallel from the listener threads as well
|
49
|
+
# as from the workers. They are not fully thread-safe because they may be composed out of
|
50
|
+
# few calls to Kafka or out of few internal state changes. That is why we mutex them.
|
51
|
+
# It mostly revolves around pausing and resuming.
|
52
|
+
@mutex = Mutex.new
|
53
|
+
# We need to keep track of what we have paused for resuming
|
54
|
+
# In case we loose partition, we still need to resume it, otherwise it won't be fetched
|
55
|
+
# again if we get reassigned to it later on. We need to keep them as after revocation we
|
56
|
+
# no longer may be able to fetch them from Kafka. We could build them but it is easier
|
57
|
+
# to just keep them here and use if needed when cannot be obtained
|
58
|
+
@paused_tpls = Hash.new { |h, k| h[k] = {} }
|
59
|
+
end
|
60
|
+
|
61
|
+
# Fetches messages within boundaries defined by the settings (time, size, topics, etc).
|
62
|
+
#
|
63
|
+
# @return [Karafka::Connection::MessagesBuffer] messages buffer that holds messages per topic
|
64
|
+
# partition
|
65
|
+
# @note This method should not be executed from many threads at the same time
|
66
|
+
def batch_poll
|
67
|
+
time_poll = TimeTrackers::Poll.new(@subscription_group.max_wait_time)
|
68
|
+
|
69
|
+
@buffer.clear
|
70
|
+
@rebalance_manager.clear
|
71
|
+
|
72
|
+
loop do
|
73
|
+
time_poll.start
|
74
|
+
|
75
|
+
# Don't fetch more messages if we do not have any time left
|
76
|
+
break if time_poll.exceeded?
|
77
|
+
# Don't fetch more messages if we've fetched max as we've wanted
|
78
|
+
break if @buffer.size >= @subscription_group.max_messages
|
79
|
+
|
80
|
+
# Fetch message within our time boundaries
|
81
|
+
message = poll(time_poll.remaining)
|
82
|
+
|
83
|
+
# Put a message to the buffer if there is one
|
84
|
+
@buffer << message if message
|
85
|
+
|
86
|
+
# Upon polling rebalance manager might have been updated.
|
87
|
+
# If partition revocation happens, we need to remove messages from revoked partitions
|
88
|
+
# as well as ensure we do not have duplicated due to the offset reset for partitions
|
89
|
+
# that we got assigned
|
90
|
+
# We also do early break, so the information about rebalance is used as soon as possible
|
91
|
+
if @rebalance_manager.changed?
|
92
|
+
remove_revoked_and_duplicated_messages
|
93
|
+
break
|
94
|
+
end
|
95
|
+
|
96
|
+
# Track time spent on all of the processing and polling
|
97
|
+
time_poll.checkpoint
|
98
|
+
|
99
|
+
# Finally once we've (potentially) removed revoked, etc, if no messages were returned
|
100
|
+
# we can break.
|
101
|
+
# Worth keeping in mind, that the rebalance manager might have been updated despite no
|
102
|
+
# messages being returned during a poll
|
103
|
+
break unless message
|
39
104
|
end
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
105
|
+
|
106
|
+
@buffer
|
107
|
+
end
|
108
|
+
|
109
|
+
# Stores offset for a given partition of a given topic based on the provided message.
|
110
|
+
#
|
111
|
+
# @param message [Karafka::Messages::Message]
|
112
|
+
def store_offset(message)
|
113
|
+
internal_store_offset(message)
|
114
|
+
end
|
115
|
+
|
116
|
+
# @return [Boolean] true if our current assignment has been lost involuntarily.
|
117
|
+
def assignment_lost?
|
118
|
+
@kafka.assignment_lost?
|
119
|
+
end
|
120
|
+
|
121
|
+
# Commits the offset on a current consumer in a non-blocking or blocking way.
|
122
|
+
#
|
123
|
+
# @param async [Boolean] should the commit happen async or sync (async by default)
|
124
|
+
# @return [Boolean] did committing was successful. It may be not, when we no longer own
|
125
|
+
# given partition.
|
126
|
+
#
|
127
|
+
# @note This will commit all the offsets for the whole consumer. In order to achieve
|
128
|
+
# granular control over where the offset should be for particular topic partitions, the
|
129
|
+
# store_offset should be used to only store new offset when we want them to be flushed
|
130
|
+
#
|
131
|
+
# @note This method for async may return `true` despite involuntary partition revocation as
|
132
|
+
# it does **not** resolve to `lost_assignment?`. It returns only the commit state operation
|
133
|
+
# result.
|
134
|
+
def commit_offsets(async: true)
|
135
|
+
internal_commit_offsets(async: async)
|
52
136
|
end
|
53
137
|
|
54
|
-
#
|
138
|
+
# Commits offset in a synchronous way.
|
139
|
+
#
|
140
|
+
# @see `#commit_offset` for more details
|
141
|
+
def commit_offsets!
|
142
|
+
commit_offsets(async: false)
|
143
|
+
end
|
144
|
+
|
145
|
+
# Seek to a particular message. The next poll on the topic/partition will return the
|
146
|
+
# message at the given offset.
|
147
|
+
#
|
148
|
+
# @param message [Messages::Message, Messages::Seek] message to which we want to seek to.
|
149
|
+
# It can have the time based offset.
|
150
|
+
# @note Please note, that if you are seeking to a time offset, getting the offset is blocking
|
151
|
+
def seek(message)
|
152
|
+
@mutex.synchronize { internal_seek(message) }
|
153
|
+
end
|
154
|
+
|
155
|
+
# Pauses given partition and moves back to last successful offset processed.
|
156
|
+
#
|
157
|
+
# @param topic [String] topic name
|
158
|
+
# @param partition [Integer] partition
|
159
|
+
# @param offset [Integer] offset of the message on which we want to pause (this message will
|
160
|
+
# be reprocessed after getting back to processing)
|
161
|
+
# @note This will pause indefinitely and requires manual `#resume`
|
162
|
+
def pause(topic, partition, offset)
|
163
|
+
@mutex.synchronize do
|
164
|
+
# Do not pause if the client got closed, would not change anything
|
165
|
+
return if @closed
|
166
|
+
|
167
|
+
pause_msg = Messages::Seek.new(topic, partition, offset)
|
168
|
+
|
169
|
+
internal_commit_offsets(async: true)
|
170
|
+
|
171
|
+
# Here we do not use our cached tpls because we should not try to pause something we do
|
172
|
+
# not own anymore.
|
173
|
+
tpl = topic_partition_list(topic, partition)
|
174
|
+
|
175
|
+
return unless tpl
|
176
|
+
|
177
|
+
Karafka.monitor.instrument(
|
178
|
+
'client.pause',
|
179
|
+
caller: self,
|
180
|
+
subscription_group: @subscription_group,
|
181
|
+
topic: topic,
|
182
|
+
partition: partition,
|
183
|
+
offset: offset
|
184
|
+
)
|
185
|
+
|
186
|
+
@paused_tpls[topic][partition] = tpl
|
187
|
+
|
188
|
+
@kafka.pause(tpl)
|
189
|
+
internal_seek(pause_msg)
|
190
|
+
end
|
191
|
+
end
|
192
|
+
|
193
|
+
# Resumes processing of a give topic partition after it was paused.
|
194
|
+
#
|
195
|
+
# @param topic [String] topic name
|
196
|
+
# @param partition [Integer] partition
|
197
|
+
def resume(topic, partition)
|
198
|
+
@mutex.synchronize do
|
199
|
+
return if @closed
|
200
|
+
|
201
|
+
# We now commit offsets on rebalances, thus we can do it async just to make sure
|
202
|
+
internal_commit_offsets(async: true)
|
203
|
+
|
204
|
+
# If we were not able, let's try to reuse the one we have (if we have)
|
205
|
+
tpl = topic_partition_list(topic, partition) || @paused_tpls[topic][partition]
|
206
|
+
|
207
|
+
return unless tpl
|
208
|
+
|
209
|
+
# If we did not have it, it means we never paused this partition, thus no resume should
|
210
|
+
# happen in the first place
|
211
|
+
return unless @paused_tpls[topic].delete(partition)
|
212
|
+
|
213
|
+
Karafka.monitor.instrument(
|
214
|
+
'client.resume',
|
215
|
+
caller: self,
|
216
|
+
subscription_group: @subscription_group,
|
217
|
+
topic: topic,
|
218
|
+
partition: partition
|
219
|
+
)
|
220
|
+
|
221
|
+
@kafka.resume(tpl)
|
222
|
+
end
|
223
|
+
end
|
224
|
+
|
225
|
+
# Gracefully stops topic consumption.
|
226
|
+
#
|
55
227
|
# @note Stopping running consumers without a really important reason is not recommended
|
56
228
|
# as until all the consumers are stopped, the server will keep running serving only
|
57
229
|
# part of the messages
|
58
230
|
def stop
|
59
|
-
|
60
|
-
|
231
|
+
# This ensures, that we do not stop the underlying client until it passes the first
|
232
|
+
# rebalance for cooperative-sticky. Otherwise librdkafka may crash
|
233
|
+
#
|
234
|
+
# We set a timeout just in case the rebalance would never happen or would last for an
|
235
|
+
# extensive time period.
|
236
|
+
#
|
237
|
+
# @see https://github.com/confluentinc/librdkafka/issues/4312
|
238
|
+
if @subscription_group.kafka[:'partition.assignment.strategy'] == 'cooperative-sticky'
|
239
|
+
(COOPERATIVE_STICKY_MAX_WAIT / 100).times do
|
240
|
+
# If we're past the first rebalance, no need to wait
|
241
|
+
break if @rebalance_manager.active?
|
242
|
+
|
243
|
+
sleep(0.1)
|
244
|
+
end
|
245
|
+
end
|
246
|
+
|
247
|
+
close
|
61
248
|
end
|
62
249
|
|
63
|
-
#
|
64
|
-
#
|
65
|
-
# @param
|
66
|
-
|
67
|
-
|
250
|
+
# Marks given message as consumed.
|
251
|
+
#
|
252
|
+
# @param [Karafka::Messages::Message] message that we want to mark as processed
|
253
|
+
# @return [Boolean] true if successful. False if we no longer own given partition
|
254
|
+
# @note This method won't trigger automatic offsets commits, rather relying on the offset
|
255
|
+
# check-pointing trigger that happens with each batch processed. It will however check the
|
256
|
+
# `librdkafka` assignment ownership to increase accuracy for involuntary revocations.
|
257
|
+
def mark_as_consumed(message)
|
258
|
+
store_offset(message) && !assignment_lost?
|
68
259
|
end
|
69
260
|
|
70
|
-
# Marks given message as consumed
|
71
|
-
#
|
72
|
-
# @
|
73
|
-
#
|
74
|
-
def mark_as_consumed(
|
75
|
-
|
76
|
-
|
77
|
-
|
261
|
+
# Marks a given message as consumed and commits the offsets in a blocking way.
|
262
|
+
#
|
263
|
+
# @param [Karafka::Messages::Message] message that we want to mark as processed
|
264
|
+
# @return [Boolean] true if successful. False if we no longer own given partition
|
265
|
+
def mark_as_consumed!(message)
|
266
|
+
return false unless mark_as_consumed(message)
|
267
|
+
|
268
|
+
commit_offsets!
|
269
|
+
end
|
270
|
+
|
271
|
+
# Closes and resets the client completely.
|
272
|
+
def reset
|
273
|
+
close
|
274
|
+
|
275
|
+
@closed = false
|
276
|
+
@paused_tpls.clear
|
277
|
+
@kafka = build_consumer
|
78
278
|
end
|
79
279
|
|
80
|
-
#
|
81
|
-
#
|
82
|
-
#
|
83
|
-
#
|
84
|
-
|
85
|
-
|
86
|
-
|
87
|
-
|
88
|
-
|
280
|
+
# Runs a single poll ignoring all the potential errors
|
281
|
+
# This is used as a keep-alive in the shutdown stage and any errors that happen here are
|
282
|
+
# irrelevant from the shutdown process perspective
|
283
|
+
#
|
284
|
+
# This is used only to trigger rebalance callbacks
|
285
|
+
def ping
|
286
|
+
poll(100)
|
287
|
+
rescue Rdkafka::RdkafkaError
|
288
|
+
nil
|
89
289
|
end
|
90
290
|
|
91
291
|
private
|
92
292
|
|
93
|
-
|
293
|
+
# When we cannot store an offset, it means we no longer own the partition
|
294
|
+
#
|
295
|
+
# Non thread-safe offset storing method
|
296
|
+
# @param message [Karafka::Messages::Message]
|
297
|
+
# @return [Boolean] true if we could store the offset (if we still own the partition)
|
298
|
+
def internal_store_offset(message)
|
299
|
+
@kafka.store_offset(message)
|
300
|
+
true
|
301
|
+
rescue Rdkafka::RdkafkaError => e
|
302
|
+
return false if e.code == :assignment_lost
|
303
|
+
return false if e.code == :state
|
304
|
+
|
305
|
+
raise e
|
306
|
+
end
|
307
|
+
|
308
|
+
# Non thread-safe message committing method
|
309
|
+
# @param async [Boolean] should the commit happen async or sync (async by default)
|
310
|
+
# @return [Boolean] true if offset commit worked, false if we've lost the assignment
|
311
|
+
# @note We do **not** consider `no_offset` as any problem and we allow to commit offsets
|
312
|
+
# even when no stored, because with sync commit, it refreshes the ownership state of the
|
313
|
+
# consumer in a sync way.
|
314
|
+
def internal_commit_offsets(async: true)
|
315
|
+
@kafka.commit(nil, async)
|
316
|
+
|
317
|
+
true
|
318
|
+
rescue Rdkafka::RdkafkaError => e
|
319
|
+
case e.code
|
320
|
+
when :assignment_lost
|
321
|
+
return false
|
322
|
+
when :unknown_member_id
|
323
|
+
return false
|
324
|
+
when :no_offset
|
325
|
+
return true
|
326
|
+
when :coordinator_load_in_progress
|
327
|
+
sleep(1)
|
328
|
+
retry
|
329
|
+
end
|
330
|
+
|
331
|
+
raise e
|
332
|
+
end
|
333
|
+
|
334
|
+
# Non-mutexed seek that should be used only internally. Outside we expose `#seek` that is
# wrapped with a mutex.
#
# @param message [Messages::Message, Messages::Seek] message to which we want to seek to.
#   It can have the time based offset.
# @raise [Errors::InvalidTimeBasedOffsetError] when a time-based offset cannot be resolved
def internal_seek(message)
  # If the seek message offset is in a time format, we need to find the closest "real"
  # offset matching before we seek
  if message.offset.is_a?(Time)
    tpl = ::Rdkafka::Consumer::TopicPartitionList.new
    tpl.add_topic_and_partitions_with_offsets(
      message.topic,
      message.partition => message.offset
    )

    proxy = Proxy.new(@kafka)

    # Now we can overwrite the seek message offset with our resolved offset and we can
    # then seek to the appropriate message
    # We set the timeout to 2_000 to make sure that remote clusters handle this well
    # NOTE(review): no timeout is passed to `offsets_for_times` here despite the comment
    # above - presumably Proxy applies the 2_000 default internally; confirm
    real_offsets = proxy.offsets_for_times(tpl)
    detected_partition = real_offsets.to_h.dig(message.topic, message.partition)

    # There always needs to be an offset. In case we seek into the future, where there
    # are no offsets yet, we get -1 which indicates the most recent offset
    # We should always detect offset, whether it is 0, -1 or a corresponding
    message.offset = detected_partition&.offset || raise(Errors::InvalidTimeBasedOffsetError)
  end

  @kafka.seek(message)
end
|
365
|
+
|
366
|
+
# Commits the stored offsets in a sync way and closes the consumer.
# NOTE(review): the sync commit itself is not visible here - presumably it happens inside
# `@kafka.close` (rdkafka-level); confirm
def close
  # Allow only one client to be closed at the same time
  SHUTDOWN_MUTEX.synchronize do
    # Once client is closed, we should not close it again
    # This could only happen in case of a race-condition when forceful shutdown happens
    # and triggers this from a different thread
    return if @closed

    @closed = true

    # Remove callbacks runners that were registered
    ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@subscription_group.id)
    ::Karafka::Core::Instrumentation.error_callbacks.delete(@subscription_group.id)

    @kafka.close
    @buffer.clear
    # @note We do not clear rebalance manager here as we may still have revocation info
    #   here that we want to consider valid prior to running another reconnection
  end
end
|
387
|
+
|
388
|
+
# Unsubscribes from all the subscriptions
# @note This is a private API to be used only on shutdown
# @note We do not re-raise since this is supposed to be only used on close and can be safely
#   ignored. We do however want to instrument on it
def unsubscribe
  @kafka.unsubscribe
rescue ::Rdkafka::RdkafkaError => e
  # Swallow the error but publish it so monitoring still sees unsubscribe failures
  Karafka.monitor.instrument(
    'error.occurred',
    caller: self,
    error: e,
    type: 'connection.client.unsubscribe.error'
  )
end
|
402
|
+
|
403
|
+
# Builds a single-entry topic partition list for the given topic/partition pair,
# based on the current assignment of this consumer.
#
# @param topic [String]
# @param partition [Integer]
# @return [Rdkafka::Consumer::TopicPartitionList, nil] nil when we are not assigned
#   the requested partition
def topic_partition_list(topic, partition)
  assigned_partitions = @kafka.assignment.to_h[topic]
  matching = assigned_partitions&.find { |candidate| candidate.partition == partition }

  return unless matching

  Rdkafka::Consumer::TopicPartitionList.new({ topic => [matching] })
end
|
416
|
+
|
417
|
+
# Performs a single poll operation and handles retries and error
#
# @param timeout [Integer] timeout for a single poll
# @return [Rdkafka::Consumer::Message, nil] fetched message or nil if nothing polled
def poll(timeout)
  # `||=` matters here: on `retry` we re-enter the method body and must keep the same
  # time tracker (with its accumulated attempts) instead of resetting it
  time_poll ||= TimeTrackers::Poll.new(timeout)

  return nil if time_poll.exceeded?

  time_poll.start

  @kafka.poll(timeout)
rescue ::Rdkafka::RdkafkaError => e
  early_report = false

  retryable = time_poll.attempts <= MAX_POLL_RETRIES && time_poll.retryable?

  # There are retryable issues on which we want to report fast as they are source of
  # problems and can mean some bigger system instabilities
  # Those are mainly network issues and exceeding the max poll interval
  # We want to report early on max poll interval exceeding because it may mean that the
  # underlying processing is taking too much time and it is not LRJ
  case e.code
  when :max_poll_exceeded # -147
    early_report = true
  when :network_exception # 13
    early_report = true
  when :transport # -195
    early_report = true
  # @see
  # https://github.com/confluentinc/confluent-kafka-dotnet/issues/1366#issuecomment-821842990
  # This will be raised each time poll detects a non-existing topic. When auto creation is
  # on, we can safely ignore it
  when :unknown_topic_or_part # 3
    return nil if @subscription_group.kafka[:'allow.auto.create.topics']

    early_report = true

    # No sense in retrying when no topic/partition and we're no longer running
    retryable = false unless Karafka::App.running?
  end

  # Instrument either when we want an early report or when we are about to give up
  if early_report || !retryable
    Karafka.monitor.instrument(
      'error.occurred',
      caller: self,
      error: e,
      type: 'connection.client.poll.error'
    )
  end

  raise unless retryable

  # Most of the errors can be safely ignored as librdkafka will recover from them
  # @see https://github.com/edenhill/librdkafka/issues/1987#issuecomment-422008750
  # @see https://github.com/edenhill/librdkafka/wiki/Error-handling

  time_poll.checkpoint
  time_poll.backoff

  # poll may not only return message but also can run callbacks and if they changed,
  # despite the errors we need to delegate to the other app parts
  @rebalance_manager.changed? ? nil : retry
end
|
481
|
+
|
482
|
+
# Builds a new rdkafka consumer instance based on the subscription group configuration
# @return [Rdkafka::Consumer]
def build_consumer
  ::Rdkafka::Config.logger = ::Karafka::App.config.logger
  config = ::Rdkafka::Config.new(@subscription_group.kafka)
  # Rebalance events are delegated to our manager so revocations/assignments are tracked
  config.consumer_rebalance_listener = @rebalance_manager
  consumer = config.consumer
  # The librdkafka-assigned name is cached for use in instrumentation callbacks below
  @name = consumer.name

  # Register statistics runner for this particular type of callbacks
  ::Karafka::Core::Instrumentation.statistics_callbacks.add(
    @subscription_group.id,
    Instrumentation::Callbacks::Statistics.new(
      @subscription_group.id,
      @subscription_group.consumer_group_id,
      @name
    )
  )

  # Register error tracking callback
  ::Karafka::Core::Instrumentation.error_callbacks.add(
    @subscription_group.id,
    Instrumentation::Callbacks::Error.new(
      @subscription_group.id,
      @subscription_group.consumer_group_id,
      @name
    )
  )

  # Subscription needs to happen after we assigned the rebalance callbacks just in case of
  # a race condition
  consumer.subscribe(*@subscription_group.topics.map(&:name))
  consumer
end
|
516
|
+
|
517
|
+
# Purges pre-buffered messages that belong to partitions we no longer own.
#
# During data polling we may lose a partition mid-flight. Messages buffered for such
# partitions are no longer ours to process (another process will pick them up), so we
# drop them and then deduplicate whatever remains in the buffer.
def remove_revoked_and_duplicated_messages
  revoked = @rebalance_manager.lost_partitions

  revoked.each do |topic, partitions|
    partitions.each { |partition| @buffer.delete(topic, partition) }
  end

  @buffer.uniq!
end
|
117
530
|
end
|
118
531
|
end
|
@@ -0,0 +1,48 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Connection
    # Tracks the collective execution status of the listeners that run inside one consumer
    # group but in separate subscription groups.
    #
    # Closing a client while its consumer-group siblings are still running can destabilize
    # the group (early shutdown of some clients), so shutdown is only allowed once every
    # subscription group has reported that it is done, and only one listener may perform
    # the close at a time while the others keep polling.
    #
    # This protects against losing an assignment via an unacknowledged rebalance before we
    # had a chance to commit our changes.
    class ConsumerGroupCoordinator
      # @param group_size [Integer] number of separate subscription groups in a consumer group
      def initialize(group_size)
        @group_size = group_size
        @finished_work = Set.new
        @shutdown_lock = Mutex.new
      end

      # @return [Boolean] true if all the subscription groups from a given consumer group are
      #   finished
      def finished?
        @finished_work.size == @group_size
      end

      # @return [Boolean] can we start shutdown on a given listener
      # @note If true, will also obtain a lock so no-one else will be closing the same time we do
      def shutdown?
        return false unless finished?

        @shutdown_lock.try_lock
      end

      # Unlocks the shutdown lock (only when held by the current thread)
      def unlock
        @shutdown_lock.unlock if @shutdown_lock.owned?
      end

      # Marks given listener as finished
      # @param listener_id [String]
      def finish_work(listener_id)
        @finished_work << listener_id
      end
    end
  end
end
|