karafka 1.4.12 → 2.2.10
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/FUNDING.yml +1 -0
- data/.github/ISSUE_TEMPLATE/bug_report.md +10 -9
- data/.github/workflows/ci.yml +169 -31
- data/.rspec +4 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +716 -607
- data/CONTRIBUTING.md +10 -19
- data/Gemfile +7 -0
- data/Gemfile.lock +69 -92
- data/LICENSE +17 -0
- data/LICENSE-COMM +89 -0
- data/LICENSE-LGPL +165 -0
- data/README.md +48 -47
- data/bin/benchmarks +99 -0
- data/bin/create_token +22 -0
- data/bin/integrations +310 -0
- data/bin/karafka +5 -14
- data/bin/record_rss +50 -0
- data/bin/rspecs +6 -0
- data/bin/scenario +29 -0
- data/bin/stress_many +13 -0
- data/bin/stress_one +13 -0
- data/bin/verify_license_integrity +37 -0
- data/bin/wait_for_kafka +24 -0
- data/certs/cert_chain.pem +26 -0
- data/certs/karafka-pro.pem +11 -0
- data/config/locales/errors.yml +97 -0
- data/config/locales/pro_errors.yml +59 -0
- data/docker-compose.yml +19 -11
- data/karafka.gemspec +26 -22
- data/lib/active_job/karafka.rb +17 -0
- data/lib/active_job/queue_adapters/karafka_adapter.rb +32 -0
- data/lib/karafka/active_job/consumer.rb +49 -0
- data/lib/karafka/active_job/current_attributes/loading.rb +36 -0
- data/lib/karafka/active_job/current_attributes/persistence.rb +28 -0
- data/lib/karafka/active_job/current_attributes.rb +42 -0
- data/lib/karafka/active_job/dispatcher.rb +69 -0
- data/lib/karafka/active_job/job_extensions.rb +34 -0
- data/lib/karafka/active_job/job_options_contract.rb +32 -0
- data/lib/karafka/admin.rb +313 -0
- data/lib/karafka/app.rb +47 -23
- data/lib/karafka/base_consumer.rb +260 -29
- data/lib/karafka/cli/base.rb +67 -36
- data/lib/karafka/cli/console.rb +18 -12
- data/lib/karafka/cli/help.rb +24 -0
- data/lib/karafka/cli/info.rb +47 -12
- data/lib/karafka/cli/install.rb +23 -14
- data/lib/karafka/cli/server.rb +101 -44
- data/lib/karafka/cli/topics.rb +146 -0
- data/lib/karafka/cli.rb +24 -27
- data/lib/karafka/connection/client.rb +553 -90
- data/lib/karafka/connection/consumer_group_coordinator.rb +48 -0
- data/lib/karafka/connection/listener.rb +294 -38
- data/lib/karafka/connection/listeners_batch.rb +40 -0
- data/lib/karafka/connection/messages_buffer.rb +84 -0
- data/lib/karafka/connection/pauses_manager.rb +46 -0
- data/lib/karafka/connection/proxy.rb +98 -0
- data/lib/karafka/connection/raw_messages_buffer.rb +101 -0
- data/lib/karafka/connection/rebalance_manager.rb +105 -0
- data/lib/karafka/contracts/base.rb +17 -0
- data/lib/karafka/contracts/config.rb +130 -11
- data/lib/karafka/contracts/consumer_group.rb +32 -187
- data/lib/karafka/contracts/server_cli_options.rb +80 -19
- data/lib/karafka/contracts/topic.rb +65 -0
- data/lib/karafka/contracts.rb +1 -1
- data/lib/karafka/embedded.rb +36 -0
- data/lib/karafka/env.rb +46 -0
- data/lib/karafka/errors.rb +37 -21
- data/lib/karafka/helpers/async.rb +33 -0
- data/lib/karafka/helpers/colorize.rb +26 -0
- data/lib/karafka/helpers/multi_delegator.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/error.rb +39 -0
- data/lib/karafka/instrumentation/callbacks/rebalance.rb +64 -0
- data/lib/karafka/instrumentation/callbacks/statistics.rb +51 -0
- data/lib/karafka/instrumentation/logger_listener.rb +303 -0
- data/lib/karafka/instrumentation/monitor.rb +13 -61
- data/lib/karafka/instrumentation/notifications.rb +79 -0
- data/lib/karafka/instrumentation/proctitle_listener.rb +7 -16
- data/lib/karafka/instrumentation/vendors/appsignal/base.rb +30 -0
- data/lib/karafka/instrumentation/vendors/appsignal/client.rb +122 -0
- data/lib/karafka/instrumentation/vendors/appsignal/dashboard.json +222 -0
- data/lib/karafka/instrumentation/vendors/appsignal/errors_listener.rb +30 -0
- data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +331 -0
- data/lib/karafka/instrumentation/vendors/datadog/dashboard.json +1 -0
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +155 -0
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +264 -0
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +176 -0
- data/lib/karafka/licenser.rb +78 -0
- data/lib/karafka/messages/batch_metadata.rb +52 -0
- data/lib/karafka/messages/builders/batch_metadata.rb +60 -0
- data/lib/karafka/messages/builders/message.rb +40 -0
- data/lib/karafka/messages/builders/messages.rb +36 -0
- data/lib/karafka/{params/params.rb → messages/message.rb} +20 -13
- data/lib/karafka/messages/messages.rb +71 -0
- data/lib/karafka/{params → messages}/metadata.rb +4 -6
- data/lib/karafka/messages/parser.rb +14 -0
- data/lib/karafka/messages/seek.rb +12 -0
- data/lib/karafka/patches/rdkafka/bindings.rb +122 -0
- data/lib/karafka/patches/rdkafka/opaque.rb +36 -0
- data/lib/karafka/pro/active_job/consumer.rb +47 -0
- data/lib/karafka/pro/active_job/dispatcher.rb +86 -0
- data/lib/karafka/pro/active_job/job_options_contract.rb +45 -0
- data/lib/karafka/pro/cleaner/errors.rb +27 -0
- data/lib/karafka/pro/cleaner/messages/message.rb +46 -0
- data/lib/karafka/pro/cleaner/messages/messages.rb +42 -0
- data/lib/karafka/pro/cleaner.rb +41 -0
- data/lib/karafka/pro/contracts/base.rb +23 -0
- data/lib/karafka/pro/contracts/server_cli_options.rb +111 -0
- data/lib/karafka/pro/encryption/cipher.rb +58 -0
- data/lib/karafka/pro/encryption/contracts/config.rb +79 -0
- data/lib/karafka/pro/encryption/errors.rb +27 -0
- data/lib/karafka/pro/encryption/messages/middleware.rb +46 -0
- data/lib/karafka/pro/encryption/messages/parser.rb +56 -0
- data/lib/karafka/pro/encryption/setup/config.rb +48 -0
- data/lib/karafka/pro/encryption.rb +47 -0
- data/lib/karafka/pro/iterator/expander.rb +95 -0
- data/lib/karafka/pro/iterator/tpl_builder.rb +155 -0
- data/lib/karafka/pro/iterator.rb +170 -0
- data/lib/karafka/pro/loader.rb +106 -0
- data/lib/karafka/pro/performance_tracker.rb +84 -0
- data/lib/karafka/pro/processing/collapser.rb +62 -0
- data/lib/karafka/pro/processing/coordinator.rb +147 -0
- data/lib/karafka/pro/processing/filters/base.rb +61 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +70 -0
- data/lib/karafka/pro/processing/filters/expirer.rb +51 -0
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +78 -0
- data/lib/karafka/pro/processing/filters/throttler.rb +84 -0
- data/lib/karafka/pro/processing/filters/virtual_limiter.rb +52 -0
- data/lib/karafka/pro/processing/filters_applier.rb +105 -0
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +39 -0
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +37 -0
- data/lib/karafka/pro/processing/jobs_builder.rb +50 -0
- data/lib/karafka/pro/processing/partitioner.rb +69 -0
- data/lib/karafka/pro/processing/scheduler.rb +75 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +70 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +76 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +72 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +76 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +66 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +70 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +64 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +69 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +38 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +66 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +38 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +58 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +37 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +82 -0
- data/lib/karafka/pro/processing/strategies/aj/mom.rb +36 -0
- data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +52 -0
- data/lib/karafka/pro/processing/strategies/base.rb +26 -0
- data/lib/karafka/pro/processing/strategies/default.rb +105 -0
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +137 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +61 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +75 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +71 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +43 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +41 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +69 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +41 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +64 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +65 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +36 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +39 -0
- data/lib/karafka/pro/processing/strategies/dlq/mom.rb +68 -0
- data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +37 -0
- data/lib/karafka/pro/processing/strategies/dlq/vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/ftr/default.rb +111 -0
- data/lib/karafka/pro/processing/strategies/ftr/vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +85 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +69 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +67 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +39 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +77 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +38 -0
- data/lib/karafka/pro/processing/strategies/lrj/vp.rb +36 -0
- data/lib/karafka/pro/processing/strategies/mom/default.rb +46 -0
- data/lib/karafka/pro/processing/strategies/mom/ftr.rb +53 -0
- data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +37 -0
- data/lib/karafka/pro/processing/strategies/mom/vp.rb +35 -0
- data/lib/karafka/pro/processing/strategies/vp/default.rb +124 -0
- data/lib/karafka/pro/processing/strategies.rb +22 -0
- data/lib/karafka/pro/processing/strategy_selector.rb +84 -0
- data/lib/karafka/pro/processing/virtual_offset_manager.rb +147 -0
- data/lib/karafka/pro/routing/features/active_job/builder.rb +45 -0
- data/lib/karafka/pro/routing/features/active_job.rb +26 -0
- data/lib/karafka/pro/routing/features/base.rb +24 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +53 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue.rb +27 -0
- data/lib/karafka/pro/routing/features/delaying/config.rb +27 -0
- data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/delaying/topic.rb +59 -0
- data/lib/karafka/pro/routing/features/delaying.rb +29 -0
- data/lib/karafka/pro/routing/features/expiring/config.rb +27 -0
- data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/expiring/topic.rb +59 -0
- data/lib/karafka/pro/routing/features/expiring.rb +27 -0
- data/lib/karafka/pro/routing/features/filtering/config.rb +40 -0
- data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/filtering/topic.rb +51 -0
- data/lib/karafka/pro/routing/features/filtering.rb +27 -0
- data/lib/karafka/pro/routing/features/inline_insights/config.rb +32 -0
- data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/inline_insights/topic.rb +52 -0
- data/lib/karafka/pro/routing/features/inline_insights.rb +26 -0
- data/lib/karafka/pro/routing/features/long_running_job/config.rb +28 -0
- data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +40 -0
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +42 -0
- data/lib/karafka/pro/routing/features/long_running_job.rb +28 -0
- data/lib/karafka/pro/routing/features/patterns/builder.rb +38 -0
- data/lib/karafka/pro/routing/features/patterns/config.rb +54 -0
- data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +72 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +62 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +46 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/patterns/detector.rb +71 -0
- data/lib/karafka/pro/routing/features/patterns/pattern.rb +95 -0
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +35 -0
- data/lib/karafka/pro/routing/features/patterns/topic.rb +50 -0
- data/lib/karafka/pro/routing/features/patterns/topics.rb +53 -0
- data/lib/karafka/pro/routing/features/patterns.rb +33 -0
- data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +51 -0
- data/lib/karafka/pro/routing/features/pausing/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/pausing.rb +25 -0
- data/lib/karafka/pro/routing/features/throttling/config.rb +32 -0
- data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/throttling/topic.rb +69 -0
- data/lib/karafka/pro/routing/features/throttling.rb +30 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +30 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +55 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +56 -0
- data/lib/karafka/pro/routing/features/virtual_partitions.rb +27 -0
- data/lib/karafka/pro.rb +13 -0
- data/lib/karafka/process.rb +24 -8
- data/lib/karafka/processing/coordinator.rb +181 -0
- data/lib/karafka/processing/coordinators_buffer.rb +62 -0
- data/lib/karafka/processing/executor.rb +155 -0
- data/lib/karafka/processing/executors_buffer.rb +72 -0
- data/lib/karafka/processing/expansions_selector.rb +22 -0
- data/lib/karafka/processing/inline_insights/consumer.rb +41 -0
- data/lib/karafka/processing/inline_insights/listener.rb +19 -0
- data/lib/karafka/processing/inline_insights/tracker.rb +128 -0
- data/lib/karafka/processing/jobs/base.rb +55 -0
- data/lib/karafka/processing/jobs/consume.rb +45 -0
- data/lib/karafka/processing/jobs/idle.rb +24 -0
- data/lib/karafka/processing/jobs/revoked.rb +22 -0
- data/lib/karafka/processing/jobs/shutdown.rb +23 -0
- data/lib/karafka/processing/jobs_builder.rb +28 -0
- data/lib/karafka/processing/jobs_queue.rb +150 -0
- data/lib/karafka/processing/partitioner.rb +24 -0
- data/lib/karafka/processing/result.rb +42 -0
- data/lib/karafka/processing/scheduler.rb +22 -0
- data/lib/karafka/processing/strategies/aj_dlq_mom.rb +44 -0
- data/lib/karafka/processing/strategies/aj_mom.rb +21 -0
- data/lib/karafka/processing/strategies/base.rb +52 -0
- data/lib/karafka/processing/strategies/default.rb +158 -0
- data/lib/karafka/processing/strategies/dlq.rb +88 -0
- data/lib/karafka/processing/strategies/dlq_mom.rb +49 -0
- data/lib/karafka/processing/strategies/mom.rb +29 -0
- data/lib/karafka/processing/strategy_selector.rb +47 -0
- data/lib/karafka/processing/worker.rb +93 -0
- data/lib/karafka/processing/workers_batch.rb +27 -0
- data/lib/karafka/railtie.rb +141 -0
- data/lib/karafka/routing/activity_manager.rb +84 -0
- data/lib/karafka/routing/builder.rb +45 -19
- data/lib/karafka/routing/consumer_group.rb +56 -20
- data/lib/karafka/routing/consumer_mapper.rb +1 -12
- data/lib/karafka/routing/features/active_job/builder.rb +33 -0
- data/lib/karafka/routing/features/active_job/config.rb +15 -0
- data/lib/karafka/routing/features/active_job/contracts/topic.rb +44 -0
- data/lib/karafka/routing/features/active_job/proxy.rb +14 -0
- data/lib/karafka/routing/features/active_job/topic.rb +33 -0
- data/lib/karafka/routing/features/active_job.rb +13 -0
- data/lib/karafka/routing/features/base/expander.rb +59 -0
- data/lib/karafka/routing/features/base.rb +71 -0
- data/lib/karafka/routing/features/dead_letter_queue/config.rb +19 -0
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +46 -0
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +41 -0
- data/lib/karafka/routing/features/dead_letter_queue.rb +16 -0
- data/lib/karafka/routing/features/declaratives/config.rb +18 -0
- data/lib/karafka/routing/features/declaratives/contracts/topic.rb +33 -0
- data/lib/karafka/routing/features/declaratives/topic.rb +44 -0
- data/lib/karafka/routing/features/declaratives.rb +14 -0
- data/lib/karafka/routing/features/inline_insights/config.rb +15 -0
- data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +27 -0
- data/lib/karafka/routing/features/inline_insights/topic.rb +31 -0
- data/lib/karafka/routing/features/inline_insights.rb +40 -0
- data/lib/karafka/routing/features/manual_offset_management/config.rb +15 -0
- data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +27 -0
- data/lib/karafka/routing/features/manual_offset_management/topic.rb +35 -0
- data/lib/karafka/routing/features/manual_offset_management.rb +18 -0
- data/lib/karafka/routing/proxy.rb +22 -21
- data/lib/karafka/routing/router.rb +24 -10
- data/lib/karafka/routing/subscription_group.rb +110 -0
- data/lib/karafka/routing/subscription_groups_builder.rb +65 -0
- data/lib/karafka/routing/topic.rb +87 -24
- data/lib/karafka/routing/topics.rb +46 -0
- data/lib/karafka/runner.rb +52 -0
- data/lib/karafka/serialization/json/deserializer.rb +7 -15
- data/lib/karafka/server.rb +113 -37
- data/lib/karafka/setup/attributes_map.rb +348 -0
- data/lib/karafka/setup/config.rb +256 -175
- data/lib/karafka/status.rb +54 -7
- data/lib/karafka/templates/example_consumer.rb.erb +16 -0
- data/lib/karafka/templates/karafka.rb.erb +33 -55
- data/lib/karafka/time_trackers/base.rb +14 -0
- data/lib/karafka/time_trackers/pause.rb +122 -0
- data/lib/karafka/time_trackers/poll.rb +69 -0
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +91 -17
- data/renovate.json +9 -0
- data.tar.gz.sig +0 -0
- metadata +330 -168
- metadata.gz.sig +0 -0
- data/MIT-LICENCE +0 -18
- data/certs/mensfeld.pem +0 -25
- data/config/errors.yml +0 -41
- data/lib/karafka/assignment_strategies/round_robin.rb +0 -13
- data/lib/karafka/attributes_map.rb +0 -63
- data/lib/karafka/backends/inline.rb +0 -16
- data/lib/karafka/base_responder.rb +0 -226
- data/lib/karafka/cli/flow.rb +0 -48
- data/lib/karafka/cli/missingno.rb +0 -19
- data/lib/karafka/code_reloader.rb +0 -67
- data/lib/karafka/connection/api_adapter.rb +0 -158
- data/lib/karafka/connection/batch_delegator.rb +0 -55
- data/lib/karafka/connection/builder.rb +0 -23
- data/lib/karafka/connection/message_delegator.rb +0 -36
- data/lib/karafka/consumers/batch_metadata.rb +0 -10
- data/lib/karafka/consumers/callbacks.rb +0 -71
- data/lib/karafka/consumers/includer.rb +0 -64
- data/lib/karafka/consumers/responders.rb +0 -24
- data/lib/karafka/consumers/single_params.rb +0 -15
- data/lib/karafka/contracts/consumer_group_topic.rb +0 -19
- data/lib/karafka/contracts/responder_usage.rb +0 -54
- data/lib/karafka/fetcher.rb +0 -42
- data/lib/karafka/helpers/class_matcher.rb +0 -88
- data/lib/karafka/helpers/config_retriever.rb +0 -46
- data/lib/karafka/helpers/inflector.rb +0 -26
- data/lib/karafka/instrumentation/stdout_listener.rb +0 -140
- data/lib/karafka/params/batch_metadata.rb +0 -26
- data/lib/karafka/params/builders/batch_metadata.rb +0 -30
- data/lib/karafka/params/builders/params.rb +0 -38
- data/lib/karafka/params/builders/params_batch.rb +0 -25
- data/lib/karafka/params/params_batch.rb +0 -60
- data/lib/karafka/patches/ruby_kafka.rb +0 -47
- data/lib/karafka/persistence/client.rb +0 -29
- data/lib/karafka/persistence/consumers.rb +0 -45
- data/lib/karafka/persistence/topics.rb +0 -48
- data/lib/karafka/responders/builder.rb +0 -36
- data/lib/karafka/responders/topic.rb +0 -55
- data/lib/karafka/routing/topic_mapper.rb +0 -53
- data/lib/karafka/serialization/json/serializer.rb +0 -31
- data/lib/karafka/setup/configurators/water_drop.rb +0 -36
- data/lib/karafka/templates/application_responder.rb.erb +0 -11
data/lib/karafka/connection/client.rb

@@ -1,119 +1,582 @@
 # frozen_string_literal: true
 
 module Karafka
+  # Namespace for Kafka connection related logic
   module Connection
-    [… 2 removed lines truncated …]
+    # An abstraction layer on top of the rdkafka consumer.
+    #
+    # It is threadsafe and provides some security measures so we won't end up operating on a
+    # closed consumer instance as it causes Ruby VM process to crash.
     class Client
-      [… 41 removed lines truncated …]
+      attr_reader :rebalance_manager
+
+      # @return [String] underlying consumer name
+      # @note Consumer name may change in case we regenerate it
+      attr_reader :name
+
+      # @return [String] id of the client
+      attr_reader :id
+
+      # How many times should we retry polling in case of a failure
+      MAX_POLL_RETRIES = 20
+
+      # 1 minute of max wait for the first rebalance before a forceful attempt
+      # This applies only to a case when a short-lived Karafka instance with a client would be
+      # closed before first rebalance. Mitigates a librdkafka bug.
+      COOPERATIVE_STICKY_MAX_WAIT = 60_000
+
+      # We want to make sure we never close several clients in the same moment to prevent
+      # potential race conditions and other issues
+      SHUTDOWN_MUTEX = Mutex.new
+
+      private_constant :MAX_POLL_RETRIES, :SHUTDOWN_MUTEX, :COOPERATIVE_STICKY_MAX_WAIT
+
+      # Creates a new consumer instance.
+      #
+      # @param subscription_group [Karafka::Routing::SubscriptionGroup] subscription group
+      #   with all the configuration details needed for us to create a client
+      # @return [Karafka::Connection::Client]
+      def initialize(subscription_group)
+        @id = SecureRandom.hex(6)
+        # Name is set when we build consumer
+        @name = ''
+        @closed = false
+        @subscription_group = subscription_group
+        @buffer = RawMessagesBuffer.new
+        @rebalance_manager = RebalanceManager.new(@subscription_group.id)
+        @rebalance_callback = Instrumentation::Callbacks::Rebalance.new(
+          @subscription_group.id,
+          @subscription_group.consumer_group.id
         )
-      [… 2 removed lines truncated …]
+        @kafka = build_consumer
+        # There are few operations that can happen in parallel from the listener threads as well
+        # as from the workers. They are not fully thread-safe because they may be composed out of
+        # few calls to Kafka or out of few internal state changes. That is why we mutex them.
+        # It mostly revolves around pausing and resuming.
+        @mutex = Mutex.new
+        # We need to keep track of what we have paused for resuming
+        # In case we loose partition, we still need to resume it, otherwise it won't be fetched
+        # again if we get reassigned to it later on. We need to keep them as after revocation we
+        # no longer may be able to fetch them from Kafka. We could build them but it is easier
+        # to just keep them here and use if needed when cannot be obtained
+        @paused_tpls = Hash.new { |h, k| h[k] = {} }
+      end
+
+      # Fetches messages within boundaries defined by the settings (time, size, topics, etc).
+      #
+      # @return [Karafka::Connection::MessagesBuffer] messages buffer that holds messages per topic
+      #   partition
+      # @note This method should not be executed from many threads at the same time
+      def batch_poll
+        time_poll = TimeTrackers::Poll.new(@subscription_group.max_wait_time)
+
+        @buffer.clear
+        @rebalance_manager.clear
+
+        loop do
+          time_poll.start
+
+          # Don't fetch more messages if we do not have any time left
+          break if time_poll.exceeded?
+          # Don't fetch more messages if we've fetched max as we've wanted
+          break if @buffer.size >= @subscription_group.max_messages
+
+          # Fetch message within our time boundaries
+          message = poll(time_poll.remaining)
+
+          # Put a message to the buffer if there is one
+          @buffer << message if message
+
+          # Upon polling rebalance manager might have been updated.
+          # If partition revocation happens, we need to remove messages from revoked partitions
+          # as well as ensure we do not have duplicated due to the offset reset for partitions
+          # that we got assigned
+          # We also do early break, so the information about rebalance is used as soon as possible
+          if @rebalance_manager.changed?
+            remove_revoked_and_duplicated_messages
+            break
+          end
+
+          # Track time spent on all of the processing and polling
+          time_poll.checkpoint
+
+          # Finally once we've (potentially) removed revoked, etc, if no messages were returned
+          # we can break.
+          # Worth keeping in mind, that the rebalance manager might have been updated despite no
+          # messages being returned during a poll
+          break unless message
+        end
+
+        @buffer
+      end
+
+      # Stores offset for a given partition of a given topic based on the provided message.
+      #
+      # @param message [Karafka::Messages::Message]
+      def store_offset(message)
+        internal_store_offset(message)
+      end
+
+      # @return [Boolean] true if our current assignment has been lost involuntarily.
+      def assignment_lost?
+        @kafka.assignment_lost?
+      end
+
+      # Commits the offset on a current consumer in a non-blocking or blocking way.
+      #
+      # @param async [Boolean] should the commit happen async or sync (async by default)
+      # @return [Boolean] did committing was successful. It may be not, when we no longer own
+      #   given partition.
+      #
+      # @note This will commit all the offsets for the whole consumer. In order to achieve
+      #   granular control over where the offset should be for particular topic partitions, the
+      #   store_offset should be used to only store new offset when we want them to be flushed
+      #
+      # @note This method for async may return `true` despite involuntary partition revocation as
+      #   it does **not** resolve to `lost_assignment?`. It returns only the commit state operation
+      #   result.
+      def commit_offsets(async: true)
+        internal_commit_offsets(async: async)
+      end
+
+      # Commits offset in a synchronous way.
+      #
+      # @see `#commit_offset` for more details
+      def commit_offsets!
+        commit_offsets(async: false)
       end
 
-      [… 1 removed line truncated …]
+      # Seek to a particular message. The next poll on the topic/partition will return the
+      #   message at the given offset.
+      #
+      # @param message [Messages::Message, Messages::Seek] message to which we want to seek to.
+      #   It can have the time based offset.
+      # @note Please note, that if you are seeking to a time offset, getting the offset is blocking
+      def seek(message)
+        @mutex.synchronize { internal_seek(message) }
+      end
+
+      # Pauses given partition and moves back to last successful offset processed.
+      #
+      # @param topic [String] topic name
+      # @param partition [Integer] partition
+      # @param offset [Integer, nil] offset of the message on which we want to pause (this message
+      #   will be reprocessed after getting back to processing) or nil if we want to pause and
+      #   resume from the consecutive offset (+1 from the last message passed to us by librdkafka)
+      # @note This will pause indefinitely and requires manual `#resume`
+      # @note When `#internal_seek` is not involved (when offset is `nil`) we will not purge the
+      #   librdkafka buffers and continue from the last cursor offset
+      def pause(topic, partition, offset = nil)
+        @mutex.synchronize do
+          # Do not pause if the client got closed, would not change anything
+          return if @closed
+
+          internal_commit_offsets(async: true)
+
+          # Here we do not use our cached tpls because we should not try to pause something we do
+          # not own anymore.
+          tpl = topic_partition_list(topic, partition)
+
+          return unless tpl
+
+          Karafka.monitor.instrument(
+            'client.pause',
+            caller: self,
+            subscription_group: @subscription_group,
+            topic: topic,
+            partition: partition,
+            offset: offset
+          )
+
+          @paused_tpls[topic][partition] = tpl
+
+          @kafka.pause(tpl)
+
+          # If offset is not provided, will pause where it finished.
+          # This makes librdkafka not purge buffers and can provide significant network savings
+          # when we just want to pause before further processing without changing the offsets
+          return unless offset
+
+          pause_msg = Messages::Seek.new(topic, partition, offset)
+
+          internal_seek(pause_msg)
+        end
+      end
+
+      # Resumes processing of a give topic partition after it was paused.
+      #
+      # @param topic [String] topic name
+      # @param partition [Integer] partition
+      def resume(topic, partition)
+        @mutex.synchronize do
+          return if @closed
+
+          # We now commit offsets on rebalances, thus we can do it async just to make sure
+          internal_commit_offsets(async: true)
+
+          # If we were not able, let's try to reuse the one we have (if we have)
+          tpl = topic_partition_list(topic, partition) || @paused_tpls[topic][partition]
+
+          return unless tpl
+
+          # If we did not have it, it means we never paused this partition, thus no resume should
+          # happen in the first place
+          return unless @paused_tpls[topic].delete(partition)
+
+          Karafka.monitor.instrument(
+            'client.resume',
+            caller: self,
+            subscription_group: @subscription_group,
+            topic: topic,
+            partition: partition
+          )
+
+          @kafka.resume(tpl)
+        end
+      end
+
+      # Gracefully stops topic consumption.
+      #
       # @note Stopping running consumers without a really important reason is not recommended
       #   as until all the consumers are stopped, the server will keep running serving only
       #   part of the messages
       def stop
-      [… 2 removed lines truncated …]
+        # This ensures, that we do not stop the underlying client until it passes the first
+        # rebalance for cooperative-sticky. Otherwise librdkafka may crash
+        #
+        # We set a timeout just in case the rebalance would never happen or would last for an
+        # extensive time period.
+        #
+        # @see https://github.com/confluentinc/librdkafka/issues/4312
+        if @subscription_group.kafka[:'partition.assignment.strategy'] == 'cooperative-sticky'
+          active_wait = false
+
+          (COOPERATIVE_STICKY_MAX_WAIT / 100).times do
+            # If we're past the first rebalance, no need to wait
+            if @rebalance_manager.active?
+              # We give it a a bit of time because librdkafka has a tendency to do some-post
+              # callback work that from its perspective is still under rebalance
+              sleep(5) if active_wait
+
+              break
+            end
+
+            active_wait = true
+
+            # poll to trigger potential rebalances that could occur during stopping and to trigger
+            # potential callbacks
+            poll(100)
+
+            sleep(0.1)
+          end
+        end
+
+        close
       end
 
-      [… 6 removed lines truncated …]
+      # Marks given message as consumed.
+      #
+      # @param [Karafka::Messages::Message] message that we want to mark as processed
+      # @return [Boolean] true if successful. False if we no longer own given partition
+      # @note This method won't trigger automatic offsets commits, rather relying on the offset
+      #   check-pointing trigger that happens with each batch processed. It will however check the
+      #   `librdkafka` assignment ownership to increase accuracy for involuntary revocations.
+      def mark_as_consumed(message)
+        store_offset(message) && !assignment_lost?
       end
 
-      [… 8 removed lines truncated …]
+      # Marks a given message as consumed and commits the offsets in a blocking way.
+      #
+      # @param [Karafka::Messages::Message] message that we want to mark as processed
+      # @return [Boolean] true if successful. False if we no longer own given partition
+      def mark_as_consumed!(message)
+        return false unless mark_as_consumed(message)
+
+        commit_offsets!
+      end
+
+      # Closes and resets the client completely.
+      def reset
+        close
+
+        @closed = false
+        @paused_tpls.clear
+        @kafka = build_consumer
      end
 
-      [… 9 removed lines truncated …]
+      # Runs a single poll ignoring all the potential errors
+      # This is used as a keep-alive in the shutdown stage and any errors that happen here are
+      # irrelevant from the shutdown process perspective
+      #
+      # This is used only to trigger rebalance callbacks
+      def ping
+        poll(100)
+      rescue Rdkafka::RdkafkaError
+        nil
      end
 
      private
 
-      [… 1 removed line truncated …]
+      # When we cannot store an offset, it means we no longer own the partition
+      #
+      # Non thread-safe offset storing method
+      # @param message [Karafka::Messages::Message]
+      # @return [Boolean] true if we could store the offset (if we still own the partition)
+      def internal_store_offset(message)
+        @kafka.store_offset(message)
+        true
+      rescue Rdkafka::RdkafkaError => e
+        return false if e.code == :assignment_lost
+        return false if e.code == :state
+
+        raise e
+      end
+
+      # Non thread-safe message committing method
+      # @param async [Boolean] should the commit happen async or sync (async by default)
+      # @return [Boolean] true if offset commit worked, false if we've lost the assignment
+      # @note We do **not** consider `no_offset` as any problem and we allow to commit offsets
+      #   even when no stored, because with sync commit, it refreshes the ownership state of the
+      #   consumer in a sync way.
+      def internal_commit_offsets(async: true)
+        @kafka.commit(nil, async)
+
+        true
+      rescue Rdkafka::RdkafkaError => e
+        case e.code
+        when :assignment_lost
+          return false
+        when :unknown_member_id
+          return false
+        when :no_offset
+          return true
+        when :coordinator_load_in_progress
+          sleep(1)
+          retry
+        end
+
+        raise e
+      end
+
+      # Non-mutexed seek that should be used only internally. Outside we expose `#seek` that is
+      #   wrapped with a mutex.
+      #
+      # @param message [Messages::Message, Messages::Seek] message to which we want to seek to.
+      #   It can have the time based offset.
+      #
+      # @note Will not invoke seeking if the desired seek would lead us to the current position.
+      #   This prevents us from flushing librdkafka buffer when it is not needed.
+      def internal_seek(message)
+        # If the seek message offset is in a time format, we need to find the closest "real"
+        # offset matching before we seek
+        if message.offset.is_a?(Time)
+          tpl = ::Rdkafka::Consumer::TopicPartitionList.new
+          tpl.add_topic_and_partitions_with_offsets(
+            message.topic,
+            message.partition => message.offset
+          )
+
+          proxy = Proxy.new(@kafka)
+
+          # Now we can overwrite the seek message offset with our resolved offset and we can
+          # then seek to the appropriate message
+          # We set the timeout to 2_000 to make sure that remote clusters handle this well
+          real_offsets = proxy.offsets_for_times(tpl)
+          detected_partition = real_offsets.to_h.dig(message.topic, message.partition)
+
+          # There always needs to be an offset. In case we seek into the future, where there
+          # are no offsets yet, we get -1 which indicates the most recent offset
+          # We should always detect offset, whether it is 0, -1 or a corresponding
+          message.offset = detected_partition&.offset || raise(Errors::InvalidTimeBasedOffsetError)
+        end
+
+        # Never seek if we would get the same location as we would get without seeking
+        # This prevents us from the expensive buffer purges that can lead to increased network
+        # traffic and can cost a lot of money
+        #
+        # This code adds around 0.01 ms per seek but saves from many user unexpected behaviours in
+        # seeking and pausing
+        return if message.offset == topic_partition_position(message.topic, message.partition)
+
+        @kafka.seek(message)
+      end
+
+      # Commits the stored offsets in a sync way and closes the consumer.
+      def close
+        # Allow only one client to be closed at the same time
+        SHUTDOWN_MUTEX.synchronize do
+          # Once client is closed, we should not close it again
+          # This could only happen in case of a race-condition when forceful shutdown happens
+          # and triggers this from a different thread
+          return if @closed
+
+          @closed = true
+
+          # Remove callbacks runners that were registered
+          ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@subscription_group.id)
+          ::Karafka::Core::Instrumentation.error_callbacks.delete(@subscription_group.id)
+
+          @kafka.close
+          @buffer.clear
+          # @note We do not clear rebalance manager here as we may still have revocation info
+          #   here that we want to consider valid prior to running another reconnection
+        end
+      end
+
+      # Unsubscribes from all the subscriptions
+      # @note This is a private API to be used only on shutdown
+      # @note We do not re-raise since this is supposed to be only used on close and can be safely
+      #   ignored. We do however want to instrument on it
+      def unsubscribe
+        @kafka.unsubscribe
+      rescue ::Rdkafka::RdkafkaError => e
+        Karafka.monitor.instrument(
+          'error.occurred',
+          caller: self,
+          error: e,
+          type: 'connection.client.unsubscribe.error'
+        )
+      end
+
+      # @param topic [String]
+      # @param partition [Integer]
+      # @return [Rdkafka::Consumer::TopicPartitionList]
+      def topic_partition_list(topic, partition)
+        rdkafka_partition = @kafka
+                            .assignment
+                            .to_h[topic]
+                            &.detect { |part| part.partition == partition }
+
+        return unless rdkafka_partition
+
+        Rdkafka::Consumer::TopicPartitionList.new({ topic => [rdkafka_partition] })
+      end
+
+      # @param topic [String]
+      # @param partition [Integer]
+      # @return [Integer] current position within topic partition or `-1` if it could not be
+      #   established. It may be `-1` in case we lost the assignment or we did not yet fetch data
+      #   for this topic partition
+      def topic_partition_position(topic, partition)
+        rd_partition = ::Rdkafka::Consumer::Partition.new(partition, nil, 0)
+        tpl = ::Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])
+
+        @kafka.position(tpl).to_h.fetch(topic).first.offset || -1
+      end
+
+      # Performs a single poll operation and handles retries and error
+      #
+      # @param timeout [Integer] timeout for a single poll
+      # @return [Rdkafka::Consumer::Message, nil] fetched message or nil if nothing polled
+      def poll(timeout)
+        time_poll ||= TimeTrackers::Poll.new(timeout)
 
-      [… 1 removed line truncated …]
-      # that is set up to consume from topics of a given consumer group
-      def kafka_consumer
-        # @note We don't cache the connection internally because we cache kafka_consumer that uses
-        # kafka client object instance
-        @kafka_consumer ||= Builder.call(consumer_group).consumer(
-          **ApiAdapter.consumer(consumer_group)
-        ).tap do |consumer|
-          consumer_group.topics.each do |topic|
-            settings = ApiAdapter.subscribe(topic)
+        return nil if time_poll.exceeded?
 
-      [… 1 removed line truncated …]
+        time_poll.start
+
+        @kafka.poll(timeout)
+      rescue ::Rdkafka::RdkafkaError => e
+        early_report = false
+
+        retryable = time_poll.attempts <= MAX_POLL_RETRIES && time_poll.retryable?
+
+        # There are retryable issues on which we want to report fast as they are source of
+        # problems and can mean some bigger system instabilities
+        # Those are mainly network issues and exceeding the max poll interval
+        # We want to report early on max poll interval exceeding because it may mean that the
+        # underlying processing is taking too much time and it is not LRJ
+        case e.code
+        when :max_poll_exceeded # -147
+          early_report = true
+        when :network_exception # 13
+          early_report = true
+        when :transport # -195
+          early_report = true
+        # @see
+        # https://github.com/confluentinc/confluent-kafka-dotnet/issues/1366#issuecomment-821842990
+        # This will be raised each time poll detects a non-existing topic. When auto creation is
+        # on, we can safely ignore it
+        when :unknown_topic_or_part # 3
+          return nil if @subscription_group.kafka[:'allow.auto.create.topics']
+
+          early_report = true
+
+          # No sense in retrying when no topic/partition and we're no longer running
+          retryable = false unless Karafka::App.running?
+        end
+
+        if early_report || !retryable
+          Karafka.monitor.instrument(
+            'error.occurred',
+            caller: self,
+            error: e,
+            type: 'connection.client.poll.error'
+          )
+        end
+
+        raise unless retryable
+
+        # Most of the errors can be safely ignored as librdkafka will recover from them
+        # @see https://github.com/edenhill/librdkafka/issues/1987#issuecomment-422008750
+        # @see https://github.com/edenhill/librdkafka/wiki/Error-handling
+
+        time_poll.checkpoint
+        time_poll.backoff
+
+        # poll may not only return message but also can run callbacks and if they changed,
+        # despite the errors we need to delegate to the other app parts
+        @rebalance_manager.changed? ? nil : retry
+      end
+
+      # Builds a new rdkafka consumer instance based on the subscription group configuration
+      # @return [Rdkafka::Consumer]
+      def build_consumer
+        ::Rdkafka::Config.logger = ::Karafka::App.config.logger
+        config = ::Rdkafka::Config.new(@subscription_group.kafka)
+        config.consumer_rebalance_listener = @rebalance_callback
+
+        consumer = config.consumer
+        @name = consumer.name
+
+        # Register statistics runner for this particular type of callbacks
+        ::Karafka::Core::Instrumentation.statistics_callbacks.add(
+          @subscription_group.id,
+          Instrumentation::Callbacks::Statistics.new(
+            @subscription_group.id,
+            @subscription_group.consumer_group.id,
+            @name
+          )
+        )
+
+        # Register error tracking callback
+        ::Karafka::Core::Instrumentation.error_callbacks.add(
+          @subscription_group.id,
+          Instrumentation::Callbacks::Error.new(
+            @subscription_group.id,
+            @subscription_group.consumer_group.id,
+            @name
+          )
+        )
+
+        # Subscription needs to happen after we assigned the rebalance callbacks just in case of
+        # a race condition
+        consumer.subscribe(*@subscription_group.subscriptions)
+        consumer
+      end
+
+      # We may have a case where in the middle of data polling, we've lost a partition.
+      # In a case like this we should remove all the pre-buffered messages from list partitions as
+      # we are no longer responsible in a given process for processing those messages and they
+      # should have been picked up by a different process.
+      def remove_revoked_and_duplicated_messages
+        @rebalance_manager.lost_partitions.each do |topic, partitions|
+          partitions.each do |partition|
+            @buffer.delete(topic, partition)
          end
        end
-      [… 2 removed lines truncated …]
-        # attempts if Kafka is down
-        sleep(consumer_group.reconnect_timeout)
-        # We don't log and just re-raise - this will be logged
-        # down the road
-        raise
+
+        @buffer.uniq!
      end
    end
  end