karafka 1.4.12 → 2.2.10
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/FUNDING.yml +1 -0
- data/.github/ISSUE_TEMPLATE/bug_report.md +10 -9
- data/.github/workflows/ci.yml +169 -31
- data/.rspec +4 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +716 -607
- data/CONTRIBUTING.md +10 -19
- data/Gemfile +7 -0
- data/Gemfile.lock +69 -92
- data/LICENSE +17 -0
- data/LICENSE-COMM +89 -0
- data/LICENSE-LGPL +165 -0
- data/README.md +48 -47
- data/bin/benchmarks +99 -0
- data/bin/create_token +22 -0
- data/bin/integrations +310 -0
- data/bin/karafka +5 -14
- data/bin/record_rss +50 -0
- data/bin/rspecs +6 -0
- data/bin/scenario +29 -0
- data/bin/stress_many +13 -0
- data/bin/stress_one +13 -0
- data/bin/verify_license_integrity +37 -0
- data/bin/wait_for_kafka +24 -0
- data/certs/cert_chain.pem +26 -0
- data/certs/karafka-pro.pem +11 -0
- data/config/locales/errors.yml +97 -0
- data/config/locales/pro_errors.yml +59 -0
- data/docker-compose.yml +19 -11
- data/karafka.gemspec +26 -22
- data/lib/active_job/karafka.rb +17 -0
- data/lib/active_job/queue_adapters/karafka_adapter.rb +32 -0
- data/lib/karafka/active_job/consumer.rb +49 -0
- data/lib/karafka/active_job/current_attributes/loading.rb +36 -0
- data/lib/karafka/active_job/current_attributes/persistence.rb +28 -0
- data/lib/karafka/active_job/current_attributes.rb +42 -0
- data/lib/karafka/active_job/dispatcher.rb +69 -0
- data/lib/karafka/active_job/job_extensions.rb +34 -0
- data/lib/karafka/active_job/job_options_contract.rb +32 -0
- data/lib/karafka/admin.rb +313 -0
- data/lib/karafka/app.rb +47 -23
- data/lib/karafka/base_consumer.rb +260 -29
- data/lib/karafka/cli/base.rb +67 -36
- data/lib/karafka/cli/console.rb +18 -12
- data/lib/karafka/cli/help.rb +24 -0
- data/lib/karafka/cli/info.rb +47 -12
- data/lib/karafka/cli/install.rb +23 -14
- data/lib/karafka/cli/server.rb +101 -44
- data/lib/karafka/cli/topics.rb +146 -0
- data/lib/karafka/cli.rb +24 -27
- data/lib/karafka/connection/client.rb +553 -90
- data/lib/karafka/connection/consumer_group_coordinator.rb +48 -0
- data/lib/karafka/connection/listener.rb +294 -38
- data/lib/karafka/connection/listeners_batch.rb +40 -0
- data/lib/karafka/connection/messages_buffer.rb +84 -0
- data/lib/karafka/connection/pauses_manager.rb +46 -0
- data/lib/karafka/connection/proxy.rb +98 -0
- data/lib/karafka/connection/raw_messages_buffer.rb +101 -0
- data/lib/karafka/connection/rebalance_manager.rb +105 -0
- data/lib/karafka/contracts/base.rb +17 -0
- data/lib/karafka/contracts/config.rb +130 -11
- data/lib/karafka/contracts/consumer_group.rb +32 -187
- data/lib/karafka/contracts/server_cli_options.rb +80 -19
- data/lib/karafka/contracts/topic.rb +65 -0
- data/lib/karafka/contracts.rb +1 -1
- data/lib/karafka/embedded.rb +36 -0
- data/lib/karafka/env.rb +46 -0
- data/lib/karafka/errors.rb +37 -21
- data/lib/karafka/helpers/async.rb +33 -0
- data/lib/karafka/helpers/colorize.rb +26 -0
- data/lib/karafka/helpers/multi_delegator.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/error.rb +39 -0
- data/lib/karafka/instrumentation/callbacks/rebalance.rb +64 -0
- data/lib/karafka/instrumentation/callbacks/statistics.rb +51 -0
- data/lib/karafka/instrumentation/logger_listener.rb +303 -0
- data/lib/karafka/instrumentation/monitor.rb +13 -61
- data/lib/karafka/instrumentation/notifications.rb +79 -0
- data/lib/karafka/instrumentation/proctitle_listener.rb +7 -16
- data/lib/karafka/instrumentation/vendors/appsignal/base.rb +30 -0
- data/lib/karafka/instrumentation/vendors/appsignal/client.rb +122 -0
- data/lib/karafka/instrumentation/vendors/appsignal/dashboard.json +222 -0
- data/lib/karafka/instrumentation/vendors/appsignal/errors_listener.rb +30 -0
- data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +331 -0
- data/lib/karafka/instrumentation/vendors/datadog/dashboard.json +1 -0
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +155 -0
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +264 -0
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +176 -0
- data/lib/karafka/licenser.rb +78 -0
- data/lib/karafka/messages/batch_metadata.rb +52 -0
- data/lib/karafka/messages/builders/batch_metadata.rb +60 -0
- data/lib/karafka/messages/builders/message.rb +40 -0
- data/lib/karafka/messages/builders/messages.rb +36 -0
- data/lib/karafka/{params/params.rb → messages/message.rb} +20 -13
- data/lib/karafka/messages/messages.rb +71 -0
- data/lib/karafka/{params → messages}/metadata.rb +4 -6
- data/lib/karafka/messages/parser.rb +14 -0
- data/lib/karafka/messages/seek.rb +12 -0
- data/lib/karafka/patches/rdkafka/bindings.rb +122 -0
- data/lib/karafka/patches/rdkafka/opaque.rb +36 -0
- data/lib/karafka/pro/active_job/consumer.rb +47 -0
- data/lib/karafka/pro/active_job/dispatcher.rb +86 -0
- data/lib/karafka/pro/active_job/job_options_contract.rb +45 -0
- data/lib/karafka/pro/cleaner/errors.rb +27 -0
- data/lib/karafka/pro/cleaner/messages/message.rb +46 -0
- data/lib/karafka/pro/cleaner/messages/messages.rb +42 -0
- data/lib/karafka/pro/cleaner.rb +41 -0
- data/lib/karafka/pro/contracts/base.rb +23 -0
- data/lib/karafka/pro/contracts/server_cli_options.rb +111 -0
- data/lib/karafka/pro/encryption/cipher.rb +58 -0
- data/lib/karafka/pro/encryption/contracts/config.rb +79 -0
- data/lib/karafka/pro/encryption/errors.rb +27 -0
- data/lib/karafka/pro/encryption/messages/middleware.rb +46 -0
- data/lib/karafka/pro/encryption/messages/parser.rb +56 -0
- data/lib/karafka/pro/encryption/setup/config.rb +48 -0
- data/lib/karafka/pro/encryption.rb +47 -0
- data/lib/karafka/pro/iterator/expander.rb +95 -0
- data/lib/karafka/pro/iterator/tpl_builder.rb +155 -0
- data/lib/karafka/pro/iterator.rb +170 -0
- data/lib/karafka/pro/loader.rb +106 -0
- data/lib/karafka/pro/performance_tracker.rb +84 -0
- data/lib/karafka/pro/processing/collapser.rb +62 -0
- data/lib/karafka/pro/processing/coordinator.rb +147 -0
- data/lib/karafka/pro/processing/filters/base.rb +61 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +70 -0
- data/lib/karafka/pro/processing/filters/expirer.rb +51 -0
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +78 -0
- data/lib/karafka/pro/processing/filters/throttler.rb +84 -0
- data/lib/karafka/pro/processing/filters/virtual_limiter.rb +52 -0
- data/lib/karafka/pro/processing/filters_applier.rb +105 -0
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +39 -0
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +37 -0
- data/lib/karafka/pro/processing/jobs_builder.rb +50 -0
- data/lib/karafka/pro/processing/partitioner.rb +69 -0
- data/lib/karafka/pro/processing/scheduler.rb +75 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +70 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +76 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +72 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +76 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +66 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +70 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +64 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +69 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +38 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +66 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +38 -0
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +58 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +37 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +82 -0
- data/lib/karafka/pro/processing/strategies/aj/mom.rb +36 -0
- data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +52 -0
- data/lib/karafka/pro/processing/strategies/base.rb +26 -0
- data/lib/karafka/pro/processing/strategies/default.rb +105 -0
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +137 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +61 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +75 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +71 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +43 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +41 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +69 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +41 -0
- data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +64 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +65 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +36 -0
- data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +39 -0
- data/lib/karafka/pro/processing/strategies/dlq/mom.rb +68 -0
- data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +37 -0
- data/lib/karafka/pro/processing/strategies/dlq/vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/ftr/default.rb +111 -0
- data/lib/karafka/pro/processing/strategies/ftr/vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +85 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +69 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +67 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +40 -0
- data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +39 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +77 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +38 -0
- data/lib/karafka/pro/processing/strategies/lrj/vp.rb +36 -0
- data/lib/karafka/pro/processing/strategies/mom/default.rb +46 -0
- data/lib/karafka/pro/processing/strategies/mom/ftr.rb +53 -0
- data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +37 -0
- data/lib/karafka/pro/processing/strategies/mom/vp.rb +35 -0
- data/lib/karafka/pro/processing/strategies/vp/default.rb +124 -0
- data/lib/karafka/pro/processing/strategies.rb +22 -0
- data/lib/karafka/pro/processing/strategy_selector.rb +84 -0
- data/lib/karafka/pro/processing/virtual_offset_manager.rb +147 -0
- data/lib/karafka/pro/routing/features/active_job/builder.rb +45 -0
- data/lib/karafka/pro/routing/features/active_job.rb +26 -0
- data/lib/karafka/pro/routing/features/base.rb +24 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +53 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue.rb +27 -0
- data/lib/karafka/pro/routing/features/delaying/config.rb +27 -0
- data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/delaying/topic.rb +59 -0
- data/lib/karafka/pro/routing/features/delaying.rb +29 -0
- data/lib/karafka/pro/routing/features/expiring/config.rb +27 -0
- data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/expiring/topic.rb +59 -0
- data/lib/karafka/pro/routing/features/expiring.rb +27 -0
- data/lib/karafka/pro/routing/features/filtering/config.rb +40 -0
- data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/filtering/topic.rb +51 -0
- data/lib/karafka/pro/routing/features/filtering.rb +27 -0
- data/lib/karafka/pro/routing/features/inline_insights/config.rb +32 -0
- data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/inline_insights/topic.rb +52 -0
- data/lib/karafka/pro/routing/features/inline_insights.rb +26 -0
- data/lib/karafka/pro/routing/features/long_running_job/config.rb +28 -0
- data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +40 -0
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +42 -0
- data/lib/karafka/pro/routing/features/long_running_job.rb +28 -0
- data/lib/karafka/pro/routing/features/patterns/builder.rb +38 -0
- data/lib/karafka/pro/routing/features/patterns/config.rb +54 -0
- data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +72 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +62 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +46 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +41 -0
- data/lib/karafka/pro/routing/features/patterns/detector.rb +71 -0
- data/lib/karafka/pro/routing/features/patterns/pattern.rb +95 -0
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +35 -0
- data/lib/karafka/pro/routing/features/patterns/topic.rb +50 -0
- data/lib/karafka/pro/routing/features/patterns/topics.rb +53 -0
- data/lib/karafka/pro/routing/features/patterns.rb +33 -0
- data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +51 -0
- data/lib/karafka/pro/routing/features/pausing/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/pausing.rb +25 -0
- data/lib/karafka/pro/routing/features/throttling/config.rb +32 -0
- data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/throttling/topic.rb +69 -0
- data/lib/karafka/pro/routing/features/throttling.rb +30 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +30 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +55 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +56 -0
- data/lib/karafka/pro/routing/features/virtual_partitions.rb +27 -0
- data/lib/karafka/pro.rb +13 -0
- data/lib/karafka/process.rb +24 -8
- data/lib/karafka/processing/coordinator.rb +181 -0
- data/lib/karafka/processing/coordinators_buffer.rb +62 -0
- data/lib/karafka/processing/executor.rb +155 -0
- data/lib/karafka/processing/executors_buffer.rb +72 -0
- data/lib/karafka/processing/expansions_selector.rb +22 -0
- data/lib/karafka/processing/inline_insights/consumer.rb +41 -0
- data/lib/karafka/processing/inline_insights/listener.rb +19 -0
- data/lib/karafka/processing/inline_insights/tracker.rb +128 -0
- data/lib/karafka/processing/jobs/base.rb +55 -0
- data/lib/karafka/processing/jobs/consume.rb +45 -0
- data/lib/karafka/processing/jobs/idle.rb +24 -0
- data/lib/karafka/processing/jobs/revoked.rb +22 -0
- data/lib/karafka/processing/jobs/shutdown.rb +23 -0
- data/lib/karafka/processing/jobs_builder.rb +28 -0
- data/lib/karafka/processing/jobs_queue.rb +150 -0
- data/lib/karafka/processing/partitioner.rb +24 -0
- data/lib/karafka/processing/result.rb +42 -0
- data/lib/karafka/processing/scheduler.rb +22 -0
- data/lib/karafka/processing/strategies/aj_dlq_mom.rb +44 -0
- data/lib/karafka/processing/strategies/aj_mom.rb +21 -0
- data/lib/karafka/processing/strategies/base.rb +52 -0
- data/lib/karafka/processing/strategies/default.rb +158 -0
- data/lib/karafka/processing/strategies/dlq.rb +88 -0
- data/lib/karafka/processing/strategies/dlq_mom.rb +49 -0
- data/lib/karafka/processing/strategies/mom.rb +29 -0
- data/lib/karafka/processing/strategy_selector.rb +47 -0
- data/lib/karafka/processing/worker.rb +93 -0
- data/lib/karafka/processing/workers_batch.rb +27 -0
- data/lib/karafka/railtie.rb +141 -0
- data/lib/karafka/routing/activity_manager.rb +84 -0
- data/lib/karafka/routing/builder.rb +45 -19
- data/lib/karafka/routing/consumer_group.rb +56 -20
- data/lib/karafka/routing/consumer_mapper.rb +1 -12
- data/lib/karafka/routing/features/active_job/builder.rb +33 -0
- data/lib/karafka/routing/features/active_job/config.rb +15 -0
- data/lib/karafka/routing/features/active_job/contracts/topic.rb +44 -0
- data/lib/karafka/routing/features/active_job/proxy.rb +14 -0
- data/lib/karafka/routing/features/active_job/topic.rb +33 -0
- data/lib/karafka/routing/features/active_job.rb +13 -0
- data/lib/karafka/routing/features/base/expander.rb +59 -0
- data/lib/karafka/routing/features/base.rb +71 -0
- data/lib/karafka/routing/features/dead_letter_queue/config.rb +19 -0
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +46 -0
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +41 -0
- data/lib/karafka/routing/features/dead_letter_queue.rb +16 -0
- data/lib/karafka/routing/features/declaratives/config.rb +18 -0
- data/lib/karafka/routing/features/declaratives/contracts/topic.rb +33 -0
- data/lib/karafka/routing/features/declaratives/topic.rb +44 -0
- data/lib/karafka/routing/features/declaratives.rb +14 -0
- data/lib/karafka/routing/features/inline_insights/config.rb +15 -0
- data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +27 -0
- data/lib/karafka/routing/features/inline_insights/topic.rb +31 -0
- data/lib/karafka/routing/features/inline_insights.rb +40 -0
- data/lib/karafka/routing/features/manual_offset_management/config.rb +15 -0
- data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +27 -0
- data/lib/karafka/routing/features/manual_offset_management/topic.rb +35 -0
- data/lib/karafka/routing/features/manual_offset_management.rb +18 -0
- data/lib/karafka/routing/proxy.rb +22 -21
- data/lib/karafka/routing/router.rb +24 -10
- data/lib/karafka/routing/subscription_group.rb +110 -0
- data/lib/karafka/routing/subscription_groups_builder.rb +65 -0
- data/lib/karafka/routing/topic.rb +87 -24
- data/lib/karafka/routing/topics.rb +46 -0
- data/lib/karafka/runner.rb +52 -0
- data/lib/karafka/serialization/json/deserializer.rb +7 -15
- data/lib/karafka/server.rb +113 -37
- data/lib/karafka/setup/attributes_map.rb +348 -0
- data/lib/karafka/setup/config.rb +256 -175
- data/lib/karafka/status.rb +54 -7
- data/lib/karafka/templates/example_consumer.rb.erb +16 -0
- data/lib/karafka/templates/karafka.rb.erb +33 -55
- data/lib/karafka/time_trackers/base.rb +14 -0
- data/lib/karafka/time_trackers/pause.rb +122 -0
- data/lib/karafka/time_trackers/poll.rb +69 -0
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +91 -17
- data/renovate.json +9 -0
- data.tar.gz.sig +0 -0
- metadata +330 -168
- metadata.gz.sig +0 -0
- data/MIT-LICENCE +0 -18
- data/certs/mensfeld.pem +0 -25
- data/config/errors.yml +0 -41
- data/lib/karafka/assignment_strategies/round_robin.rb +0 -13
- data/lib/karafka/attributes_map.rb +0 -63
- data/lib/karafka/backends/inline.rb +0 -16
- data/lib/karafka/base_responder.rb +0 -226
- data/lib/karafka/cli/flow.rb +0 -48
- data/lib/karafka/cli/missingno.rb +0 -19
- data/lib/karafka/code_reloader.rb +0 -67
- data/lib/karafka/connection/api_adapter.rb +0 -158
- data/lib/karafka/connection/batch_delegator.rb +0 -55
- data/lib/karafka/connection/builder.rb +0 -23
- data/lib/karafka/connection/message_delegator.rb +0 -36
- data/lib/karafka/consumers/batch_metadata.rb +0 -10
- data/lib/karafka/consumers/callbacks.rb +0 -71
- data/lib/karafka/consumers/includer.rb +0 -64
- data/lib/karafka/consumers/responders.rb +0 -24
- data/lib/karafka/consumers/single_params.rb +0 -15
- data/lib/karafka/contracts/consumer_group_topic.rb +0 -19
- data/lib/karafka/contracts/responder_usage.rb +0 -54
- data/lib/karafka/fetcher.rb +0 -42
- data/lib/karafka/helpers/class_matcher.rb +0 -88
- data/lib/karafka/helpers/config_retriever.rb +0 -46
- data/lib/karafka/helpers/inflector.rb +0 -26
- data/lib/karafka/instrumentation/stdout_listener.rb +0 -140
- data/lib/karafka/params/batch_metadata.rb +0 -26
- data/lib/karafka/params/builders/batch_metadata.rb +0 -30
- data/lib/karafka/params/builders/params.rb +0 -38
- data/lib/karafka/params/builders/params_batch.rb +0 -25
- data/lib/karafka/params/params_batch.rb +0 -60
- data/lib/karafka/patches/ruby_kafka.rb +0 -47
- data/lib/karafka/persistence/client.rb +0 -29
- data/lib/karafka/persistence/consumers.rb +0 -45
- data/lib/karafka/persistence/topics.rb +0 -48
- data/lib/karafka/responders/builder.rb +0 -36
- data/lib/karafka/responders/topic.rb +0 -55
- data/lib/karafka/routing/topic_mapper.rb +0 -53
- data/lib/karafka/serialization/json/serializer.rb +0 -31
- data/lib/karafka/setup/configurators/water_drop.rb +0 -36
- data/lib/karafka/templates/application_responder.rb.erb +0 -11
# frozen_string_literal: true

module Karafka
  module Processing
    # Coordinators builder used to build coordinators per topic partition
    #
    # It provides direct pauses access for revocation
    #
    # @note This buffer operates only from the listener loop, thus we do not have to make it
    #   thread-safe.
    class CoordinatorsBuffer
      # @param topics [Karafka::Routing::Topics]
      def initialize(topics)
        @topics = topics
        @pauses_manager = Connection::PausesManager.new
        @coordinator_class = ::Karafka::App.config.internal.processing.coordinator_class
        # topic name => { partition => coordinator }
        @coordinators = Hash.new { |hash, topic_name| hash[topic_name] = {} }
      end

      # Finds the coordinator for the given topic partition, building and caching one when it
      # is not yet present.
      #
      # @param topic_name [String] topic name
      # @param partition [Integer] partition number
      # @return [Object] coordinator instance for this topic partition
      def find_or_create(topic_name, partition)
        per_partition = @coordinators[topic_name]

        return per_partition[partition] if per_partition.key?(partition)

        routing_topic = @topics.find(topic_name)

        per_partition[partition] = @coordinator_class.new(
          routing_topic,
          partition,
          @pauses_manager.fetch(routing_topic, partition)
        )
      end

      # Resumes processing of partitions for which pause time has ended.
      # @param block we want to run for resumed topic partitions
      # @yieldparam [String] topic name
      # @yieldparam [Integer] partition number
      def resume(&block)
        @pauses_manager.resume(&block)
      end

      # @param topic_name [String] topic name
      # @param partition [Integer] partition number
      def revoke(topic_name, partition)
        per_partition = @coordinators[topic_name]

        return unless per_partition.key?(partition)

        # The fact that we delete here does not change the fact that the executor still holds the
        # reference to this coordinator. We delete it here, as we will no longer process any
        # new stuff with it and we may need a new coordinator if we regain this partition, but the
        # coordinator may still be in use
        per_partition.delete(partition).revoke
      end

      # Clears coordinators and re-creates the pauses manager
      # This should be used only for critical errors recovery
      def reset
        @pauses_manager = Connection::PausesManager.new
        @coordinators.clear
      end
    end
  end
end
# frozen_string_literal: true

module Karafka
  # Namespace that encapsulates all the logic related to processing data.
  module Processing
    # Executors:
    # - run consumers code (for `#call`) or run given preparation / teardown operations when needed
    #   from separate threads.
    # - they re-create consumer instances in case of partitions that were revoked and assigned
    #   back.
    #
    # @note Executors are not removed after partition is revoked. They are not that big and will
    #   be re-used in case of a re-claim
    class Executor
      extend Forwardable

      # Topic and partition come straight from the coordinator this executor was built with.
      def_delegators :@coordinator, :topic, :partition

      # @return [String] unique id that we use to ensure, that we use for state tracking
      attr_reader :id

      # @return [String] subscription group id to which a given executor belongs
      attr_reader :group_id

      # @return [Karafka::Messages::Messages] messages batch
      # NOTE(review): `@messages` is never assigned inside this class — batches are assigned onto
      # the consumer instance instead — so this reader appears to return nil; confirm callers.
      attr_reader :messages

      # @return [Karafka::Processing::Coordinator] coordinator for this executor
      attr_reader :coordinator

      # @param group_id [String] id of the subscription group to which the executor belongs
      # @param client [Karafka::Connection::Client] kafka client
      # @param coordinator [Karafka::Processing::Coordinator]
      def initialize(group_id, client, coordinator)
        # Random hex id used for state tracking of this executor instance
        @id = SecureRandom.hex(6)
        @group_id = group_id
        @client = client
        @coordinator = coordinator
      end

      # Allows us to prepare the consumer in the listener thread prior to the job being send to
      # the queue. It also allows to run some code that is time sensitive and cannot wait in the
      # queue as it could cause starvation.
      #
      # @param messages [Array<Karafka::Messages::Message>]
      def before_enqueue(messages)
        # Recreate consumer with each batch if persistence is not enabled
        # We reload the consumers with each batch instead of relying on some external signals
        # when needed for consistency. That way devs may have it on or off and not in this
        # middle state, where re-creation of a consumer instance would occur only sometimes
        @consumer = nil unless topic.consumer_persistence

        # First we build messages batch...
        consumer.messages = Messages::Builders::Messages.call(
          messages,
          topic,
          partition,
          # the moment we've received the batch or actually the moment we've enqueued it,
          # but good enough
          Time.now
        )

        consumer.on_before_enqueue
      end

      # Runs setup and warm-up code in the worker prior to running the consumption
      def before_consume
        consumer.on_before_consume
      end

      # Runs consumer data processing against given batch and handles failures and errors.
      def consume
        # We run the consumer client logic...
        consumer.on_consume
      end

      # Runs consumer after consumption code
      def after_consume
        consumer.on_after_consume
      end

      # Runs consumer idle operations
      # This may include house-keeping or other state management changes that can occur but that
      # not mean there are any new messages available for the end user to process
      def idle
        # Initializes the messages set in case idle operation would happen before any processing
        # This prevents us from having no messages object at all as the messages object and
        # its metadata may be used for statistics
        consumer.messages ||= Messages::Builders::Messages.call(
          [],
          topic,
          partition,
          Time.now
        )

        consumer.on_idle
      end

      # Runs the controller `#revoked` method that should be triggered when a given consumer is
      # no longer needed due to partitions reassignment.
      #
      # @note Clearing the consumer will ensure, that if we get the partition back, it will be
      #   handled with a consumer with a clean state.
      #
      # @note We run it only when consumer was present, because presence indicates, that at least
      #   a single message has been consumed.
      #
      # @note We do not reset the consumer but we indicate need for recreation instead, because
      #   after the revocation, there still may be `#after_consume` running that needs a given
      #   consumer instance.
      def revoked
        consumer.on_revoked if @consumer
      end

      # Runs the controller `#shutdown` method that should be triggered when a given consumer is
      # no longer needed as we're closing the process.
      #
      # @note While we do not need to clear the consumer here, it's a good habit to clean after
      #   work is done.
      def shutdown
        # There is a case, where the consumer no longer exists because it was revoked, in case like
        # that we do not build a new instance and shutdown should not be triggered.
        consumer.on_shutdown if @consumer
      end

      private

      # @return [Object] cached consumer instance
      def consumer
        @consumer ||= begin
          topic = @coordinator.topic

          # Flow strategy and API expansions are resolved from the topic configuration
          strategy = ::Karafka::App.config.internal.processing.strategy_selector.find(topic)
          expansions = ::Karafka::App.config.internal.processing.expansions_selector.find(topic)

          consumer = topic.consumer_class.new
          # We use singleton class as the same consumer class may be used to process different
          # topics with different settings
          consumer.singleton_class.include(strategy)

          # Specific features may expand consumer API beyond the injected strategy. The difference
          # here is that strategy impacts the flow of states while extra APIs just provide some
          # extra methods with informations, etc but do no deviate the flow behavior
          expansions.each { |expansion| consumer.singleton_class.include(expansion) }

          consumer.client = @client
          consumer.producer = ::Karafka::App.producer
          consumer.coordinator = @coordinator

          consumer
        end
      end
    end
  end
end
# frozen_string_literal: true

module Karafka
  module Processing
    # Buffer for executors of a given subscription group. It wraps around the concept of building
    # and caching them, so we can re-use them instead of creating new each time.
    class ExecutorsBuffer
      # @param client [Connection::Client]
      # @param subscription_group [Routing::SubscriptionGroup]
      # @return [ExecutorsBuffer]
      def initialize(client, subscription_group)
        @subscription_group = subscription_group
        @client = client
        # We need two layers here to keep track of topics, partitions and processing groups
        @buffer = Hash.new do |topics, topic|
          topics[topic] = Hash.new { |partitions, partition| partitions[partition] = {} }
        end
      end

      # Finds or creates an executor based on the provided details
      #
      # @param topic [String] topic name
      # @param partition [Integer] partition number
      # @param parallel_key [String] parallel group key
      # @param coordinator [Karafka::Processing::Coordinator]
      # @return [Executor] consumer executor
      def find_or_create(topic, partition, parallel_key, coordinator)
        executors = @buffer[topic][partition]

        executors[parallel_key] ||= Executor.new(
          @subscription_group.id,
          @client,
          coordinator
        )
      end

      # Revokes executors of a given topic partition, so they won't be used anymore for incoming
      # messages
      #
      # @param topic [String] topic name
      # @param partition [Integer] partition number
      def revoke(topic, partition)
        @buffer[topic][partition].clear
      end

      # Finds all the executors available for a given topic partition
      #
      # @param topic [String] topic name
      # @param partition [Integer] partition number
      # @return [Array<Executor>] executors in use for this topic + partition
      def find_all(topic, partition)
        @buffer[topic][partition].values
      end

      # Iterates over all available executors and yields them together with topic and partition
      # info
      # @yieldparam [Routing::Topic] karafka routing topic object
      # @yieldparam [Integer] partition number
      # @yieldparam [Executor] given executor
      def each
        @buffer.each_value do |partitions|
          partitions.each_value do |executors|
            executors.each_value do |executor|
              yield(executor)
            end
          end
        end
      end

      # Clears the executors buffer. Useful for critical errors recovery.
      def clear
        @buffer.clear
      end
    end
  end
end
@@ -0,0 +1,22 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    # Selects appropriate consumer API expansions based on topic features setup.
    #
    # Expansions are NOT about the processing flow (strategies cover that). They are
    # supposed to provide certain extra, non-flow related APIs to the end user.
    class ExpansionsSelector
      # @param topic [Karafka::Routing::Topic] topic with settings based on which we pick
      #   expansions
      # @return [Array<Module>] modules with proper expansions we're supposed to use to
      #   enhance the consumer
      def find(topic)
        topic.inline_insights? ? [Processing::InlineInsights::Consumer] : []
      end
    end
  end
end
|
@@ -0,0 +1,41 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    # Namespace of the Inline Insights feature "non routing" related components
    #
    # @note We use both `#insights` (the feature name) and `#statistics` (consistent with the
    #   fact that we publish and operate on statistics). Users can pick whichever they prefer.
    module InlineInsights
      # Module that adds extra methods to the consumer that allow us to fetch the insights
      module Consumer
        # @return [Hash] empty hash or hash with given partition insights if already present
        # @note Insights are cached on the consumer because in some scenarios (for example an
        #   involuntary revocation) incoming statistics may no longer carry the lost partition
        #   insights while a batch is still being processed. Caching keeps insights stable and
        #   consistent throughout the whole processing of a single batch.
        def insights
          fresh = Tracker.find(topic, partition)

          # Tracker no longer has anything new but we still hold a local copy — keep using it
          return @insights if @insights && fresh.empty?
          # Same object as before — nothing to update
          return @insights if @insights.equal?(fresh)

          # New, non-empty insights arrived — cache and expose them
          @insights = fresh
        end

        # @return [Boolean] true if there are insights to work with, otherwise false
        def insights?
          !insights.empty?
        end

        alias statistics insights
        alias statistics? insights?
      end
    end
  end
end
|
@@ -0,0 +1,19 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    module InlineInsights
      # Listener that feeds emitted statistics into our inline insights tracker
      class Listener
        # Adds statistics to the tracker
        # @param event [Karafka::Core::Monitoring::Event] event with statistics
        def on_statistics_emitted(event)
          Tracker.add(event[:consumer_group_id], event[:statistics])
        end
      end
    end
  end
end
|
@@ -0,0 +1,128 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    module InlineInsights
      # Object used to track statistics coming from librdkafka in a way that can be accessible
      # by the consumers
      #
      # We use a single tracker because we do not need state management here as our consumer
      # groups clients identified by statistics name value are unique. On top of that, having a
      # per-process singleton allows us to use the tracker easily also from other places like
      # the filtering API etc.
      #
      # @note We include a cache of 5 minutes for revoked partitions to compensate for cases
      #   where, when using LRJ, a lost partition's data would no longer be present although we
      #   would still be in the processing phase. Since those metrics are published with each
      #   `poll`, regular processing is not a subject of this issue. For LRJ we keep the
      #   reference. The only case where this could be switched midway is when LRJ is running
      #   for an extended period of time after an involuntary revocation. A time-based cache
      #   (instead of state tracking) simplifies the design, as we would otherwise also have to
      #   track operations running in a revoked state.
      #
      # @note This tracker keeps in-memory data about all topics and partitions that it
      #   encounters because, in case of routing patterns, we may start getting statistics
      #   prior to registering a given topic via dynamic routing expansions. In such a case we
      #   would not have insights where they were actually available for us to use.
      #
      # @note Memory usage is negligible as long as we can evict expired data. A single metrics
      #   set for a single partition contains around 4KB of data. This means that with an
      #   assignment of 1000 partitions, we use around 4MB of space for tracking those metrics.
      class Tracker
        include Singleton
        include Karafka::Core::Helpers::Time

        # Empty hash we want to return in any case where we could not locate appropriate topic
        # partition statistics.
        EMPTY_HASH = {}.freeze

        # Empty array to save on memory allocations.
        EMPTY_ARRAY = [].freeze

        # 5 minutes of cache (in milliseconds). We cache the last result per consumer group
        # topic partition so we are not affected by involuntary rebalances during LRJ execution.
        TTL = 5 * 60 * 1_000

        private_constant :EMPTY_HASH, :EMPTY_ARRAY, :TTL

        class << self
          extend Forwardable

          # Class-level API delegating to the singleton instance.
          # NOTE(review): `:exists?` is delegated here but no `exists?` instance method is
          # visible in this class — confirm it is defined elsewhere or drop the delegation.
          def_delegators :instance, :find, :add, :exists?, :clear
        end

        def initialize
          # Accumulator keyed by "consumer_group_id_topic_partition"; values are
          # [monotonic_timestamp_ms, partition_statistics] pairs used for TTL eviction
          @accu = {}
          # Guards writes (add/clear); reads are deliberately lock-free (see #find)
          @mutex = Mutex.new
        end

        # Adds each partition statistics into internal accumulator. Single statistics set may
        # contain data from multiple topics and their partitions because a single client can
        # operate on multiple topics and partitions.
        #
        # We iterate over those topics and partitions and store topics partitions data only.
        #
        # @param consumer_group_id [String] id of the consumer group for which statistics were
        #   emitted.
        # @param statistics [Hash] librdkafka enriched statistics
        def add(consumer_group_id, statistics)
          @mutex.synchronize do
            statistics.fetch('topics', EMPTY_HASH).each do |topic_name, t_details|
              t_details.fetch('partitions', EMPTY_HASH).each do |partition_id, p_details|
                next unless track?(partition_id, p_details)

                key = "#{consumer_group_id}_#{topic_name}_#{partition_id}"
                @accu[key] = [monotonic_now, p_details]
              end
            end

            # Drop entries older than TTL on every write so the accumulator stays bounded
            evict
          end
        end

        # Finds statistics about requested consumer group topic partition
        #
        # @param topic [Karafka::Routing::Topic]
        # @param partition [Integer]
        # @return [Hash] hash with given topic partition statistics or empty hash if not present
        #
        # @note We do not enclose it with a mutex mainly because the only thing that could
        #   happen here that would be a race-condition is a miss, which we anyhow need to
        #   support due to how librdkafka ships metrics and a potential removal of data on
        #   heavily revoked LRJ.
        def find(topic, partition)
          key = "#{topic.consumer_group.id}_#{topic.name}_#{partition}"
          # Stored value is [timestamp, details]; `.last` extracts the details part
          @accu.fetch(key, EMPTY_ARRAY).last || EMPTY_HASH
        end

        # Clears the tracker
        def clear
          @mutex.synchronize { @accu.clear }
        end

        private

        # Evicts expired data from the cache
        def evict
          @accu.delete_if { |_, details| monotonic_now - details.first > TTL }
        end

        # Should we track a given partition
        #
        # We do not track stopped partitions nor the ones we are not actively working with
        # @param partition_id [String] partition id as a string
        # @param p_details [Hash] partition statistics details
        # @return [Boolean] true if we should track given partition
        def track?(partition_id, p_details)
          # '-1' is librdkafka's placeholder partition entry, not a real assignment
          return false if partition_id == '-1'

          fetch_state = p_details.fetch('fetch_state')

          return false if fetch_state == 'stopped'
          return false if fetch_state == 'none'

          true
        end
      end
    end
  end
end
|
@@ -0,0 +1,55 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    # Namespace for all the jobs that are supposed to run in workers.
    module Jobs
      # Base class for all worker job types.
      #
      # A job exposes three main entry-points: `#before_call`, `#call` and `#after_call`.
      # Only `#call` is required; the remaining hooks default to no-ops.
      class Base
        extend Forwardable

        # One job always has exactly one executor, so we reuse its id and group id
        def_delegators :executor, :id, :group_id

        attr_reader :executor

        # Creates a new job instance
        #
        # Every job starts as blocking; it may release the lock (flip to non-blocking)
        # once its blocking operations are done, if needed.
        def initialize
          @non_blocking = false
        end

        # Hook that may be redefined to run code prior to the job being enqueued
        # @note This runs in the listener thread and not in the worker
        def before_enqueue; end

        # Hook that may be redefined to run code right before the proper execution
        def before_call; end

        # The main entry-point of a job
        def call
          raise NotImplementedError, 'Please implement in a subclass'
        end

        # Hook that may be redefined to run code right after the proper execution
        def after_call; end

        # @return [Boolean] is this a non-blocking job
        #
        # @note A blocking job causes the job queue to wait until the job finishes before
        #   the lock on adding new jobs is removed. All jobs are blocking by default.
        #
        # @note A job **needs** to mark itself as non-blocking only **after** it is done
        #   with all the blocking things (pausing partition, etc).
        def non_blocking?
          @non_blocking
        end
      end
    end
  end
end
@@ -0,0 +1,45 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    module Jobs
      # The main job type. It runs the executor that triggers given topic partition messages
      # processing in an underlying consumer instance.
      class Consume < Base
        # @return [Karafka::Messages::Messages] the messages batch this job operates on
        #   (previously documented as `Array<Rdkafka::Consumer::Message>`, which contradicted
        #   the initializer's own param docs — corrected to match)
        attr_reader :messages

        # @param executor [Karafka::Processing::Executor] executor that is supposed to run a
        #   given job
        # @param messages [Karafka::Messages::Messages] karafka messages batch
        # @return [Consume]
        def initialize(executor, messages)
          @executor = executor
          @messages = messages
          super()
        end

        # Runs all the preparation code on the executor that needs to happen before the job
        # is enqueued.
        def before_enqueue
          executor.before_enqueue(@messages)
        end

        # Runs the before consumption preparations on the executor
        def before_call
          executor.before_consume
        end

        # Runs the given executor
        def call
          executor.consume
        end

        # Runs any error handling and other post-consumption stuff on the executor
        def after_call
          executor.after_consume
        end
      end
    end
  end
end
@@ -0,0 +1,24 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    module Jobs
      # Type of job that we may use to run some extra handling that happens without the user
      # related lifecycle event like consumption, revocation, etc.
      class Idle < Base
        # @param executor [Karafka::Processing::Executor] executor that is supposed to run a
        #   given job on an active consumer
        # @return [Idle]
        #   (previous docs said `[Shutdown]` — a copy-paste error from the Shutdown job)
        def initialize(executor)
          @executor = executor
          super()
        end

        # Run the idle work via the executor
        def call
          executor.idle
        end
      end
    end
  end
end
@@ -0,0 +1,22 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    module Jobs
      # Job that runs the revoked operation when we lose a partition on a consumer that had it.
      class Revoked < Base
        # @param executor [Karafka::Processing::Executor] executor that is supposed to run
        #   the job
        # @return [Revoked]
        def initialize(executor)
          @executor = executor
          super()
        end

        # Runs the revoking job via an executor.
        def call
          executor.revoked
        end
      end
    end
  end
end
@@ -0,0 +1,23 @@
|
|
1
|
+
# frozen_string_literal: true

module Karafka
  module Processing
    module Jobs
      # Job that runs on each active consumer upon process shutdown (one job per consumer).
      class Shutdown < Base
        # @param executor [Karafka::Processing::Executor] executor that is supposed to run a
        #   given job on an active consumer
        # @return [Shutdown]
        def initialize(executor)
          @executor = executor
          super()
        end

        # Runs the shutdown job via an executor.
        def call
          executor.shutdown
        end
      end
    end
  end
end