karafka 2.5.2 → 2.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +18 -0
- data/.yard-lint.yml +174 -0
- data/CHANGELOG.md +6 -0
- data/Gemfile +1 -0
- data/Gemfile.lock +24 -19
- data/examples/payloads/json/sample_set_03/event_type_1.json +1 -1
- data/examples/payloads/json/sample_set_03/event_type_2.json +1 -1
- data/examples/payloads/json/sample_set_03/event_type_3.json +1 -1
- data/karafka.gemspec +2 -2
- data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -2
- data/lib/karafka/active_job/consumer.rb +2 -2
- data/lib/karafka/active_job/current_attributes.rb +2 -2
- data/lib/karafka/active_job/deserializer.rb +1 -1
- data/lib/karafka/active_job/dispatcher.rb +2 -2
- data/lib/karafka/admin/configs/resource.rb +7 -1
- data/lib/karafka/admin/consumer_groups.rb +6 -8
- data/lib/karafka/admin/topics.rb +5 -4
- data/lib/karafka/admin.rb +10 -10
- data/lib/karafka/app.rb +3 -3
- data/lib/karafka/base_consumer.rb +1 -1
- data/lib/karafka/cli/base.rb +1 -1
- data/lib/karafka/cli/console.rb +1 -1
- data/lib/karafka/cli/contracts/server.rb +1 -1
- data/lib/karafka/cli/help.rb +1 -1
- data/lib/karafka/cli/install.rb +2 -1
- data/lib/karafka/cli/server.rb +1 -1
- data/lib/karafka/cli/swarm.rb +1 -1
- data/lib/karafka/connection/client.rb +19 -18
- data/lib/karafka/connection/manager.rb +1 -0
- data/lib/karafka/connection/proxy.rb +1 -1
- data/lib/karafka/connection/rebalance_manager.rb +1 -1
- data/lib/karafka/connection/status.rb +1 -0
- data/lib/karafka/constraints.rb +1 -1
- data/lib/karafka/contracts/base.rb +1 -1
- data/lib/karafka/deserializers/payload.rb +1 -1
- data/lib/karafka/helpers/async.rb +1 -1
- data/lib/karafka/helpers/config_importer.rb +3 -3
- data/lib/karafka/helpers/multi_delegator.rb +3 -0
- data/lib/karafka/instrumentation/assignments_tracker.rb +2 -1
- data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
- data/lib/karafka/instrumentation/logger.rb +6 -6
- data/lib/karafka/instrumentation/monitor.rb +2 -2
- data/lib/karafka/instrumentation/vendors/appsignal/base.rb +1 -1
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +1 -1
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
- data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +3 -15
- data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
- data/lib/karafka/pro/active_job/consumer.rb +2 -2
- data/lib/karafka/pro/active_job/dispatcher.rb +3 -3
- data/lib/karafka/pro/cleaner.rb +3 -3
- data/lib/karafka/pro/cli/contracts/server.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/base.rb +4 -3
- data/lib/karafka/pro/cli/parallel_segments/collapse.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/distribute.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
- data/lib/karafka/pro/connection/manager.rb +1 -2
- data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
- data/lib/karafka/pro/contracts/base.rb +1 -1
- data/lib/karafka/pro/encryption/cipher.rb +3 -2
- data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
- data/lib/karafka/pro/encryption/messages/parser.rb +1 -1
- data/lib/karafka/pro/encryption/setup/config.rb +1 -1
- data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
- data/lib/karafka/pro/iterator.rb +1 -1
- data/lib/karafka/pro/loader.rb +1 -1
- data/lib/karafka/pro/processing/coordinator.rb +1 -1
- data/lib/karafka/pro/processing/filters/base.rb +1 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
- data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
- data/lib/karafka/pro/processing/partitioner.rb +1 -1
- data/lib/karafka/pro/processing/strategies/base.rb +1 -1
- data/lib/karafka/pro/processing/strategies/default.rb +2 -2
- data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
- data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
- data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/contracts/task.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/dispatcher.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
- data/lib/karafka/pro/recurring_tasks/schedule.rb +5 -2
- data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
- data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
- data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
- data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
- data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/swarm.rb +1 -1
- data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
- data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/contracts/message.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
- data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
- data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
- data/lib/karafka/process.rb +4 -4
- data/lib/karafka/processing/executor.rb +1 -1
- data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
- data/lib/karafka/processing/jobs_queue.rb +1 -1
- data/lib/karafka/processing/result.rb +1 -0
- data/lib/karafka/processing/strategy_selector.rb +1 -0
- data/lib/karafka/routing/activity_manager.rb +1 -0
- data/lib/karafka/routing/builder.rb +3 -1
- data/lib/karafka/routing/contracts/consumer_group.rb +3 -2
- data/lib/karafka/routing/contracts/topic.rb +5 -2
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
- data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
- data/lib/karafka/routing/features/inline_insights.rb +5 -5
- data/lib/karafka/routing/router.rb +1 -1
- data/lib/karafka/routing/subscription_group.rb +1 -1
- data/lib/karafka/routing/subscription_groups_builder.rb +1 -0
- data/lib/karafka/routing/topic.rb +3 -3
- data/lib/karafka/server.rb +1 -1
- data/lib/karafka/setup/attributes_map.rb +4 -2
- data/lib/karafka/setup/config.rb +21 -10
- data/lib/karafka/setup/config_proxy.rb +209 -0
- data/lib/karafka/setup/contracts/config.rb +1 -1
- data/lib/karafka/swarm/liveness_listener.rb +1 -0
- data/lib/karafka/swarm/manager.rb +7 -6
- data/lib/karafka/swarm/node.rb +1 -1
- data/lib/karafka/swarm/supervisor.rb +1 -0
- data/lib/karafka/time_trackers/base.rb +1 -1
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +2 -2
- metadata +7 -5
data/lib/karafka/pro/connection/multiplexing/listener.rb
CHANGED

@@ -12,6 +12,7 @@ module Karafka
         # Listener used to connect listeners manager to the lifecycle events that are significant
         # to its operations
         class Listener
+          # Initializes the multiplexing listener with the connection manager
           def initialize
             @manager = App.config.internal.connection.manager
           end
data/lib/karafka/pro/encryption/cipher.rb
CHANGED

@@ -12,6 +12,7 @@ module Karafka
           encryption: %i[encryption]
         )

+        # Initializes the cipher with empty private keys cache
         def initialize
           @private_pems = {}
         end
@@ -35,7 +36,7 @@ module Karafka

         # @return [::OpenSSL::PKey::RSA] rsa public key
         def public_pem
-          @public_pem ||= ::OpenSSL::PKey::RSA.new(encryption.public_key)
+          @public_pem ||= OpenSSL::PKey::RSA.new(encryption.public_key)
         end

         # @param version [String] version for which we want to get the rsa key
@@ -46,7 +47,7 @@ module Karafka
           key_string = encryption.private_keys[version]
           key_string || raise(Errors::PrivateKeyNotFoundError, version)

-          @private_pems[version] = ::OpenSSL::PKey::RSA.new(key_string)
+          @private_pems[version] = OpenSSL::PKey::RSA.new(key_string)
         end
       end
     end
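Note: the cipher changes above only drop the redundant top-level `::` prefix; the key-memoization pattern itself is unchanged. A minimal sketch of that pattern under assumed inputs (`KeyStore` is a hypothetical stand-in, not the gem's `Karafka::Pro::Encryption::Cipher`):

```ruby
require 'openssl'

# Hypothetical stand-in for the memoized-key pattern visible in the hunks
# above; karafka's real cipher reads keys from its encryption config.
class KeyStore
  def initialize(public_key_pem, private_key_pems)
    @public_key_pem = public_key_pem
    @private_key_pems = private_key_pems
    # Parsed private keys are cached per version, as in the diff
    @private_pems = {}
  end

  # Parses the public PEM once and memoizes the OpenSSL key object
  def public_pem
    @public_pem ||= OpenSSL::PKey::RSA.new(@public_key_pem)
  end

  # Parses and caches the private PEM for a given key version
  def private_pem(version)
    key_string = @private_key_pems.fetch(version)
    @private_pems[version] ||= OpenSSL::PKey::RSA.new(key_string)
  end
end
```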
data/lib/karafka/pro/encryption/contracts/config.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
       # Encryption related contracts
       module Contracts
         # Makes sure, all the expected config is defined as it should be
-        class Config < ::Karafka::Contracts::Base
+        class Config < Karafka::Contracts::Base
           configure do |config|
             config.error_messages = YAML.safe_load_file(
               File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
data/lib/karafka/pro/encryption/messages/parser.rb
CHANGED

@@ -11,7 +11,7 @@ module Karafka
        # @note There may be a case where someone decides not to encrypt data and we start getting
        #   unencrypted payloads. That is why we always rely on message headers for encryption
        #   indication.
-        class Parser < ::Karafka::Messages::Parser
+        class Parser < Karafka::Messages::Parser
          include Helpers::ConfigImporter.new(
            cipher: %i[encryption cipher],
            active: %i[encryption active],
data/lib/karafka/pro/iterator/tpl_builder.rb
CHANGED

@@ -22,7 +22,7 @@ module Karafka
        # @param consumer [::Rdkafka::Consumer] consumer instance needed to talk with Kafka
        # @param expanded_topics [Hash] hash with expanded and normalized topics data
        def initialize(consumer, expanded_topics)
-          @consumer = ::Karafka::Connection::Proxy.new(consumer)
+          @consumer = Karafka::Connection::Proxy.new(consumer)
          @expanded_topics = expanded_topics
          @mapped_topics = Hash.new { |h, k| h[k] = {} }
        end
data/lib/karafka/pro/iterator.rb
CHANGED

@@ -44,7 +44,7 @@ module Karafka
        @topics_with_partitions = Expander.new.call(topics)

        @routing_topics = @topics_with_partitions.to_h do |name, _|
-          [name, ::Karafka::Routing::Router.find_or_initialize_by_name(name)]
+          [name, Karafka::Routing::Router.find_or_initialize_by_name(name)]
        end

        @total_partitions = @topics_with_partitions.map(&:last).sum(&:count)
data/lib/karafka/pro/loader.rb
CHANGED

@@ -120,7 +120,7 @@ module Karafka
        # Loads the Pro features of Karafka
        # @note Object space lookup is not the fastest but we do it once during boot, so it's ok
        def load_topic_features
-          ::Karafka::Pro::Routing::Features::Base.load_all
+          Karafka::Pro::Routing::Features::Base.load_all
        end
      end
    end
data/lib/karafka/pro/processing/coordinator.rb
CHANGED

@@ -8,7 +8,7 @@ module Karafka
    module Processing
      # Pro coordinator that provides extra orchestration methods useful for parallel processing
      # within the same partition
-      class Coordinator < ::Karafka::Processing::Coordinator
+      class Coordinator < Karafka::Processing::Coordinator
        extend Forwardable
        include Helpers::ConfigImporter.new(
          errors_tracker_class: %i[internal processing errors_tracker_class]
data/lib/karafka/pro/processing/filters/delayer.rb
CHANGED

@@ -25,7 +25,7 @@ module Karafka

            # Time on message is in seconds with ms precision, so we need to convert the ttl that
            # is in ms to this format
-            border = ::Time.now.utc - (@delay / 1_000.0)
+            border = Time.now.utc - (@delay / 1_000.0)

            messages.delete_if do |message|
              too_young = message.timestamp > border
data/lib/karafka/pro/processing/filters/expirer.rb
CHANGED

@@ -26,7 +26,7 @@ module Karafka

            # Time on message is in seconds with ms precision, so we need to convert the ttl that
            # is in ms to this format
-            border = ::Time.now.utc - (@ttl / 1_000.to_f)
+            border = Time.now.utc - (@ttl / 1_000.to_f)

            messages.delete_if do |message|
              too_old = message.timestamp < border
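Note: both filters above turn a millisecond setting into a `Time` border, since `message.timestamp` is a time value. A small sketch of that arithmetic with an assumed message shape (`Struct` stands in for `Karafka::Messages::Message`):

```ruby
# Stand-in for Karafka::Messages::Message; only the timestamp matters here
Message = Struct.new(:timestamp)

ttl_ms = 60_000
messages = [Message.new(Time.now.utc - 120), Message.new(Time.now.utc)]

# Expirer logic: anything older than the border has outlived its TTL
border = Time.now.utc - (ttl_ms / 1_000.0)
messages.delete_if { |message| message.timestamp < border }

messages.size # => 1
```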
data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb
CHANGED

@@ -17,7 +17,7 @@ module Karafka
        #
        # @note It needs to be working with a proper consumer that will handle the partition
        #   management. This layer of the framework knows nothing about Kafka messages consumption.
-        class ConsumeNonBlocking < ::Karafka::Processing::Jobs::Consume
+        class ConsumeNonBlocking < Karafka::Processing::Jobs::Consume
          self.action = :consume

          # Makes this job non-blocking from the start
data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb
CHANGED

@@ -11,7 +11,7 @@ module Karafka
        # We use this version for LRJ topics for cases where saturated resources would not allow
        # to run this job for extended period of time. Under such scenarios, if we would not use
        # a non-blocking one, we would reach max.poll.interval.ms.
-        class EofedNonBlocking < ::Karafka::Processing::Jobs::Eofed
+        class EofedNonBlocking < Karafka::Processing::Jobs::Eofed
          self.action = :eofed

          # @param args [Array] any arguments accepted by `::Karafka::Processing::Jobs::Eofed`
data/lib/karafka/pro/processing/jobs/periodic.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
      module Jobs
        # Job that represents a "ticking" work. Work that we run periodically for the Periodics
        # enabled topics.
-        class Periodic < ::Karafka::Processing::Jobs::Base
+        class Periodic < Karafka::Processing::Jobs::Base
          self.action = :tick

          # @param executor [Karafka::Pro::Processing::Executor] pro executor that is suppose to
data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb
CHANGED

@@ -15,7 +15,7 @@ module Karafka
        # It can be useful when having long lasting jobs that would exceed `max.poll.interval`
        # in scenarios where there are more jobs than threads, without this being async we
        # would potentially stop polling
-        class RevokedNonBlocking < ::Karafka::Processing::Jobs::Revoked
+        class RevokedNonBlocking < Karafka::Processing::Jobs::Revoked
          self.action = :revoked

          # Makes this job non-blocking from the start
data/lib/karafka/pro/processing/jobs_builder.rb
CHANGED

@@ -7,7 +7,7 @@ module Karafka
  module Pro
    module Processing
      # Pro jobs builder that supports lrj
-      class JobsBuilder < ::Karafka::Processing::JobsBuilder
+      class JobsBuilder < Karafka::Processing::JobsBuilder
        # @param executor [Karafka::Pro::Processing::Executor]
        def idle(executor)
          Karafka::Processing::Jobs::Idle.new(executor)
data/lib/karafka/pro/processing/partitioner.rb
CHANGED

@@ -7,7 +7,7 @@ module Karafka
  module Pro
    module Processing
      # Pro partitioner that can distribute work based on the virtual partitioner settings
-      class Partitioner < ::Karafka::Processing::Partitioner
+      class Partitioner < Karafka::Processing::Partitioner
        # @param topic [String] topic name
        # @param messages [Array<Karafka::Messages::Message>] karafka messages
        # @param coordinator [Karafka::Pro::Processing::Coordinator] processing coordinator that
data/lib/karafka/pro/processing/strategies/default.rb
CHANGED

@@ -14,7 +14,7 @@ module Karafka
        # Nothing. Just standard, automatic flow
        module Default
          include Base
-          include ::Karafka::Processing::Strategies::Default
+          include Karafka::Processing::Strategies::Default

          # Apply strategy for a non-feature based flow
          FEATURES = %i[].freeze
@@ -263,7 +263,7 @@ module Karafka
          # the post-user code execution marking with transactional producer to result in a
          # boolean state of marking for further framework flow. This is a normalization to make it
          # behave the same way as it would behave with a non-transactional one
-        rescue ::Rdkafka::RdkafkaError, Errors::AssignmentLostError
+        rescue Rdkafka::RdkafkaError, Errors::AssignmentLostError
          false
        ensure
          @_transaction_internal = false
data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb
CHANGED

@@ -11,8 +11,10 @@ module Karafka
          # Balanced distributor that groups messages by partition key
          # and processes larger groups first while maintaining message order within groups
          class Balanced < Base
-            #
-            #
+            # Distributes messages to virtual partitions ensuring balanced load across workers
+            # by grouping messages by partition key and assigning larger groups first
+            # @param messages [Array<Karafka::Messages::Message>]
+            # @return [Hash{Integer => Array<Karafka::Messages::Message>}] hash with group ids as
            #   keys and message groups as values
            def call(messages)
              # Group messages by partition key
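Note: the docs added above describe the balanced strategy: group messages by partition key, then hand the largest groups out first to the least-loaded virtual partition. A rough sketch of that idea (illustrative only, not the gem's exact implementation; `partitions_count` and the `#key` reader are assumptions):

```ruby
# Rough sketch of balanced distribution: biggest key-groups are placed
# first, each onto the currently least-loaded virtual partition.
def balanced_distribution(messages, partitions_count)
  groups = messages.group_by(&:key).values.sort_by { |group| -group.size }
  loads = Hash.new(0)

  groups.each_with_object(Hash.new { |hash, key| hash[key] = [] }) do |group, result|
    # Pick the virtual partition with the least assigned messages so far
    target = (0...partitions_count).min_by { |id| loads[id] }
    result[target].concat(group)
    loads[target] += group.size
  end
end
```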
data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb
CHANGED

@@ -11,8 +11,10 @@ module Karafka
          # Consistent distributor that ensures messages with the same partition key
          # are always processed in the same virtual partition
          class Consistent < Base
-            #
-            #
+            # Distributes messages ensuring consistent routing where messages with the same
+            # partition key always go to the same virtual partition
+            # @param messages [Array<Karafka::Messages::Message>]
+            # @return [Hash{Integer => Array<Karafka::Messages::Message>}] hash with group ids as
            #   keys and message groups as values
            def call(messages)
              messages
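Note: the consistent strategy, by contrast, only needs a deterministic key-to-partition mapping. A sketch under the same assumptions (the crc32 hash choice is illustrative, not necessarily what the gem uses):

```ruby
require 'zlib'

# Sketch of consistent distribution: the same partition key always lands
# in the same virtual partition id, preserving per-key ordering.
def consistent_distribution(messages, partitions_count)
  messages.group_by { |message| Zlib.crc32(message.key.to_s) % partitions_count }
end
```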
data/lib/karafka/pro/recurring_tasks/consumer.rb
CHANGED

@@ -11,13 +11,14 @@ module Karafka
      # - we only run schedules that are of same or newer version
      # - we always mark as consumed in such a way, that the first message received after
      #   assignment (if any) is a state
-      class Consumer < ::Karafka::BaseConsumer
+      class Consumer < Karafka::BaseConsumer
        # @param args [Array] all arguments accepted by the consumer
        def initialize(*args)
          super
          @executor = Executor.new
        end

+        # Consumes messages and manages recurring tasks execution
        def consume
          # There is nothing we can do if we operate on a newer schedule. In such cases we should
          # just wait and re-raise error hoping someone will notice or that this will be
@@ -54,7 +55,7 @@ module Karafka
            # that collectively have a different outcome
            @executor.call
          else
-            raise ::Karafka::Errors::UnsupportedCaseError, type
+            raise Karafka::Errors::UnsupportedCaseError, type
          end
        end

data/lib/karafka/pro/recurring_tasks/contracts/config.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
      # Recurring Tasks related contracts
      module Contracts
        # Makes sure, all the expected config is defined as it should be
-        class Config < ::Karafka::Contracts::Base
+        class Config < Karafka::Contracts::Base
          configure do |config|
            config.error_messages = YAML.safe_load_file(
              File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
@@ -17,7 +17,7 @@ module Karafka
          end

          nested(:recurring_tasks) do
-            required(:consumer_class) { |val| val < ::Karafka::BaseConsumer }
+            required(:consumer_class) { |val| val < Karafka::BaseConsumer }
            required(:deserializer) { |val| !val.nil? }
            required(:logging) { |val| [true, false].include?(val) }
            # Do not allow to run more often than every 5 seconds
data/lib/karafka/pro/recurring_tasks/contracts/task.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
      # Recurring Tasks related contracts
      module Contracts
        # Ensures that task details are as expected
-        class Task < ::Karafka::Contracts::Base
+        class Task < Karafka::Contracts::Base
          configure do |config|
            config.error_messages = YAML.safe_load_file(
              File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
data/lib/karafka/pro/recurring_tasks/dispatcher.rb
CHANGED

@@ -51,7 +51,7 @@ module Karafka
        # @note We do not fetch it via the ConfigImporter not to cache it so we can re-use it
        #   if needed
        def producer
-          ::Karafka::App.config.recurring_tasks.producer
+          Karafka::App.config.recurring_tasks.producer
        end

        # @return [Serializer]
data/lib/karafka/pro/recurring_tasks/executor.rb
CHANGED

@@ -17,6 +17,7 @@ module Karafka
          trigger
        ].freeze

+        # Initializes the executor in replaying mode
        def initialize
          @replaying = true
          @incompatible = false
@@ -131,7 +132,7 @@ module Karafka

        # @return [Karafka::Pro::RecurringTasks::Schedule] current in-memory schedule
        def schedule
-          ::Karafka::Pro::RecurringTasks.schedule
+          Karafka::Pro::RecurringTasks.schedule
        end

        # Dispatches the current schedule state to Kafka
data/lib/karafka/pro/recurring_tasks/schedule.rb
CHANGED

@@ -13,7 +13,7 @@ module Karafka
        # @return [String]
        attr_reader :version

-        # @return [Hash<String, Task>]
+        # @return [Hash{String => Task}]
        attr_reader :tasks

        # @param version [String] schedule version. In case of usage of versioning it is used to
@@ -43,7 +43,10 @@ module Karafka
        end

        # Allows us to have a nice DSL for defining schedules
-        # @param args [
+        # @param args [Hash] attributes accepted by the task initializer
+        # @option args [String] :id unique task identifier
+        # @option args [String] :cron cron expression for task scheduling
+        # @option args [Proc] :previous_time optional lambda returning previous execution time
        def schedule(**args, &)
          self << Task.new(**args, &)
        end
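Note: the new `@option` docs above mirror how the DSL is used. A hedged usage example (version string, task id, cron expression and task body are illustrative; the surrounding `define` call follows karafka's recurring tasks docs):

```ruby
# Illustrative schedule definition using the DSL documented above
Karafka::Pro::RecurringTasks.define('1.0.0') do
  schedule(id: 'cleanup', cron: '0 * * * *') do
    # CleanupJob is a hypothetical application class
    CleanupJob.perform
  end
end
```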
data/lib/karafka/pro/recurring_tasks/serializer.rb
CHANGED

@@ -11,7 +11,8 @@ module Karafka
        # Current recurring tasks related schema structure
        SCHEMA_VERSION = '1.0'

-        #
+        # Serializes and compresses the schedule with all its tasks and their execution state
+        # @param schedule [Karafka::Pro::RecurringTasks::Schedule]
        # @return [String] serialized and compressed current schedule data with its tasks and their
        #   current state.
        def schedule(schedule)
@@ -46,7 +47,7 @@ module Karafka
        def command(command_name, task_id)
          data = {
            schema_version: SCHEMA_VERSION,
-            schedule_version: ::Karafka::Pro::RecurringTasks.schedule.version,
+            schedule_version: Karafka::Pro::RecurringTasks.schedule.version,
            dispatched_at: Time.now.to_f,
            type: 'command',
            command: {
@@ -69,7 +70,7 @@ module Karafka

          data = {
            schema_version: SCHEMA_VERSION,
-            schedule_version: ::Karafka::Pro::RecurringTasks.schedule.version,
+            schedule_version: Karafka::Pro::RecurringTasks.schedule.version,
            dispatched_at: Time.now.to_f,
            type: 'log',
            task: {
@@ -92,9 +93,9 @@ module Karafka
          hash.to_json
        end

-        # Compresses the provided data
+        # Compresses the provided data using Zlib deflate algorithm
        #
-        # @param data [String]
+        # @param data [String]
        # @return [String] compressed data
        def compress(data)
          Zlib::Deflate.deflate(data)
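Note: the serializer builds a JSON hash and deflates it. A self-contained sketch of that roundtrip:

```ruby
require 'json'
require 'zlib'

# JSON-serialize, deflate, then inflate back (mirrors compress above)
payload = { schema_version: '1.0', dispatched_at: Time.now.to_f }.to_json

compressed = Zlib::Deflate.deflate(payload)
restored = Zlib::Inflate.inflate(compressed)

restored == payload # => true
```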
data/lib/karafka/pro/recurring_tasks/setup/config.rb
CHANGED

@@ -10,7 +10,7 @@ module Karafka
      module Setup
        # Config for recurring tasks
        class Config
-          extend ::Karafka::Core::Configurable
+          extend Karafka::Core::Configurable

          setting(:consumer_class, default: Consumer)
          setting(:deserializer, default: Deserializer.new)
@@ -27,7 +27,7 @@ module Karafka
          # a separate instance in case of heavy usage of the transactional producer, etc.
          setting(
            :producer,
-            constructor: -> { ::Karafka.producer },
+            constructor: -> { Karafka.producer },
            lazy: true
          )

data/lib/karafka/pro/recurring_tasks/task.rb
CHANGED

@@ -31,7 +31,7 @@ module Karafka
        # @param block [Proc] code to execute.
        def initialize(id:, cron:, previous_time: 0, enabled: true, &block)
          @id = id
-          @cron = ::Fugit::Cron.do_parse(cron)
+          @cron = Fugit::Cron.do_parse(cron)
          @previous_time = previous_time
          @start_time = Time.now
          @executable = block
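Note: `Fugit::Cron.do_parse` (from the `fugit` gem) raises on an invalid expression, while `Fugit::Cron.parse` returns `nil` instead. Once parsed, the cron object can compute occurrences:

```ruby
require 'fugit'

cron = Fugit::Cron.do_parse('*/5 * * * *')

cron.next_time.to_t             # next Time this cron fires
cron.previous_time.to_t         # most recent past occurrence
Fugit::Cron.parse('not a cron') # => nil instead of raising
```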
data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb
CHANGED

@@ -22,6 +22,9 @@ module Karafka
            # @param strategy [#call, nil] Strategy we want to use or nil if a default strategy
            #   (same as in OSS) should be applied
            # @param args [Hash] Pro DLQ arguments
+            # @option args [String, nil] :topic name of the dead letter queue topic
+            # @option args [Integer] :max_retries maximum number of retries before dispatch to DLQ
+            # @option args [Boolean] :independent whether DLQ runs independently
            def dead_letter_queue(strategy: nil, **args)
              return @dead_letter_queue if @dead_letter_queue

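Note: the `@option` docs above correspond to the routing DSL usage. An illustrative sketch (topic and consumer names are placeholders):

```ruby
class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders do
      consumer OrdersConsumer
      dead_letter_queue(
        topic: 'orders_dlq', # where failing messages get moved
        max_retries: 2,      # retries before dispatching to the DLQ
        independent: true    # Pro option documented in this hunk
      )
    end
  end
end
```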
data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb
CHANGED

@@ -19,7 +19,7 @@ module Karafka
            factor = topics_array.first.subscription_group_details.fetch(:multiplexing_max, 1)

            Array.new(factor) do |i|
-              ::Karafka::Routing::Topics.new(
+              Karafka::Routing::Topics.new(
                i.zero? ? topics_array : topics_array.map(&:dup)
              )
            end
data/lib/karafka/pro/routing/features/multiplexing.rb
CHANGED

@@ -17,7 +17,7 @@ module Karafka
          # @param _config [Karafka::Core::Configurable::Node] app config node
          def pre_setup(_config)
            # Make sure we use proper unique validator for topics definitions
-            ::Karafka::Routing::Contracts::ConsumerGroup.singleton_class.prepend(
+            Karafka::Routing::Contracts::ConsumerGroup.singleton_class.prepend(
              Patches::Contracts::ConsumerGroup
            )
          end
@@ -26,11 +26,11 @@ module Karafka
          #
          # @param _config [Karafka::Core::Configurable::Node] app config
          def post_setup(_config)
-            ::Karafka::App.monitor.subscribe('app.running') do
+            Karafka::App.monitor.subscribe('app.running') do
              # Do not install the manager and listener to control multiplexing unless there is
              # multiplexing enabled and it is dynamic.
              # We only need to control multiplexing when it is in a dynamic state
-              next unless ::Karafka::App
+              next unless Karafka::App
                          .subscription_groups
                          .values
                          .flat_map(&:itself)
@@ -38,8 +38,8 @@ module Karafka

              # Subscribe for events and possibility to manage via the Pro connection manager
              # that supports multiplexing
-              ::Karafka.monitor.subscribe(
-                ::Karafka::Pro::Connection::Multiplexing::Listener.new
+              Karafka.monitor.subscribe(
+                Karafka::Pro::Connection::Multiplexing::Listener.new
              )
            end
          end
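Note: the hunks above use the common karafka pattern of deferring listener installation until the `app.running` instrumentation event fires. A minimal version of the same idea (`MyListener` is a hypothetical listener object):

```ruby
Karafka::App.monitor.subscribe('app.running') do
  # Install the listener only once the process is actually running
  Karafka.monitor.subscribe(MyListener.new)
end
```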
data/lib/karafka/pro/routing/features/offset_metadata.rb
CHANGED

@@ -14,13 +14,13 @@ module Karafka
          #
          # @param _config [Karafka::Core::Configurable::Node] app config
          def post_setup(_config)
-            ::Karafka::App.monitor.subscribe('app.running') do
+            Karafka::App.monitor.subscribe('app.running') do
              # Initialize the tracker prior to becoming multi-threaded
-              ::Karafka::Processing::InlineInsights::Tracker.instance
+              Karafka::Processing::InlineInsights::Tracker.instance

              # Subscribe to the statistics reports and collect them
-              ::Karafka.monitor.subscribe(
-                ::Karafka::Pro::Processing::OffsetMetadata::Listener.new
+              Karafka.monitor.subscribe(
+                Karafka::Pro::Processing::OffsetMetadata::Listener.new
              )
            end
          end
data/lib/karafka/pro/routing/features/parallel_segments/builder.rb
CHANGED

@@ -23,7 +23,7 @@ module Karafka
          # We build a temp consumer group and a target to check if it has parallel segments
          # enabled and if so, we do not add it to the routing but instead we build the
          # appropriate number of parallel segment groups
-          temp_consumer_group = ::Karafka::Routing::ConsumerGroup.new(group_id.to_s)
+          temp_consumer_group = Karafka::Routing::ConsumerGroup.new(group_id.to_s)
          temp_target = Karafka::Routing::Proxy.new(temp_consumer_group, &block).target
          config = temp_target.parallel_segments

data/lib/karafka/pro/routing/features/patterns/patterns.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
      module Features
        class Patterns < Base
          # Representation of groups of topics
-          class Patterns < ::Karafka::Routing::Topics
+          class Patterns < Karafka::Routing::Topics
            # Finds first pattern matching given topic name
            #
            # @param topic_name [String] topic name that may match a pattern
data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb
CHANGED

@@ -108,7 +108,7 @@ module Karafka
            require 'fugit'
          rescue LoadError
            raise(
-              ::Karafka::Errors::DependencyConstraintsError,
+              Karafka::Errors::DependencyConstraintsError,
              <<~ERROR_MSG
                Failed to require fugit gem.
                Add it to your Gemfile, as it is required for the recurring tasks to work.
data/lib/karafka/pro/routing/features/swarm.rb
CHANGED

@@ -14,7 +14,7 @@ module Karafka
          # Binds our routing validation contract prior to warmup in the supervisor, so we can
          # run it when all the context should be there (config + full routing)
          #
-          # @param config [Karafka::Core::Configurable::Node]
+          # @param config [Karafka::Core::Configurable::Node]
          def post_setup(config)
            config.monitor.subscribe('app.before_warmup') do
              Contracts::Routing.new.validate!(
data/lib/karafka/pro/routing/features/throttling/topic.rb
CHANGED

@@ -46,7 +46,9 @@ module Karafka

            # Just an alias for nice API
            #
-            # @param args [
+            # @param args [Hash] Anything `#throttling` accepts
+            # @option args [Integer] :limit max messages to process in a time interval
+            # @option args [Integer] :interval time interval for processing in milliseconds
            def throttle(**args)
              throttling(**args)
            end
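Note: per the docs added above, `#throttle` simply delegates to `#throttling` with `:limit` and `:interval` (milliseconds). An illustrative routing usage (topic and consumer names are placeholders):

```ruby
routes.draw do
  topic :events do
    consumer EventsConsumer
    # At most 100 messages per 60 seconds for this topic
    throttle(limit: 100, interval: 60_000)
  end
end
```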
data/lib/karafka/pro/scheduled_messages/consumer.rb
CHANGED

@@ -7,7 +7,7 @@ module Karafka
  module Pro
    module ScheduledMessages
      # Consumer that coordinates scheduling of messages when the time comes
-      class Consumer < ::Karafka::BaseConsumer
+      class Consumer < Karafka::BaseConsumer
        include Helpers::ConfigImporter.new(
          dispatcher_class: %i[scheduled_messages dispatcher_class]
        )