karafka 2.0.0.alpha4 → 2.0.0.beta1

@@ -9,35 +9,50 @@ module Karafka
   #
   # @note Since this does not happen really often, we try to stick with same objects for the
   #   empty states most of the time, so we don't create many objects during the manager life
+  #
+  # @note Internally in the rebalance manager we have a notion of lost partitions. Partitions
+  #   that are lost are those that got revoked but did not get re-assigned back. We do not
+  #   expose this concept outside and we normalize them to revoked, as it is irrelevant from
+  #   the rest of the code's perspective, because only those that are lost are truly revoked.
   class RebalanceManager
+    # Empty array for internal usage not to create new objects
+    EMPTY_ARRAY = [].freeze
+
+    private_constant :EMPTY_ARRAY
+
     # @return [RebalanceManager]
     def initialize
-      @assigned = {}
-      @revoked = {}
+      @assigned_partitions = {}
+      @revoked_partitions = {}
+      @lost_partitions = {}
     end

-    # @return [Hash<String, Array<Integer>>] hash where the keys are the names of topics for
-    #   which we've got new partitions assigned and array with ids of the partitions as the value
-    # @note Once assigned partitions are fetched, the state will be reset since the callbacks
-    #   for new assigned partitions are set only during a state change
-    def assigned_partitions
-      return @assigned if @assigned.empty?
-
-      result = @assigned.dup
-      @assigned.clear
-      result
+    # Resets the rebalance manager state
+    # This needs to be done before each polling loop as during the polling, the state may
+    # change
+    def clear
+      @assigned_partitions.clear
+      @revoked_partitions.clear
+      @lost_partitions.clear
     end

     # @return [Hash<String, Array<Integer>>] hash where the keys are the names of topics for
     #   which we've lost partitions and array with ids of the partitions as the value
-    # @note Once revoked partitions are fetched, the state will be reset since the callbacks
-    #   for new revoked partitions are set only during a state change
+    # @note Topics and partitions that got revoked and then re-assigned back are not
+    #   considered lost
     def revoked_partitions
-      return @revoked if @revoked.empty?
+      return @revoked_partitions if @revoked_partitions.empty?
+      return @lost_partitions unless @lost_partitions.empty?
+
+      @revoked_partitions.each do |topic, partitions|
+        @lost_partitions[topic] = partitions - @assigned_partitions.fetch(topic, EMPTY_ARRAY)
+      end
+
+      @lost_partitions
+    end

-      result = @revoked.dup
-      @revoked.clear
-      result
+    # @return [Boolean] true if any partitions were revoked
+    def revoked_partitions?
+      !revoked_partitions.empty?
     end

     # Callback that kicks in inside of rdkafka, when new partitions are assigned.
@@ -46,7 +61,7 @@ module Karafka
     # @param _ [Rdkafka::Consumer]
     # @param partitions [Rdkafka::Consumer::TopicPartitionList]
     def on_partitions_assigned(_, partitions)
-      @assigned = partitions.to_h.transform_values { |part| part.map(&:partition) }
+      @assigned_partitions = partitions.to_h.transform_values { |part| part.map(&:partition) }
     end

     # Callback that kicks in inside of rdkafka, when partitions are revoked.
@@ -55,7 +70,7 @@ module Karafka
     # @param _ [Rdkafka::Consumer]
     # @param partitions [Rdkafka::Consumer::TopicPartitionList]
     def on_partitions_revoked(_, partitions)
-      @revoked = partitions.to_h.transform_values { |part| part.map(&:partition) }
+      @revoked_partitions = partitions.to_h.transform_values { |part| part.map(&:partition) }
     end
   end
 end
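
To see what the lost-partition normalization above does, the same set arithmetic can be reproduced in plain Ruby (hypothetical rebalance data, not part of the gem):

    # Partitions 0..2 of 'events' were revoked; partition 0 was assigned right back
    revoked_partitions  = { 'events' => [0, 1, 2] }
    assigned_partitions = { 'events' => [0] }

    lost_partitions = {}
    revoked_partitions.each do |topic, partitions|
      # Same computation as in #revoked_partitions: revoked minus re-assigned
      lost_partitions[topic] = partitions - assigned_partitions.fetch(topic, [])
    end

    lost_partitions # => { "events" => [1, 2] }

Only partitions 1 and 2 are truly gone; partition 0 came back during the same rebalance and is not reported as revoked.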
@@ -32,6 +32,7 @@ module Karafka
         required(:routing_builder)
         required(:status)
         required(:process)
+        required(:scheduler)
         required(:subscription_groups_builder)
       end
     end
@@ -4,7 +4,7 @@ module Karafka
   module Instrumentation
     # Default listener that hooks up to our instrumentation and uses its events for logging
     # It can be removed or replaced without any harm to the Karafka app flow.
-    class StdoutListener
+    class LoggerListener
       # Log levels that we use in this particular listener
       USED_LOG_LEVELS = %i[
         debug
@@ -22,7 +22,8 @@ module Karafka
       app.stopping
       app.stopped

-      consumer.consume
+      consumer.prepared
+      consumer.consumed
       consumer.revoked
       consumer.shutdown

@@ -1,18 +1,18 @@
 # frozen_string_literal: true

-# This Karafka component is a Pro component.
-# All of the commercial components are present in the lib/karafka/pro directory of this repository
-# and their usage requires commercial license agreement.
-#
-# Karafka also has a commercial-friendly license, commercial support and commercial components.
-#
-# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
-# your code to Maciej Mensfeld.
-
 module Karafka
   module Pro
     # Karafka Pro ActiveJob components
     module ActiveJob
+      # This Karafka component is a Pro component.
+      # All of the commercial components are present in the lib/karafka/pro directory of this
+      # repository and their usage requires commercial license agreement.
+      #
+      # Karafka also has a commercial-friendly license, commercial support and commercial
+      # components.
+      #
+      # By sending a pull request to the pro components, you are agreeing to transfer the
+      # copyright of your code to Maciej Mensfeld.
+
       # Pro dispatcher that sends the ActiveJob job to a proper topic based on the queue name
       # and that allows injecting additional options into the producer, effectively allowing for a
       # much better and more granular control over the dispatch and consumption process.
@@ -1,17 +1,17 @@
 # frozen_string_literal: true

-# This Karafka component is a Pro component.
-# All of the commercial components are present in the lib/karafka/pro directory of this repository
-# and their usage requires commercial license agreement.
-#
-# Karafka also has a commercial-friendly license, commercial support and commercial components.
-#
-# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
-# your code to Maciej Mensfeld.
-
 module Karafka
   module Pro
     module ActiveJob
+      # This Karafka component is a Pro component.
+      # All of the commercial components are present in the lib/karafka/pro directory of this
+      # repository and their usage requires commercial license agreement.
+      #
+      # Karafka also has a commercial-friendly license, commercial support and commercial
+      # components.
+      #
+      # By sending a pull request to the pro components, you are agreeing to transfer the
+      # copyright of your code to Maciej Mensfeld.
+
       # Contract for validating the options that can be altered with `#karafka_options` per job
       # class that works with Pro features.
       class JobOptionsContract < ::Karafka::ActiveJob::JobOptionsContract
@@ -1,15 +1,16 @@
 # frozen_string_literal: true

-# This Karafka component is a Pro component.
-# All of the commercial components are present in the lib/karafka/pro directory of this repository
-# and their usage requires commercial license agreement.
-#
-# Karafka also has a commercial-friendly license, commercial support and commercial components.
-#
-# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
-# your code to Maciej Mensfeld.
 module Karafka
   module Pro
+    # This Karafka component is a Pro component.
+    # All of the commercial components are present in the lib/karafka/pro directory of this
+    # repository and their usage requires commercial license agreement.
+    #
+    # Karafka also has a commercial-friendly license, commercial support and commercial
+    # components.
+    #
+    # By sending a pull request to the pro components, you are agreeing to transfer the
+    # copyright of your code to Maciej Mensfeld.
+
     # Loader requires and loads all the pro components only when they are needed
     class Loader
       class << self
@@ -17,11 +18,15 @@ module Karafka
         # @param config [Dry::Configurable::Config] whole app config that we can alter with pro
         #   components
         def setup(config)
+          require_relative 'performance_tracker'
           require_relative 'active_job/dispatcher'
           require_relative 'active_job/job_options_contract'

           config.internal.active_job.dispatcher = ActiveJob::Dispatcher.new
           config.internal.active_job.job_options_contract = ActiveJob::JobOptionsContract.new
+
+          # Monitor time needed to process each message from a single partition
+          config.monitor.subscribe(PerformanceTracker.instance)
         end
       end
     end
@@ -0,0 +1,80 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Pro
+    # This Karafka component is a Pro component.
+    # All of the commercial components are present in the lib/karafka/pro directory of this
+    # repository and their usage requires commercial license agreement.
+    #
+    # Karafka also has a commercial-friendly license, commercial support and commercial
+    # components.
+    #
+    # By sending a pull request to the pro components, you are agreeing to transfer the
+    # copyright of your code to Maciej Mensfeld.
+
+    # Tracker used to keep track of performance metrics
+    # It provides insights that can be used to optimize the processing flow
+    class PerformanceTracker
+      include Singleton
+
+      # How many samples do we collect per topic partition
+      SAMPLES_COUNT = 200
+
+      private_constant :SAMPLES_COUNT
+
+      # Builds up nested concurrent hash for data tracking
+      def initialize
+        @processing_times = Concurrent::Hash.new do |topics_hash, topic|
+          topics_hash[topic] = Concurrent::Hash.new do |partitions_hash, partition|
+            # This array does not have to be concurrent because we always access single
+            # partition data via instrumentation that operates in a single thread via consumer
+            partitions_hash[partition] = []
+          end
+        end
+      end
+
+      # @param topic [String]
+      # @param partition [Integer]
+      # @return [Float] p95 processing time of a single message from a single topic partition
+      def processing_time_p95(topic, partition)
+        values = @processing_times[topic][partition]
+
+        return 0 if values.empty?
+        return values.first if values.size == 1
+
+        percentile(0.95, values)
+      end
+
+      # @private
+      # @param event [Dry::Events::Event] event details
+      # Tracks time taken to process a single message of a given topic partition
+      def on_consumer_consumed(event)
+        consumer = event[:caller]
+        messages = consumer.messages
+        topic = messages.metadata.topic
+        partition = messages.metadata.partition
+
+        samples = @processing_times[topic][partition]
+        samples << event[:time] / messages.count
+
+        return unless samples.size > SAMPLES_COUNT
+
+        samples.shift
+      end
+
+      private
+
+      # Computes the requested percentile out of the provided values
+      # @param percentile [Float]
+      # @param values [Array<Numeric>] all the values based on which we compute the percentile
+      # @return [Float] computed percentile
+      def percentile(percentile, values)
+        values_sorted = values.sort
+
+        floor = (percentile * (values_sorted.length - 1) + 1).floor - 1
+        mod = (percentile * (values_sorted.length - 1) + 1).modulo(1)
+
+        values_sorted[floor] + (mod * (values_sorted[floor + 1] - values_sorted[floor]))
+      end
+    end
+  end
+end
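
The interpolated percentile above can be checked by hand. With five hypothetical samples, p95 falls between the last two values (plain Ruby, not part of the gem):

    values = [10, 20, 30, 40, 50].sort

    # rank = 0.95 * (5 - 1) + 1 = 4.8 -> zero-based floor index 3, fractional part 0.8
    floor = (0.95 * (values.length - 1) + 1).floor - 1 # => 3
    mod   = (0.95 * (values.length - 1) + 1).modulo(1) # => ~0.8 (floating point)

    values[floor] + (mod * (values[floor + 1] - values[floor])) # => ~48.0

Once the loader subscribes the tracker (see the loader hunk above), the running p95 for a topic partition can then presumably be read back via PerformanceTracker.instance.processing_time_p95(topic, partition).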
@@ -4,10 +4,10 @@ module Karafka
   # Namespace that encapsulates all the logic related to processing data.
   module Processing
     # Executors:
-    #   - run consumers code with provided messages batch (for `#call`) or run given teardown
-    #     operations when needed from separate threads.
-    #   - they re-create consumer instances in case of partitions that were revoked
-    #     and assigned back.
+    #   - run consumers code (for `#call`) or run given preparation / teardown operations when
+    #     needed from separate threads.
+    #   - they re-create consumer instances in case of partitions that were revoked and
+    #     assigned back.
     #
     # @note Executors are not removed after partition is revoked. They are not that big and will
     #   be re-used in case of a re-claim
@@ -21,21 +21,21 @@ module Karafka
       # @param group_id [String] id of the subscription group to which the executor belongs
       # @param client [Karafka::Connection::Client] kafka client
       # @param topic [Karafka::Routing::Topic] topic for which this executor will run
-      # @param pause [Karafka::TimeTrackers::Pause] fetch pause object for crash pausing
-      def initialize(group_id, client, topic, pause)
+      # @param pause_tracker [Karafka::TimeTrackers::Pause] fetch pause tracker for pausing
+      def initialize(group_id, client, topic, pause_tracker)
         @id = SecureRandom.uuid
         @group_id = group_id
         @client = client
         @topic = topic
-        @pause = pause
+        @pause_tracker = pause_tracker
       end

-      # Runs consumer data processing against given batch and handles failures and errors.
+      # Builds the consumer instance and sets all that is needed to run the user consumption logic
       #
       # @param messages [Array<Rdkafka::Consumer::Message>] raw rdkafka messages
       # @param received_at [Time] the moment we've received the batch (actually the moment we've
       #   enqueued it, but good enough)
-      def consume(messages, received_at)
+      def prepare(messages, received_at)
         # Recreate consumer with each batch if persistence is not enabled
         # We reload the consumers with each batch instead of relying on some external signals
         # when needed for consistency. That way devs may have it on or off and not in this
@@ -49,6 +49,11 @@ module Karafka
           received_at
         )

+        consumer.on_prepared
+      end
+
+      # Runs consumer data processing against given batch and handles failures and errors.
+      def consume
         # We run the consumer client logic...
         consumer.on_consume
       end
@@ -86,7 +91,7 @@ module Karafka
         consumer = @topic.consumer.new
         consumer.topic = @topic
         consumer.client = @client
-        consumer.pause = @pause
+        consumer.pause_tracker = @pause_tracker
         consumer.producer = ::Karafka::App.producer
         consumer
       end
@@ -5,6 +5,8 @@ module Karafka
     # Namespace for all the jobs that are supposed to run in workers.
     module Jobs
       # Base class for all the job types that are supposed to run in workers threads.
+      # Each job can have 3 main entry-points: `#prepare`, `#call` and `#teardown`
+      # Only `#call` is required.
       class Base
         extend Forwardable

@@ -12,6 +14,20 @@ module Karafka
         def_delegators :executor, :id, :group_id
         attr_reader :executor
+
+        # When redefined can run any code that should run before executing the proper code
+        def prepare; end
+
+        # When redefined can run any code that should run after executing the proper code
+        def teardown; end
+
+        # @return [Boolean] is this a non-blocking job
+        # @note A blocking job is a job that will cause the job queue to wait until it is
+        #   finished before removing the lock on new jobs being added
+        # @note All the jobs are blocking by default
+        def non_blocking?
+          false
+        end
       end
     end
   end
 end
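
A hypothetical subclass shows how the new entry points fit together (the class below is illustrative only and not part of the gem):

    # Illustrative only: a job that opts out of blocking the queue while it runs
    class SlowMaintenanceJob < Karafka::Processing::Jobs::Base
      def prepare
        @started_at = Time.now # runs before #call, always in a blocking fashion
      end

      def call
        # long-running work that should not hold up other jobs of the group
      end

      def teardown
        Karafka.logger.info("Took #{Time.now - @started_at}s") # post-#call cleanup
      end

      # Lets the queue release its lock once #prepare has finished
      def non_blocking?
        true
      end
    end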
@@ -18,9 +18,14 @@ module Karafka
         super()
       end

-      # Runs the given executor.
+      # Runs the preparations on the executor
+      def prepare
+        executor.prepare(@messages, @created_at)
+      end
+
+      # Runs the given executor
       def call
-        executor.consume(@messages, @created_at)
+        executor.consume
       end
     end
   end
@@ -21,7 +21,7 @@ module Karafka
       # We cannot use a single semaphore as it could potentially block in listeners that should
       # process with their data and also could unlock when a given group needs to remain locked
       @semaphores = Hash.new { |h, k| h[k] = Queue.new }
-      @in_processing = Hash.new { |h, k| h[k] = {} }
+      @in_processing = Hash.new { |h, k| h[k] = [] }
       @mutex = Mutex.new
     end

@@ -44,9 +44,9 @@ module Karafka
       @mutex.synchronize do
         group = @in_processing[job.group_id]

-        raise(Errors::JobsQueueSynchronizationError, job.group_id) if group.key?(job.id)
+        raise(Errors::JobsQueueSynchronizationError, job.group_id) if group.include?(job)

-        group[job.id] = true
+        group << job
       end

       @queue << job
@@ -60,14 +60,21 @@ module Karafka
         @queue.pop
       end

+      # Causes the wait lock to re-check the lock conditions and potentially unlock.
+      # @param group_id [String] id of the group we want to unlock for one tick
+      # @note This does not release the wait lock. It just causes a conditions recheck
+      def tick(group_id)
+        @semaphores[group_id] << true
+      end
+
       # Marks a given job from a given group as completed. When there are no more jobs from a
       # given group to be executed, we won't wait.
       #
       # @param [Jobs::Base] job that was completed
       def complete(job)
         @mutex.synchronize do
-          @in_processing[job.group_id].delete(job.id)
-          @semaphores[job.group_id] << true
+          @in_processing[job.group_id].delete(job)
+          tick(job.group_id)
         end
       end
@@ -79,7 +86,7 @@ module Karafka
       @mutex.synchronize do
         @in_processing[group_id].clear
         # We unlock it just in case it was blocked when clearing started
-        @semaphores[group_id] << true
+        tick(group_id)
       end
     end

@@ -108,13 +115,15 @@ module Karafka
       # @param group_id [String] id of the group in which jobs we're interested.
       # @return [Boolean] should we keep waiting or not
       def wait?(group_id)
+        group = @in_processing[group_id]
+
         # If it is stopping, all the previous messages that are processed at the moment need to
         # finish. Otherwise we may risk closing the client and committing offsets afterwards
-        return false if Karafka::App.stopping? && @in_processing[group_id].empty?
+        return false if Karafka::App.stopping? && group.empty?
         return false if @queue.closed?
-        return false if @in_processing[group_id].empty?
+        return false if group.empty?

-        true
+        !group.all?(&:non_blocking?)
       end
     end
   end
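
The new return value means a group keeps the listener waiting only while it still holds at least one blocking job, which can be verified with stand-in objects (plain Ruby, illustrative only):

    # Minimal stand-ins for Jobs::Base instances, just for this illustration
    Job = Struct.new(:blocking) do
      def non_blocking?
        !blocking
      end
    end

    group = [Job.new(true), Job.new(false)]
    !group.all?(&:non_blocking?) # => true: a blocking job is in flight, keep waiting

    group.shift                  # the blocking job completes
    !group.all?(&:non_blocking?) # => false: only non-blocking work remains, stop waiting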
@@ -4,6 +4,18 @@ module Karafka
   module Processing
     # Workers are used to run jobs in separate threads.
     # Workers are the main processing units of the Karafka framework.
+    #
+    # Each job runs in three stages:
+    #   - prepare - here we can run any code that we would need to run blocking before we allow
+    #     the job to run fully async (non-blocking). This will always run in a blocking way and
+    #     can be used to make sure all the resources and external dependencies are satisfied
+    #     before going async.
+    #
+    #   - call - the actual processing logic that can run sync or async
+    #
+    #   - teardown - it should include any code that we want to run after the user code was
+    #     executed. This can be used to unlock certain resources or do other things that are
+    #     not user code but need to run once it is done.
     class Worker
       extend Forwardable

@@ -33,7 +45,18 @@ module Karafka
         job = @jobs_queue.pop

         if job
+          job.prepare
+
+          # If a job is marked as non-blocking, we can run a tick in the job queue and, if
+          # there are no other blocking factors, the job queue will be unlocked.
+          # If this does not run, everything stays blocking and the job queue won't let
+          # anything pass until this job is done.
+          @jobs_queue.tick(job.group_id) if job.non_blocking?
+
           job.call
+
+          job.teardown
+
           true
         else
           false
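
Condensed, the per-job flow of a worker now looks roughly like this (a simplified sketch of the loop body above with a hypothetical method name, not the exact source):

    # Simplified shape of one worker iteration (sketch only)
    def process_one(job)
      job.prepare                                          # always blocking
      @jobs_queue.tick(job.group_id) if job.non_blocking?  # may release the group early
      job.call                                             # the actual processing logic
      job.teardown                                         # post-processing cleanup
    end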
@@ -82,8 +82,20 @@ if rails
     initializer 'karafka.require_karafka_boot_file' do |app|
       rails6plus = Rails.gem_version >= Gem::Version.new('6.0.0')

+      # If the boot file location is set to "false", we should not raise an exception and we
+      # should just not load the karafka stuff. Setting this explicitly to false indicates that
+      # karafka is part of the supply chain but is not a first-class citizen of a given system
+      # (it may be just a dependency of a dependency), thus the railtie should not kick in to
+      # load the non-existing boot file
+      next if Karafka.boot_file.to_s == 'false'
+
       karafka_boot_file = Rails.root.join(Karafka.boot_file.to_s).to_s

+      # Provide a more comprehensive error for when there is no boot file
+      unless File.exist?(karafka_boot_file)
+        raise(Karafka::Errors::MissingBootFileError, karafka_boot_file)
+      end
+
       if rails6plus
         app.reloader.to_prepare do
           # Load Karafka boot file, so it can be used in Rails server context
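
Since the boot file location is resolved from the KARAFKA_BOOT_FILE environment variable (with karafka.rb as the fallback), an app that only pulls Karafka in transitively can presumably opt out like so:

    # Illustrative opt-out: skip the railtie boot entirely
    ENV['KARAFKA_BOOT_FILE'] = 'false'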
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+module Karafka
+  # FIFO scheduler for messages coming from various topics and partitions
+  class Scheduler
+    # Yields messages from partitions in the fifo order
+    #
+    # @param messages_buffer [Karafka::Connection::MessagesBuffer] messages buffer with data from
+    #   multiple topics and partitions
+    # @yieldparam [String] topic name
+    # @yieldparam [Integer] partition number
+    # @yieldparam [Array<Rdkafka::Consumer::Message>] topic partition aggregated results
+    def call(messages_buffer)
+      messages_buffer.each do |topic, partitions|
+        partitions.each do |partition, messages|
+          yield(topic, partition, messages)
+        end
+      end
+    end
+  end
+end
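
Because scheduling is now pluggable (see the `scheduler` setting in the config hunk below), a custom ordering can keep the same `#call`/yield contract. A hypothetical variant that dispatches the largest batches first (illustrative only, not part of the gem):

    # Illustrative alternative scheduler honoring the same call/yield contract
    class LargestBatchFirstScheduler
      def call(messages_buffer)
        flat = []

        messages_buffer.each do |topic, partitions|
          partitions.each do |partition, messages|
            flat << [topic, partition, messages]
          end
        end

        # Yield the busiest topic partitions first instead of FIFO
        flat.sort_by { |_, _, messages| -messages.count }.each do |args|
          yield(*args)
        end
      end
    end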
@@ -60,7 +60,7 @@ module Karafka
     # option [Boolean] should we leave offset management to the user
     setting :manual_offset_management, default: false
     # option max_messages [Integer] how many messages do we want to fetch from Kafka in one go
-    setting :max_messages, default: 100_000
+    setting :max_messages, default: 1_000
     # option [Integer] number of milliseconds we can wait while fetching data
     setting :max_wait_time, default: 10_000
     # option shutdown_timeout [Integer] the number of milliseconds after which Karafka no
@@ -96,6 +96,8 @@ module Karafka
     # option subscription_groups_builder [Routing::SubscriptionGroupsBuilder] subscription
     #   group builder
     setting :subscription_groups_builder, default: Routing::SubscriptionGroupsBuilder.new
+    # option scheduler [Class] scheduler we will be using
+    setting :scheduler, default: Scheduler.new

     # Karafka components for ActiveJob
     setting :active_job do
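
Assuming the setting above is reachable the same way as its neighbors, a replacement scheduler would then be plugged in during setup along these lines (hypothetical; the exact config path may differ):

    class KarafkaApp < Karafka::App
      setup do |config|
        # Swap the default FIFO scheduler for a custom one (sketch from the note above)
        config.scheduler = LargestBatchFirstScheduler.new
      end
    end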
@@ -40,7 +40,7 @@ class KarafkaApp < Karafka::App
   # interested in logging events for certain environments. Since instrumentation
   # notifications add extra boilerplate, if you want to achieve max performance,
   # listen to only what you really need for given environment.
-  Karafka.monitor.subscribe(Karafka::Instrumentation::StdoutListener.new)
+  Karafka.monitor.subscribe(Karafka::Instrumentation::LoggerListener.new)
   # Karafka.monitor.subscribe(Karafka::Instrumentation::ProctitleListener.new)

   routes.draw do
@@ -41,9 +41,12 @@ module Karafka
     # Pauses the processing from now till the end of the interval (backoff or non-backoff)
     # and records the count.
-    def pause
+    # @param timeout [Integer] timeout value in milliseconds that overrides the default timeout
+    # @note Providing this value can be useful when we explicitly want to pause for a certain
+    #   period of time, outside of any regular pausing logic
+    def pause(timeout = backoff_interval)
       @started_at = now
-      @ends_at = @started_at + backoff_interval
+      @ends_at = @started_at + timeout
       @count += 1
     end

@@ -53,6 +56,11 @@ module Karafka
       @ends_at = nil
     end

+    # Expires the pause, so it is considered expired immediately, regardless of remaining time
+    def expire
+      @ends_at = nil
+    end
+
     # @return [Boolean] are we paused from processing
     def paused?
       !@started_at.nil?
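
Taken together, the tracker can now be paused for an explicit period or force-expired. A usage sketch (the constructor keywords below are assumed, shown for illustration only):

    # Illustrative only; the constructor signature is an assumption
    pause = Karafka::TimeTrackers::Pause.new(timeout: 1_000, max_timeout: 30_000)

    pause.pause          # pauses for the computed backoff interval
    pause.pause(5_000)   # pauses for exactly 5 seconds, bypassing the backoff logic
    pause.expire         # immediately considered expired, regardless of remaining time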
@@ -3,5 +3,5 @@
 # Main module namespace
 module Karafka
   # Current Karafka version
-  VERSION = '2.0.0.alpha4'
+  VERSION = '2.0.0.beta1'
 end
data.tar.gz.sig CHANGED (binary file)