karafka 2.2.11 → 2.2.12

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: '08439276b10ee121dabe7ef6496334d7aef3cf6a8a49fdc723e6333a147d57d7'
- data.tar.gz: f1022962556a4e3397ec256bdd63e7ced077cadabddf72849473eef8ec190186
+ metadata.gz: 2ee648826503a1b1841a97e368ec2894a16eadc3509270d2a5dbbbe9ee703b3a
+ data.tar.gz: 915285e224ab6dcaa4b2f75e2b6aa52f4f6eb0f613b8b06efe78e2ef4ff3f514
  SHA512:
- metadata.gz: 75f6a68aba0fa013bcdbcd618c9186f5b2e8870723aaef87bbfb8cb745c4a33862efac55c2a46938b7ad843f1f5e6640ebe381861c4365f459df8f115288cf2d
- data.tar.gz: 6e495a3376f1c9650039534260c3d21ea697b77104ad2b9d7393b1ae8301cc29a116a6757efb9ead13716932ec7f6b188ab2404f3f0d86a68942f2c9972a5dc6
+ metadata.gz: 1f7f109c533a98a46306be62a2172432f0d18af7003e401d3a894aa356bc2cae2622ba4c323bfdd230a66f0ae544a7cfb61ee0168b396e2e809e408a657eecb6
+ data.tar.gz: d10de0ca361236c35bed27ca3c5db13e9e245805412f85c2d8d4e6a140fe088025403be7a65e1d97831613f02032bfe3fb2194c5ec7f6a880bc7ddc67a112813
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,10 @@
  # Karafka framework changelog

+ ## 2.2.12 (2023-11-09)
+ - [Improvement] Rewrite the polling engine so that statistics and error callbacks are updated even during long non-LRJ processing or long `max_wait_time` setups. This change makes statistics and background error emission stable and time-reliable.
+ - [Improvement] Auto-update Inline Insights when new insights are present, for all consumers and not only LRJ (OSS and Pro).
+ - [Improvement] Alias `#insights` with `#inline_insights` and `#insights?` with `#inline_insights?`.
+
  ## 2.2.11 (2023-11-03)
  - [Improvement] Allow marking as consumed in the user `#synchronize` block.
  - [Improvement] Make the whole Pro VP marking as consumed concurrency-safe for both async and sync scenarios.
data/Gemfile.lock CHANGED
@@ -1,9 +1,9 @@
  PATH
  remote: .
  specs:
- karafka (2.2.11)
- karafka-core (>= 2.2.6, < 2.3.0)
- waterdrop (>= 2.6.10, < 3.0.0)
+ karafka (2.2.12)
+ karafka-core (>= 2.2.7, < 2.3.0)
+ waterdrop (>= 2.6.11, < 3.0.0)
  zeitwerk (~> 2.3)

  GEM
@@ -39,10 +39,10 @@ GEM
  activesupport (>= 6.1)
  i18n (1.14.1)
  concurrent-ruby (~> 1.0)
- karafka-core (2.2.6)
+ karafka-core (2.2.7)
  concurrent-ruby (>= 1.1)
- karafka-rdkafka (>= 0.13.8, < 0.15.0)
- karafka-rdkafka (0.13.8)
+ karafka-rdkafka (>= 0.13.9, < 0.15.0)
+ karafka-rdkafka (0.13.9)
  ffi (~> 1.15)
  mini_portile2 (~> 2.6)
  rake (> 12)
data/config/locales/errors.yml CHANGED
@@ -26,6 +26,7 @@ en:
  internal.active_job.consumer_class: cannot be nil
  internal.status_format: needs to be present
  internal.process_format: needs to be present
+ internal.tick_interval_format: needs to be an integer bigger or equal to 1000
  internal.routing.builder_format: needs to be present
  internal.routing.subscription_groups_builder_format: needs to be present
  internal.connection.proxy.query_watermark_offsets.timeout_format: needs to be an integer bigger than 0
data/docker-compose.yml CHANGED
@@ -3,7 +3,7 @@ version: '2'
  services:
  kafka:
  container_name: kafka
- image: confluentinc/cp-kafka:7.5.1
+ image: confluentinc/cp-kafka:7.5.2

  ports:
  - 9092:9092
data/karafka.gemspec CHANGED
@@ -21,8 +21,8 @@ Gem::Specification.new do |spec|
  without having to focus on things that are not your business domain.
  DESC

- spec.add_dependency 'karafka-core', '>= 2.2.6', '< 2.3.0'
- spec.add_dependency 'waterdrop', '>= 2.6.10', '< 3.0.0'
+ spec.add_dependency 'karafka-core', '>= 2.2.7', '< 2.3.0'
+ spec.add_dependency 'waterdrop', '>= 2.6.11', '< 3.0.0'
  spec.add_dependency 'zeitwerk', '~> 2.3'

  if $PROGRAM_NAME.end_with?('gem')
data/lib/karafka/connection/client.rb CHANGED
@@ -43,11 +43,13 @@ module Karafka
  @closed = false
  @subscription_group = subscription_group
  @buffer = RawMessagesBuffer.new
+ @tick_interval = ::Karafka::App.config.internal.tick_interval
  @rebalance_manager = RebalanceManager.new(@subscription_group.id)
  @rebalance_callback = Instrumentation::Callbacks::Rebalance.new(
  @subscription_group.id,
  @subscription_group.consumer_group.id
  )
+ @events_poller = Helpers::IntervalRunner.new { events_poll }
  @kafka = build_consumer
  # There are a few operations that can happen in parallel from the listener threads as well
  # as from the workers. They are not fully thread-safe because they may be composed out of
@@ -64,6 +66,8 @@ module Karafka
 
  # Fetches messages within boundaries defined by the settings (time, size, topics, etc).
  #
+ # Also periodically runs the events polling to trigger events callbacks.
+ #
  # @return [Karafka::Connection::MessagesBuffer] messages buffer that holds messages per topic
  # partition
  # @note This method should not be executed from many threads at the same time
@@ -73,38 +77,46 @@ module Karafka
  @buffer.clear
  @rebalance_manager.clear

+ events_poll
+
  loop do
  time_poll.start

  # Don't fetch more messages if we do not have any time left
  break if time_poll.exceeded?
- # Don't fetch more messages if we've fetched max as we've wanted
+ # Don't fetch more messages if we've fetched the max that we wanted
  break if @buffer.size >= @subscription_group.max_messages

  # Fetch a message within our time boundaries
- message = poll(time_poll.remaining)
+ response = poll(time_poll.remaining)

  # Put a message to the buffer if there is one
- @buffer << message if message
+ @buffer << response if response && response != :tick_time

  # Upon polling, the rebalance manager might have been updated.
  # If partition revocation happens, we need to remove messages from revoked partitions
  # as well as ensure we do not have duplicates due to the offset reset for partitions
  # that we got assigned
+ #
  # We also do an early break, so the information about the rebalance is used as soon as possible
  if @rebalance_manager.changed?
+ # Since rebalances do not occur often, we can run events polling as well without
+ # any throttling
+ events_poll
  remove_revoked_and_duplicated_messages
  break
  end

+ @events_poller.call
+
  # Track time spent on all of the processing and polling
  time_poll.checkpoint

  # Finally, once we've (potentially) removed revoked partitions, etc., if no messages were returned
- # we can break.
+ # and it was not an early poll exit, we can break.
  # Worth keeping in mind that the rebalance manager might have been updated despite no
  # messages being returned during a poll
- break unless message
+ break unless response
  end

  @buffer
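For illustration, here is a runnable, simplified sketch of the control flow above. The three `#poll` outcomes (a message, `:tick_time`, `nil`) are simulated with a canned list; only the `:tick_time` handling is new in this release:

```ruby
# Simulated poll outcomes; in Karafka these would come from Client#poll
responses = ['msg-1', :tick_time, 'msg-2', nil]
buffer = []

loop do
  response = responses.shift

  case response
  when nil        then break   # total wait exhausted or internal event: stop polling
  when :tick_time then next    # poll was capped by the tick interval: keep looping
  else                 buffer << response
  end
end

buffer # => ["msg-1", "msg-2"]
```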
@@ -299,22 +311,38 @@ module Karafka
  def reset
  close

+ @events_poller.reset
  @closed = false
  @paused_tpls.clear
  @kafka = build_consumer
  end

- # Runs a single poll ignoring all the potential errors
+ # Runs a single poll on the main queue and the consumer queue, ignoring all the potential errors
  # This is used as a keep-alive in the shutdown stage and any errors that happen here are
  # irrelevant from the shutdown process perspective
  #
- # This is used only to trigger rebalance callbacks
+ # This is used only to trigger rebalance callbacks and other callbacks
  def ping
+ events_poll(100)
  poll(100)
  rescue Rdkafka::RdkafkaError
  nil
  end

+ # Triggers the rdkafka main queue events by consuming this queue. This is not the consumer
+ # consumption queue but the one with:
+ # - error callbacks
+ # - stats callbacks
+ # - OAUTHBEARER token refresh callbacks
+ #
+ # @param timeout [Integer] number of milliseconds to wait on events or 0 not to wait.
+ #
+ # @note It is non-blocking when the timeout is 0 and will not wait if the queue is empty.
+ # It costs up to 2ms when no callbacks are triggered.
+ def events_poll(timeout = 0)
+ @kafka.events_poll(timeout)
+ end
+
  private

  # When we cannot store an offset, it means we no longer own the partition
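The split between the consumer (messages) queue and the main (events) queue can be illustrated with the `karafka-rdkafka` API surface visible in this diff (`consumer_poll_set=` and `#events_poll`). A minimal standalone sketch; the broker address and topic are placeholders, not part of this release:

```ruby
require 'rdkafka'

config = Rdkafka::Config.new(
  'bootstrap.servers' => 'localhost:9092', # placeholder broker
  'group.id' => 'example-group',
  'statistics.interval.ms' => 5_000
)
# Detach the main queue from the consumer queue, so events callbacks
# (stats, errors, OAUTHBEARER) have to be serviced explicitly
config.consumer_poll_set = false

consumer = config.consumer
consumer.subscribe('example-topic') # placeholder topic

loop do
  message = consumer.poll(250) # consumer queue: messages only
  consumer.events_poll(0)      # main queue: non-blocking callbacks servicing
  puts message.payload if message
end
```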
@@ -464,18 +492,52 @@ module Karafka
  @kafka.position(tpl).to_h.fetch(topic).first.offset || -1
  end

- # Performs a single poll operation and handles retries and error
+ # Performs a single poll operation and handles retries and errors
+ #
+ # Keep in mind that this timeout will be limited by the tick interval value, because we cannot
+ # block on a single poll longer than that. Otherwise our events polling would not be able to
+ # run frequently enough. This means that even if you provide a big value, it will not block
+ # for that long. This is anyhow compensated by `#batch_poll`, which can run for an extended
+ # period of time but will run events polling frequently while waiting for the requested total
+ # time.
  #
- # @param timeout [Integer] timeout for a single poll
- # @return [Rdkafka::Consumer::Message, nil] fetched message or nil if nothing polled
+ # @param timeout [Integer] timeout for a single poll.
+ # @return [Rdkafka::Consumer::Message, nil, Symbol] fetched message, nil if nothing polled
+ # within the time we had, or a symbol indicating the early return reason
  def poll(timeout)
  time_poll ||= TimeTrackers::Poll.new(timeout)

  return nil if time_poll.exceeded?

  time_poll.start
+ remaining = time_poll.remaining
+
+ # We should not run a single poll longer than the tick frequency. Otherwise during a single
+ # `#batch_poll` we would not be able to run `#events_poll` often enough, effectively
+ # blocking events from being handled.
+ poll_tick = timeout > @tick_interval ? @tick_interval : timeout
+
+ result = @kafka.poll(poll_tick)
+
+ # If we've got a message, we can return it
+ return result if result
+
+ time_poll.checkpoint
+
+ # We need to check if we have used all the allocated time as, depending on the outcome, the
+ # batch loop behavior will differ. Using all the time means that we had nothing to do as no
+ # messages were present, but if we did not exceed the total time, it means we can still try
+ # polling again as we are within the user expected max wait time
+ used = remaining - time_poll.remaining
+
+ # In case we did not use enough time, it means that an internal event occurred, meaning
+ # that something has changed without messages being published. For example, a rebalance.
+ # In cases like this we finish early as well
+ return nil if used < poll_tick

- @kafka.poll(timeout)
+ # If we did not exceed the total time allocated, it means that we finished because of the
+ # tick interval time limitations and not because time ran out without any data
+ time_poll.exceeded? ? nil : :tick_time
  rescue ::Rdkafka::RdkafkaError => e
  early_report = false

@@ -535,6 +597,10 @@ module Karafka
  ::Rdkafka::Config.logger = ::Karafka::App.config.logger
  config = ::Rdkafka::Config.new(@subscription_group.kafka)
  config.consumer_rebalance_listener = @rebalance_callback
+ # We want to manage the events queue independently from the messages queue. Thanks to that,
+ # we can ensure that we get statistics and errors often enough even when not polling
+ # new messages. This allows us to report statistics while data is still being processed
+ config.consumer_poll_set = false

  consumer = config.consumer
  @name = consumer.name
data/lib/karafka/connection/listener.rb CHANGED
@@ -14,6 +14,12 @@ module Karafka
  # @return [String] id of this listener
  attr_reader :id

+ # How long to wait in the initial events poll. Increases the chances of having the initial
+ # events immediately available
+ INITIAL_EVENTS_POLL_TIMEOUT = 100
+
+ private_constant :INITIAL_EVENTS_POLL_TIMEOUT
+
  # @param consumer_group_coordinator [Karafka::Connection::ConsumerGroupCoordinator]
  # @param subscription_group [Karafka::Routing::SubscriptionGroup]
  # @param jobs_queue [Karafka::Processing::JobsQueue] queue where we should push work
@@ -32,6 +38,7 @@ module Karafka
  @partitioner = proc_config.partitioner_class.new(subscription_group)
  # We reference the scheduler here as it is much faster than fetching it each time
  @scheduler = proc_config.scheduler
+ @events_poller = Helpers::IntervalRunner.new { @client.events_poll }
  # We keep one buffer for messages to preserve memory and not allocate extra objects
  # We can do it that way because we always first schedule jobs using messages before we
  # fetch another batch.
@@ -84,6 +91,15 @@ module Karafka
  # Kafka connections / Internet connection issues / Etc. Business logic problems should not
  # propagate this far.
  def fetch_loop
+ # Run the initial events fetch to improve the chances of having metrics and initial
+ # callbacks triggered on start.
+ #
+ # In theory this may slow down the initial boot, but we limit it to 100ms, so it should
+ # not have a big initial impact. It may not be enough, but Karafka does not give boot
+ # guarantees of statistics or other callbacks being immediately available, hence this is
+ # a fair trade-off
+ @client.events_poll(INITIAL_EVENTS_POLL_TIMEOUT)
+
  # Run the main loop as long as we are not stopping or moving into quiet mode
  until Karafka::App.done?
  Karafka.monitor.instrument(
@@ -287,7 +303,7 @@ module Karafka

  # Waits for all the jobs from a given subscription group to finish before moving forward
  def wait
- @jobs_queue.wait(@subscription_group.id)
+ @jobs_queue.wait(@subscription_group.id) { @events_poller.call }
  end

  # Waits without blocking the polling
@@ -318,6 +334,7 @@ module Karafka
  # resetting.
  @jobs_queue.wait(@subscription_group.id)
  @jobs_queue.clear(@subscription_group.id)
+ @events_poller.reset
  @client.reset
  @coordinators.reset
  @executors = Processing::ExecutorsBuffer.new(@client, @subscription_group)
data/lib/karafka/contracts/config.rb CHANGED
@@ -46,6 +46,9 @@ module Karafka
  nested(:internal) do
  required(:status) { |val| !val.nil? }
  required(:process) { |val| !val.nil? }
+ # In theory this could be less than a second, however this would impact the maximum time
+ # of a single consumer queue poll, hence we prevent it
+ required(:tick_interval) { |val| val.is_a?(Integer) && val >= 1_000 }

  nested(:connection) do
  nested(:proxy) do
data/lib/karafka/helpers/interval_runner.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ module Karafka
+ module Helpers
+ # Object responsible for running given code with a given interval. It won't run the given
+ # code more often than the given interval.
+ #
+ # This allows us to execute certain code only once in a while.
+ #
+ # This can be used when we have code that could be invoked often due to it being in loops
+ # or other places but would only slow things down if it ran with each tick.
+ class IntervalRunner
+ include Karafka::Core::Helpers::Time
+
+ # @param interval [Integer] interval in ms for running the provided code. Defaults to the
+ # `internal.tick_interval` value
+ # @param block [Proc] block of code we want to run once in a while
+ def initialize(interval: ::Karafka::App.config.internal.tick_interval, &block)
+ @block = block
+ @interval = interval
+ @last_called_at = monotonic_now - @interval
+ end
+
+ # Runs the requested code if it was not executed recently
+ def call
+ return if monotonic_now - @last_called_at < @interval
+
+ @last_called_at = monotonic_now
+
+ @block.call
+ end
+
+ # Resets the runner, so the next `#call` will run the underlying code
+ def reset
+ @last_called_at = monotonic_now - @interval
+ end
+ end
+ end
+ end
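A hedged usage sketch of `IntervalRunner` (requires karafka loaded; the 1-second interval and the block body are arbitrary):

```ruby
# Runs the block at most once per second, no matter how hot the calling loop is
reporter = Karafka::Helpers::IntervalRunner.new(interval: 1_000) do
  puts "heartbeat at #{Time.now}"
end

10_000.times do
  reporter.call # a no-op unless at least 1_000ms passed since the last run
end

reporter.reset  # forces the next #call to run the block again
```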
data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb CHANGED
@@ -137,15 +137,7 @@ module Karafka
  def push_tags
  return unless Karafka.logger.respond_to?(:push_tags)

- # Older versions of ddtrace do not have the `#log_correlation` method, so we fallback
- # to the older method for tags
- tags = if client.respond_to?(:log_correlation)
- client.log_correlation
- else
- client.active_correlation.to_s
- end
-
- Karafka.logger.push_tags(tags)
+ Karafka.logger.push_tags(client.log_correlation)
  end

  # Pops datadog's tags from the logger
data/lib/karafka/processing/inline_insights/consumer.rb CHANGED
@@ -35,6 +35,8 @@ module Karafka

  alias statistics insights
  alias statistics? insights?
+ alias inline_insights insights
+ alias inline_insights? insights?
  end
  end
  end
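Assuming a consumer with Inline Insights enabled, the new aliases read as below. The routing setup is omitted and the metric key is illustrative, not guaranteed by this release:

```ruby
class EventsConsumer < Karafka::BaseConsumer
  def consume
    # #inline_insights is now an alias of #insights (same for the ? variants)
    return unless inline_insights?

    # e.g. stored consumer lag as reported by librdkafka statistics
    # (illustrative key, depends on the emitted statistics payload)
    puts inline_insights['consumer_lag_stored']
  end
end
```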
data/lib/karafka/processing/jobs_queue.rb CHANGED
@@ -21,9 +21,12 @@ module Karafka
  # We cannot use a single semaphore as it could potentially block in listeners that should
  # process with their data and also could unlock when a given group needs to remain locked
  @semaphores = Concurrent::Map.new do |h, k|
- h.compute_if_absent(k) { Queue.new }
+ # Ruby prior to 3.2 did not have a queue with a timeout on `#pop`, which is why for those
+ # versions we use our custom queue wrapper
+ h.compute_if_absent(k) { RUBY_VERSION < '3.2' ? TimedQueue.new : Queue.new }
  end

+ @tick_interval = ::Karafka::App.config.internal.tick_interval
  @in_processing = Hash.new { |h, k| h[k] = [] }

  @mutex = Mutex.new
@@ -118,11 +121,19 @@ module Karafka
  # jobs from a given group are completed
  #
  # @param group_id [String] id of the group in which jobs we're interested.
+ # @yieldparam [Block] block we want to run before each pop (in case of Ruby pre-3.2) or
+ # before each pop and on every tick interval.
+ # This allows us to run extra code that needs to be executed even when we are waiting on
+ # the work to be finished.
  # @note This method is blocking.
  def wait(group_id)
  # Go do other things while we cannot process and wait for anyone to finish their work
  # and re-check the wait status
- @semaphores[group_id].pop while wait?(group_id)
+ while wait?(group_id)
+ yield if block_given?
+
+ @semaphores[group_id].pop(timeout: @tick_interval / 1_000.0)
+ end
  end

  # - `busy` - number of jobs that are currently being processed (active work)
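On Ruby 3.2+ the plain `Queue` already supports the timeout used above; `#pop(timeout:)` returns `nil` when nothing arrives in time. A minimal demonstration:

```ruby
queue = Queue.new

# Ruby 3.2+: returns nil after ~0.5s instead of blocking forever
queue.pop(timeout: 0.5) # => nil

queue << :job
queue.pop(timeout: 0.5) # => :job (returned immediately)
```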
data/lib/karafka/processing/timed_queue.rb ADDED
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ module Karafka
+ module Processing
+ # Minimal queue with timeout for Ruby 3.1 and lower.
+ #
+ # It is needed because Ruby has a timeout on `#pop` only since 3.2
+ class TimedQueue
+ include Karafka::Core::Helpers::Time
+
+ def initialize
+ @queue = Queue.new
+ @mutex = Thread::Mutex.new
+ @resource = Thread::ConditionVariable.new
+ end
+
+ # Adds an element to the queue
+ #
+ # @param obj [Object] pushes an element onto the queue
+ def push(obj)
+ @mutex.synchronize do
+ @queue << obj
+ @resource.broadcast
+ end
+ end
+
+ alias << push
+
+ # No timeout means waiting up to 31 years
+ #
+ # @param timeout [Integer] max number of seconds to wait on the pop
+ # @return [Object] element taken from the queue or `nil` on timeout
+ #
+ # @note We use a timeout in seconds because this is how Ruby 3.2+ works and we want to have
+ # the same API for newer and older Ruby versions
+ def pop(timeout: 10_000_000_000)
+ deadline = monotonic_now + timeout * 1000
+
+ @mutex.synchronize do
+ loop do
+ return @queue.pop unless @queue.empty?
+ return @queue.pop if @queue.closed?
+
+ to_wait = (deadline - monotonic_now) / 1_000.0
+
+ return nil if to_wait <= 0
+
+ @resource.wait(@mutex, to_wait)
+ end
+ end
+ end
+
+ # Closes the internal queue and releases the lock
+ def close
+ @mutex.synchronize do
+ @queue.close
+ @resource.broadcast
+ end
+ end
+ end
+ end
+ end
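A sketch of the wrapper in use on pre-3.2 Rubies (requires karafka loaded); it mirrors the `Queue#pop(timeout:)` contract shown earlier:

```ruby
queue = Karafka::Processing::TimedQueue.new

# Producer thread hands over work after a short delay
Thread.new do
  sleep(0.1)
  queue << :work
end

queue.pop(timeout: 1)   # => :work (woken up by the broadcast)
queue.pop(timeout: 0.1) # => nil (timed out, nothing was pushed)
```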
data/lib/karafka/setup/config.rb CHANGED
@@ -152,6 +152,17 @@ module Karafka
  # instances
  setting :process, default: Process.new

+ # Interval of "ticking". This is used to define the maximum time between consecutive
+ # polling of the main rdkafka queue. It should also match the smallest
+ # `statistics.interval.ms` value defined in any of the per-kafka settings, so metrics are
+ # published with the desired frequency. It is set to 5 seconds because
+ # `statistics.interval.ms` is also set to five seconds.
+ #
+ # It is NOT allowed to set it to a value less than 1 second because it could cause polling
+ # not to have enough time to run. This also (indirectly) defines the max timeout of a
+ # single poll, allowing for frequent enough events polling
+ setting :tick_interval, default: 5_000
+
  # Namespace for CLI related settings
  setting :cli do
  # option contract [Object] cli setup validation contract (in the context of options and
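Since `tick_interval` sits under the `internal` namespace, overriding it is rarely needed; if you do, the contract above requires an integer of at least 1_000 ms. A hedged sketch:

```ruby
Karafka::App.setup do |config|
  config.kafka = { 'bootstrap.servers': 'localhost:9092' } # placeholder
  # Tick (and thus events polling) at most every 2 seconds instead of 5.
  # Values below 1_000 would fail the internal.tick_interval contract.
  config.internal.tick_interval = 2_000
end
```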
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '2.2.11'
+ VERSION = '2.2.12'
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
- version: 2.2.11
+ version: 2.2.12
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
  AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
  msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
  -----END CERTIFICATE-----
- date: 2023-11-03 00:00:00.000000000 Z
+ date: 2023-11-09 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: karafka-core
@@ -43,7 +43,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.2.6
+ version: 2.2.7
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.3.0
@@ -53,7 +53,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.2.6
+ version: 2.2.7
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.3.0
@@ -63,7 +63,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.6.10
+ version: 2.6.11
  - - "<"
  - !ruby/object:Gem::Version
  version: 3.0.0
@@ -73,7 +73,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.6.10
+ version: 2.6.11
  - - "<"
  - !ruby/object:Gem::Version
  version: 3.0.0
@@ -181,6 +181,7 @@ files:
  - lib/karafka/errors.rb
  - lib/karafka/helpers/async.rb
  - lib/karafka/helpers/colorize.rb
+ - lib/karafka/helpers/interval_runner.rb
  - lib/karafka/helpers/multi_delegator.rb
  - lib/karafka/instrumentation/callbacks/error.rb
  - lib/karafka/instrumentation/callbacks/rebalance.rb
@@ -374,6 +375,7 @@ files:
  - lib/karafka/processing/strategies/dlq_mom.rb
  - lib/karafka/processing/strategies/mom.rb
  - lib/karafka/processing/strategy_selector.rb
+ - lib/karafka/processing/timed_queue.rb
  - lib/karafka/processing/worker.rb
  - lib/karafka/processing/workers_batch.rb
  - lib/karafka/railtie.rb
metadata.gz.sig CHANGED
Binary file