karafka 2.0.0.alpha5 → 2.0.0.beta2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.ruby-version +1 -1
  4. data/CHANGELOG.md +35 -2
  5. data/Gemfile.lock +6 -6
  6. data/bin/integrations +55 -43
  7. data/config/errors.yml +1 -0
  8. data/docker-compose.yml +4 -1
  9. data/lib/active_job/karafka.rb +2 -2
  10. data/lib/karafka/active_job/routing/extensions.rb +21 -0
  11. data/lib/karafka/base_consumer.rb +65 -12
  12. data/lib/karafka/connection/client.rb +36 -6
  13. data/lib/karafka/connection/listener.rb +92 -27
  14. data/lib/karafka/connection/listeners_batch.rb +24 -0
  15. data/lib/karafka/connection/messages_buffer.rb +49 -22
  16. data/lib/karafka/connection/pauses_manager.rb +2 -2
  17. data/lib/karafka/connection/raw_messages_buffer.rb +101 -0
  18. data/lib/karafka/connection/rebalance_manager.rb +35 -20
  19. data/lib/karafka/contracts/config.rb +8 -0
  20. data/lib/karafka/helpers/async.rb +33 -0
  21. data/lib/karafka/instrumentation/monitor.rb +2 -1
  22. data/lib/karafka/messages/batch_metadata.rb +26 -3
  23. data/lib/karafka/messages/builders/batch_metadata.rb +17 -29
  24. data/lib/karafka/messages/builders/message.rb +1 -0
  25. data/lib/karafka/messages/builders/messages.rb +4 -12
  26. data/lib/karafka/pro/active_job/consumer.rb +21 -0
  27. data/lib/karafka/pro/active_job/dispatcher.rb +10 -10
  28. data/lib/karafka/pro/active_job/job_options_contract.rb +9 -9
  29. data/lib/karafka/pro/loader.rb +17 -8
  30. data/lib/karafka/pro/performance_tracker.rb +80 -0
  31. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +38 -0
  32. data/lib/karafka/pro/scheduler.rb +54 -0
  33. data/lib/karafka/processing/executor.rb +19 -11
  34. data/lib/karafka/processing/executors_buffer.rb +15 -7
  35. data/lib/karafka/processing/jobs/base.rb +28 -0
  36. data/lib/karafka/processing/jobs/consume.rb +11 -4
  37. data/lib/karafka/processing/jobs_queue.rb +28 -16
  38. data/lib/karafka/processing/worker.rb +30 -9
  39. data/lib/karafka/processing/workers_batch.rb +5 -0
  40. data/lib/karafka/railtie.rb +12 -0
  41. data/lib/karafka/routing/consumer_group.rb +1 -1
  42. data/lib/karafka/routing/subscription_group.rb +1 -1
  43. data/lib/karafka/routing/subscription_groups_builder.rb +3 -2
  44. data/lib/karafka/routing/topics.rb +38 -0
  45. data/lib/karafka/runner.rb +19 -27
  46. data/lib/karafka/scheduler.rb +20 -0
  47. data/lib/karafka/server.rb +24 -23
  48. data/lib/karafka/setup/config.rb +4 -1
  49. data/lib/karafka/time_trackers/pause.rb +10 -2
  50. data/lib/karafka/version.rb +1 -1
  51. data.tar.gz.sig +0 -0
  52. metadata +13 -4
  53. metadata.gz.sig +0 -0
  54. data/lib/karafka/active_job/routing_extensions.rb +0 -18
data/lib/karafka/processing/jobs_queue.rb CHANGED
@@ -12,7 +12,7 @@ module Karafka
  class JobsQueue
  # @return [Karafka::Processing::JobsQueue]
  def initialize
- @queue = ::Queue.new
+ @queue = Queue.new
  # Those queues will act as a semaphores internally. Since we need an indicator for waiting
  # we could use Thread.pass but this is expensive. Instead we can just lock until any
  # of the workers finishes their work and we can re-check. This means that in the worse
@@ -21,7 +21,7 @@ module Karafka
  # We cannot use a single semaphore as it could potentially block in listeners that should
  # process with their data and also could unlock when a given group needs to remain locked
  @semaphores = Hash.new { |h, k| h[k] = Queue.new }
- @in_processing = Hash.new { |h, k| h[k] = {} }
+ @in_processing = Hash.new { |h, k| h[k] = [] }
  @mutex = Mutex.new
  end

@@ -44,9 +44,9 @@ module Karafka
  @mutex.synchronize do
  group = @in_processing[job.group_id]

- raise(Errors::JobsQueueSynchronizationError, job.group_id) if group.key?(job.id)
+ raise(Errors::JobsQueueSynchronizationError, job.group_id) if group.include?(job)

- group[job.id] = true
+ group << job
  end

  @queue << job
@@ -60,14 +60,21 @@ module Karafka
  @queue.pop
  end

+ # Causes the wait lock to re-check the lock conditions and potential unlock.
+ # @param group_id [String] id of the group we want to unlock for one tick
+ # @note This does not release the wait lock. It just causes a conditions recheck
+ def tick(group_id)
+ @semaphores[group_id] << true
+ end
+
  # Marks a given job from a given group as completed. When there are no more jobs from a given
  # group to be executed, we won't wait.
  #
  # @param [Jobs::Base] job that was completed
  def complete(job)
  @mutex.synchronize do
- @in_processing[job.group_id].delete(job.id)
- @semaphores[job.group_id] << true
+ @in_processing[job.group_id].delete(job)
+ tick(job.group_id)
  end
  end

@@ -79,7 +86,7 @@ module Karafka
  @mutex.synchronize do
  @in_processing[group_id].clear
  # We unlock it just in case it was blocked when clearing started
- @semaphores[group_id] << true
+ tick(group_id)
  end
  end

@@ -93,8 +100,17 @@ module Karafka
  end
  end

- # Blocks when there are things in the queue in a given group and waits until all the jobs
- # from a given group are completed
+ # @param group_id [String]
+ #
+ # @return [Boolean] tell us if we have anything in the processing (or for processing) from
+ # a given group.
+ def empty?(group_id)
+ @in_processing[group_id].empty?
+ end
+
+ # Blocks when there are things in the queue in a given group and waits until all the blocking
+ # jobs from a given group are completed
+ #
  # @param group_id [String] id of the group in which jobs we're interested.
  # @note This method is blocking.
  def wait(group_id)
@@ -107,14 +123,10 @@ module Karafka

  # @param group_id [String] id of the group in which jobs we're interested.
  # @return [Boolean] should we keep waiting or not
+ # @note We do not wait for non-blocking jobs. Their flow should allow for `poll` running
+ # as they may exceed `max.poll.interval`
  def wait?(group_id)
- # If it is stopping, all the previous messages that are processed at the moment need to
- # finish. Otherwise we may risk closing the client and committing offsets afterwards
- return false if Karafka::App.stopping? && @in_processing[group_id].empty?
- return false if @queue.closed?
- return false if @in_processing[group_id].empty?
-
- true
+ !@in_processing[group_id].all?(&:non_blocking?)
  end
  end
  end
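To make the new tick / wait? interplay easier to follow, here is a minimal, self-contained sketch of the underlying pattern; MiniJobsQueue and its method names are illustrative stand-ins, not the Karafka API. Waiters block on a per-group Queue#pop, and every tick or completion pushes a token that forces the wait condition to be re-evaluated, which is how a non-blocking job can release the listener while still being tracked.

    # Illustrative only: the semaphore-queue pattern behind the jobs queue above.
    class MiniJobsQueue
      def initialize
        @mutex = Mutex.new
        @semaphores = Hash.new { |h, k| h[k] = Queue.new }
        @in_processing = Hash.new { |h, k| h[k] = [] }
      end

      def push(group_id, job)
        @mutex.synchronize { @in_processing[group_id] << job }
      end

      # Forces waiters of a given group to re-evaluate their wait condition
      def tick(group_id)
        @semaphores[group_id] << true
      end

      def complete(group_id, job)
        @mutex.synchronize { @in_processing[group_id].delete(job) }
        tick(group_id)
      end

      # Blocks until nothing relevant remains in processing for the group
      def wait(group_id)
        @semaphores[group_id].pop while wait?(group_id)
      end

      def wait?(group_id)
        !@in_processing[group_id].empty?
      end
    end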
data/lib/karafka/processing/worker.rb CHANGED
@@ -4,25 +4,35 @@ module Karafka
  module Processing
  # Workers are used to run jobs in separate threads.
  # Workers are the main processing units of the Karafka framework.
+ #
+ # Each job runs in three stages:
+ # - prepare - here we can run any code that we would need to run blocking before we allow
+ # the job to run fully async (non blocking). This will always run in a blocking
+ # way and can be used to make sure all the resources and external dependencies
+ # are satisfied before going async.
+ #
+ # - call - actual processing logic that can run sync or async
+ #
+ # - teardown - it should include any code that we want to run after we executed the user
+ # code. This can be used to unlock certain resources or do other things that are
+ # not user code but need to run after user code base is executed.
  class Worker
- extend Forwardable
-
- def_delegators :@thread, :join, :terminate, :alive?
+ include Helpers::Async

  # @param jobs_queue [JobsQueue]
  # @return [Worker]
  def initialize(jobs_queue)
  @jobs_queue = jobs_queue
- @thread = Thread.new do
- # If anything goes wrong in this worker thread, it means something went really wrong and
- # we should terminate.
- Thread.current.abort_on_exception = true
- loop { break unless process }
- end
  end

  private

+ # Runs processing of jobs in a loop
+ # Stops when queue is closed.
+ def call
+ loop { break unless process }
+ end
+
  # Fetches a single job, processes it and marks as completed.
  #
  # @note We do not have error handling here, as no errors should propagate this far. If they
@@ -33,7 +43,18 @@ module Karafka
  job = @jobs_queue.pop

  if job
+ job.prepare
+
+ # If a job is marked as non blocking, we can run a tick in the job queue and if there
+ # are no other blocking factors, the job queue will be unlocked.
+ # If this does not run, all the things will be blocking and job queue won't allow to
+ # pass it until done.
+ @jobs_queue.tick(job.group_id) if job.non_blocking?
+
  job.call
+
+ job.teardown
+
  true
  else
  false
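For context, the worker loop above expects every job to expose the three-stage interface plus non_blocking?. The shape below is illustrative only; the real contract lives in lib/karafka/processing/jobs/base.rb (+28 lines in this release) and may differ in details.

    # Illustrative only: a minimal job shape compatible with the worker loop above.
    class ExampleJob
      attr_reader :group_id

      def initialize(group_id)
        @group_id = group_id
      end

      # Always executed in a blocking fashion before the job may go async
      def prepare
        puts 'acquiring resources'
      end

      # The actual work; for non-blocking jobs this may run while polling continues
      def call
        puts 'processing'
      end

      # Always executed after the user code
      def teardown
        puts 'releasing resources'
      end

      # Regular jobs block the queue; Pro non-blocking jobs return true here
      def non_blocking?
        false
      end
    end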
data/lib/karafka/processing/workers_batch.rb CHANGED
@@ -17,6 +17,11 @@ module Karafka
  def each(&block)
  @batch.each(&block)
  end
+
+ # @return [Integer] number of workers in the batch
+ def size
+ @batch.size
+ end
  end
  end
  end
data/lib/karafka/railtie.rb CHANGED
@@ -82,8 +82,20 @@ if rails
  initializer 'karafka.require_karafka_boot_file' do |app|
  rails6plus = Rails.gem_version >= Gem::Version.new('6.0.0')

+ # If the boot file location is set to "false", we should not raise an exception and we
+ # should just not load karafka stuff. Setting this explicitly to false indicates, that
+ # karafka is part of the supply chain but it is not a first class citizen of a given
+ # system (may be just a dependency of a dependency), thus railtie should not kick in to
+ # load the non-existing boot file
+ next if Karafka.boot_file.to_s == 'false'
+
  karafka_boot_file = Rails.root.join(Karafka.boot_file.to_s).to_s

+ # Provide more comprehensive error for when no boot file
+ unless File.exist?(karafka_boot_file)
+ raise(Karafka::Errors::MissingBootFileError, karafka_boot_file)
+ end
+
  if rails6plus
  app.reloader.to_prepare do
  # Load Karafka boot file, so it can be used in Rails server context
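Assuming the boot file location is resolved from the KARAFKA_BOOT_FILE environment variable (the usual karafka.rb lookup), an application that only pulls karafka in transitively could opt out of the railtie like this:

    # Illustrative opt-out: tell the railtie there is no karafka.rb to load.
    # Set this before Rails initializes (e.g. in config/boot.rb or the process env).
    ENV['KARAFKA_BOOT_FILE'] = 'false'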
data/lib/karafka/routing/consumer_group.rb CHANGED
@@ -17,7 +17,7 @@ module Karafka
  def initialize(name)
  @name = name
  @id = Karafka::App.config.consumer_mapper.call(name)
- @topics = []
+ @topics = Topics.new([])
  end

  # @return [Boolean] true if this consumer group should be active in our current process
data/lib/karafka/routing/subscription_group.rb CHANGED
@@ -10,7 +10,7 @@ module Karafka
  class SubscriptionGroup
  attr_reader :id, :topics

- # @param topics [Array<Topic>] all the topics that share the same key settings
+ # @param topics [Karafka::Routing::Topics] all the topics that share the same key settings
  # @return [SubscriptionGroup] built subscription group
  def initialize(topics)
  @id = SecureRandom.uuid
data/lib/karafka/routing/subscription_groups_builder.rb CHANGED
@@ -23,8 +23,8 @@ module Karafka

  private_constant :DISTRIBUTION_KEYS

- # @param topics [Array<Topic>] array with topics based on which we want to build subscription
- # groups
+ # @param topics [Karafka::Routing::Topics] all the topics based on which we want to build
+ # subscription groups
  # @return [Array<SubscriptionGroup>] all subscription groups we need in separate threads
  def call(topics)
  topics
@@ -32,6 +32,7 @@ module Karafka
  .group_by(&:first)
  .values
  .map { |value| value.map(&:last) }
+ .map { |topics_array| Routing::Topics.new(topics_array) }
  .map { |grouped_topics| SubscriptionGroup.new(grouped_topics) }
  end

data/lib/karafka/routing/topics.rb ADDED
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ # frozen_string_literal: true
+
+ module Karafka
+ module Routing
+ # Abstraction layer on top of groups of topics
+ class Topics
+ include Enumerable
+ extend Forwardable
+
+ def_delegators :@accumulator, :[], :size, :empty?, :last, :<<
+
+ # @param topics_array [Array<Karafka::Routing::Topic>] array with topics
+ def initialize(topics_array)
+ @accumulator = topics_array.dup
+ end
+
+ # Yields each topic
+ #
+ # @param [Proc] block we want to yield with on each topic
+ def each(&block)
+ @accumulator.each(&block)
+ end
+
+ # Finds topic by its name
+ #
+ # @param topic_name [String] topic name
+ # @return [Karafka::Routing::Topic]
+ # @raise [Karafka::Errors::TopicNotFoundError] this should never happen. If you see it,
+ # please create an issue.
+ def find(topic_name)
+ @accumulator.find { |topic| topic.name == topic_name } ||
+ raise(Karafka::Errors::TopicNotFoundError, topic_name)
+ end
+ end
+ end
+ end
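Usage of the new collection is straightforward; in this sketch the topic objects are plain stand-ins (anything responding to #name), while real instances come from the routing DSL:

    # Illustrative only: stand-in topic objects instead of routing-built ones.
    Topic = Struct.new(:name)

    topics = Karafka::Routing::Topics.new([Topic.new('orders'), Topic.new('payments')])

    topics.size            # => 2
    topics.map(&:name)     # => ["orders", "payments"] (Enumerable via #each)
    topics.find('orders')  # => #<struct Topic name="orders">
    topics.find('missing') # raises Karafka::Errors::TopicNotFoundError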
data/lib/karafka/runner.rb CHANGED
@@ -3,32 +3,37 @@
  module Karafka
  # Class used to run the Karafka listeners in separate threads
  class Runner
- # Starts listening on all the listeners asynchronously
- # Fetch loop should never end. If they do, it is a critical error
+ # Starts listening on all the listeners asynchronously and handles the jobs queue closing
+ # after listeners are done with their work.
  def call
  # Despite possibility of having several independent listeners, we aim to have one queue for
  # jobs across and one workers poll for that
  jobs_queue = Processing::JobsQueue.new

  workers = Processing::WorkersBatch.new(jobs_queue)
- Karafka::Server.workers = workers
+ listeners = Connection::ListenersBatch.new(jobs_queue)

- threads = listeners(jobs_queue).map do |listener|
- # We abort on exception because there should be an exception handling developed for
- # each listener running in separate threads, so the exceptions should never leak
- # and if that happens, it means that something really bad happened and we should stop
- # the whole process
- Thread
- .new { listener.call }
- .tap { |thread| thread.abort_on_exception = true }
- end
+ workers.each(&:async_call)
+ listeners.each(&:async_call)

  # We aggregate threads here for a supervised shutdown process
- Karafka::Server.consumer_threads = threads
+ Karafka::Server.workers = workers
+ Karafka::Server.listeners = listeners

  # All the listener threads need to finish
- threads.each(&:join)
+ listeners.each(&:join)
+
+ # We close the jobs queue only when no listener threads are working.
+ # This ensures, that everything was closed prior to us not accepting anymore jobs and that
+ # no more jobs will be enqueued. Since each listener waits for jobs to finish, once those
+ # are done, we can close.
+ jobs_queue.close
+
  # All the workers need to stop processing anything before we can stop the runner completely
+ # This ensures that even async long-running jobs have time to finish before we are done
+ # with everything. One thing worth keeping in mind though: It is the end user responsibility
+ # to handle the shutdown detection in their long-running processes. Otherwise if timeout
+ # is exceeded, there will be a forced shutdown.
  workers.each(&:join)
  # If anything crashes here, we need to raise the error and crush the runner because it means
  # that something terrible happened
@@ -42,18 +47,5 @@ module Karafka
  Karafka::App.stop!
  raise e
  end
-
- private
-
- # @param jobs_queue [Processing::JobsQueue] the main processing queue
- # @return [Array<Karafka::Connection::Listener>] listeners that will consume messages for each
- # of the subscription groups
- def listeners(jobs_queue)
- App
- .subscription_groups
- .map do |subscription_group|
- Karafka::Connection::Listener.new(subscription_group, jobs_queue)
- end
- end
  end
  end
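Both workers and listeners now start themselves through async_call, provided by the new Karafka::Helpers::Async mixin (+33 lines, not shown in this excerpt). The module below is a plausible reduction of that idea, assuming it simply wraps #call in a supervised thread; it is not the actual implementation.

    # Sketch of the async-call idea: run #call in a dedicated thread and expose
    # thread lifecycle methods so the supervisor can join/terminate/inspect it.
    module AsyncCall
      def async_call
        @thread = Thread.new do
          # Errors here are considered critical, hence abort_on_exception
          Thread.current.abort_on_exception = true
          call
        end
      end

      def join
        @thread&.join
      end

      def terminate
        @thread&.terminate
      end

      def alive?
        @thread ? @thread.alive? : false
      end
    end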
data/lib/karafka/scheduler.rb ADDED
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module Karafka
+ # FIFO scheduler for messages coming from various topics and partitions
+ class Scheduler
+ # Schedules jobs in the fifo order
+ #
+ # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
+ # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
+ def schedule_consumption(queue, jobs_array)
+ jobs_array.each do |job|
+ queue << job
+ end
+ end
+
+ # Both revocation and shutdown jobs can also run in fifo by default
+ alias schedule_revocation schedule_consumption
+ alias schedule_shutdown schedule_consumption
+ end
+ end
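Since the scheduler is now an injectable object (see the setting :scheduler addition in setup/config.rb below), a custom ordering strategy can in principle be plugged in. A hedged sketch, assuming consumption jobs expose the messages they carry via #messages; only the consumption ordering is changed here, revocation and shutdown keep the parent's FIFO behaviour.

    # Illustrative custom scheduler: enqueue the largest message batches first.
    class LargestBatchFirstScheduler < Karafka::Scheduler
      def schedule_consumption(queue, jobs_array)
        jobs_array
          .sort_by { |job| -job.messages.size }
          .each { |job| queue << job }
      end
    end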
data/lib/karafka/server.rb CHANGED
@@ -15,7 +15,7 @@ module Karafka

  class << self
  # Set of consuming threads. Each consumer thread contains a single consumer
- attr_accessor :consumer_threads
+ attr_accessor :listeners

  # Set of workers
  attr_accessor :workers
@@ -25,9 +25,12 @@ module Karafka

  # Method which runs app
  def run
- process.on_sigint { stop }
- process.on_sigquit { stop }
- process.on_sigterm { stop }
+ # Since we do a lot of threading and queuing, we don't want to stop from the trap context
+ # as some things may not work there as expected, that is why we spawn a separate thread to
+ # handle the stopping process
+ process.on_sigint { Thread.new { stop } }
+ process.on_sigquit { Thread.new { stop } }
+ process.on_sigterm { Thread.new { stop } }

  # Start is blocking until stop is called and when we stop, it will wait until
  # all of the things are ready to stop
@@ -35,6 +38,8 @@ module Karafka

  # We always need to wait for Karafka to stop here since we should wait for the stop running
  # in a separate thread (or trap context) to indicate everything is closed
+ # Since `#start` is blocking, we were get here only after the runner is done. This will
+ # not add any performance degradation because of that.
  Thread.pass until Karafka::App.stopped?
  # Try its best to shutdown underlying components before re-raising
  # rubocop:disable Lint/RescueException
@@ -70,16 +75,16 @@ module Karafka
  def stop
  Karafka::App.stop!

- timeout = Thread.new { Karafka::App.config.shutdown_timeout }.join.value
+ timeout = Karafka::App.config.shutdown_timeout

  # We check from time to time (for the timeout period) if all the threads finished
  # their work and if so, we can just return and normal shutdown process will take place
  # We divide it by 1000 because we use time in ms.
  ((timeout / 1_000) * SUPERVISION_CHECK_FACTOR).to_i.times do
- if consumer_threads.count(&:alive?).zero? &&
+ if listeners.count(&:alive?).zero? &&
  workers.count(&:alive?).zero?

- Thread.new { Karafka::App.producer.close }.join
+ Karafka::App.producer.close

  return
  end
@@ -89,22 +94,18 @@ module Karafka

  raise Errors::ForcefulShutdownError
  rescue Errors::ForcefulShutdownError => e
- thread = Thread.new do
- Karafka.monitor.instrument(
- 'error.occurred',
- caller: self,
- error: e,
- type: 'app.stopping.error'
- )
-
- # We're done waiting, lets kill them!
- workers.each(&:terminate)
- consumer_threads.each(&:terminate)
-
- Karafka::App.producer.close
- end
-
- thread.join
+ Karafka.monitor.instrument(
+ 'error.occurred',
+ caller: self,
+ error: e,
+ type: 'app.stopping.error'
+ )
+
+ # We're done waiting, lets kill them!
+ workers.each(&:terminate)
+ listeners.each(&:terminate)
+
+ Karafka::App.producer.close

  # exit! is not within the instrumentation as it would not trigger due to exit
  Kernel.exit! FORCEFUL_EXIT_CODE
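The Thread.new wrappers around stop exist because Ruby restricts what may run inside a signal trap handler; for example, prior to Ruby 3.0, Mutex#synchronize raises ThreadError ("can't be called from trap context") there, while the reworked shutdown path above now uses such primitives freely instead of wrapping individual calls in throwaway threads. A minimal illustration of the restriction:

    # Illustrative only. On Rubies < 3.0, calling stopper.call directly inside the
    # trap handler raises ThreadError; deferring it to a regular thread is safe.
    stopper = -> { Mutex.new.synchronize { puts 'stopping...' } }

    trap('TERM') do
      Thread.new { stopper.call }
    end

    Process.kill('TERM', Process.pid)
    sleep 0.2 # give the handler thread a moment to run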
data/lib/karafka/setup/config.rb CHANGED
@@ -60,7 +60,7 @@ module Karafka
  # option [Boolean] should we leave offset management to the user
  setting :manual_offset_management, default: false
  # options max_messages [Integer] how many messages do we want to fetch from Kafka in one go
- setting :max_messages, default: 100_000
+ setting :max_messages, default: 1_000
  # option [Integer] number of milliseconds we can wait while fetching data
  setting :max_wait_time, default: 10_000
  # option shutdown_timeout [Integer] the number of milliseconds after which Karafka no
@@ -96,6 +96,8 @@ module Karafka
  # option subscription_groups_builder [Routing::SubscriptionGroupsBuilder] subscription
  # group builder
  setting :subscription_groups_builder, default: Routing::SubscriptionGroupsBuilder.new
+ # option scheduler [Class] scheduler we will be using
+ setting :scheduler, default: Scheduler.new

  # Karafka components for ActiveJob
  setting :active_job do
@@ -115,6 +117,7 @@ module Karafka
  def setup(&block)
  configure(&block)
  merge_kafka_defaults!(config)
+
  Contracts::Config.new.validate!(config.to_h)

  # Check the license presence (if needed) and
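Both the lowered max_messages default (1_000 instead of 100_000) and shutdown_timeout can be overridden in the application setup block. An illustrative karafka.rb follows; the topic name and consumer are placeholders. The new scheduler setting can likewise point at a custom implementation such as the sketch shown earlier, though its exact location in the config tree should be checked against the shipped setup/config.rb.

    # karafka.rb (illustrative)
    class OrdersConsumer < Karafka::BaseConsumer
      def consume; end
    end

    class KarafkaApp < Karafka::App
      setup do |config|
        config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }
        config.max_messages = 5_000      # raise the 1_000 default if needed
        config.shutdown_timeout = 60_000 # ms, used by the supervised shutdown in server.rb
      end

      routes.draw do
        topic :orders do
          consumer OrdersConsumer
        end
      end
    end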
data/lib/karafka/time_trackers/pause.rb CHANGED
@@ -41,9 +41,12 @@ module Karafka

  # Pauses the processing from now till the end of the interval (backoff or non-backoff)
  # and records the count.
- def pause
+ # @param timeout [Integer] timeout value in milliseconds that overwrites the default timeout
+ # @note Providing this value can be useful when we explicitly want to pause for a certain
+ # period of time, outside of any regular pausing logic
+ def pause(timeout = backoff_interval)
  @started_at = now
- @ends_at = @started_at + backoff_interval
+ @ends_at = @started_at + timeout
  @count += 1
  end

@@ -53,6 +56,11 @@ module Karafka
  @ends_at = nil
  end

+ # Expires the pause, so it can be considered expired
+ def expire
+ def expire is defined above; see diff
+ end
+
  # @return [Boolean] are we paused from processing
  def paused?
  !@started_at.nil?
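The optional timeout argument allows pausing for an explicit period instead of the computed backoff, and expire forces the pause to be considered over without resuming it. A rough usage sketch, assuming the keyword constructor used internally by the pauses manager (timeout / max_timeout / exponential_backoff); treat those argument names as assumptions.

    # Illustrative values; real instances are built per topic partition.
    pause = Karafka::TimeTrackers::Pause.new(
      timeout: 1_000,
      max_timeout: 10_000,
      exponential_backoff: true
    )

    pause.pause        # pause for the computed backoff interval
    pause.pause(5_000) # or pause for an explicit 5 seconds (value in ms)
    pause.paused?      # => true
    pause.expire       # treat the pause as already elapsed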
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '2.0.0.alpha5'
+ VERSION = '2.0.0.beta2'
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
- version: 2.0.0.alpha5
+ version: 2.0.0.beta2
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -34,7 +34,7 @@ cert_chain:
  R2P11bWoCtr70BsccVrN8jEhzwXngMyI2gVt750Y+dbTu1KgRqZKp/ECe7ZzPzXj
  pIy9vHxTANKYVyI4qj8OrFdEM5BQNu8oQpL0iQ==
  -----END CERTIFICATE-----
- date: 2022-04-03 00:00:00.000000000 Z
+ date: 2022-06-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: dry-configurable
@@ -184,7 +184,7 @@ files:
  - lib/karafka/active_job/dispatcher.rb
  - lib/karafka/active_job/job_extensions.rb
  - lib/karafka/active_job/job_options_contract.rb
- - lib/karafka/active_job/routing_extensions.rb
+ - lib/karafka/active_job/routing/extensions.rb
  - lib/karafka/app.rb
  - lib/karafka/base_consumer.rb
  - lib/karafka/cli.rb
@@ -195,8 +195,10 @@ files:
  - lib/karafka/cli/server.rb
  - lib/karafka/connection/client.rb
  - lib/karafka/connection/listener.rb
+ - lib/karafka/connection/listeners_batch.rb
  - lib/karafka/connection/messages_buffer.rb
  - lib/karafka/connection/pauses_manager.rb
+ - lib/karafka/connection/raw_messages_buffer.rb
  - lib/karafka/connection/rebalance_manager.rb
  - lib/karafka/contracts.rb
  - lib/karafka/contracts/base.rb
@@ -206,6 +208,7 @@ files:
  - lib/karafka/contracts/server_cli_options.rb
  - lib/karafka/env.rb
  - lib/karafka/errors.rb
+ - lib/karafka/helpers/async.rb
  - lib/karafka/helpers/multi_delegator.rb
  - lib/karafka/instrumentation.rb
  - lib/karafka/instrumentation/callbacks/error.rb
@@ -225,9 +228,13 @@ files:
  - lib/karafka/messages/seek.rb
  - lib/karafka/patches/rdkafka/consumer.rb
  - lib/karafka/pro.rb
+ - lib/karafka/pro/active_job/consumer.rb
  - lib/karafka/pro/active_job/dispatcher.rb
  - lib/karafka/pro/active_job/job_options_contract.rb
  - lib/karafka/pro/loader.rb
+ - lib/karafka/pro/performance_tracker.rb
+ - lib/karafka/pro/processing/jobs/consume_non_blocking.rb
+ - lib/karafka/pro/scheduler.rb
  - lib/karafka/process.rb
  - lib/karafka/processing/executor.rb
  - lib/karafka/processing/executors_buffer.rb
@@ -247,7 +254,9 @@ files:
  - lib/karafka/routing/subscription_group.rb
  - lib/karafka/routing/subscription_groups_builder.rb
  - lib/karafka/routing/topic.rb
+ - lib/karafka/routing/topics.rb
  - lib/karafka/runner.rb
+ - lib/karafka/scheduler.rb
  - lib/karafka/serialization/json/deserializer.rb
  - lib/karafka/server.rb
  - lib/karafka/setup/config.rb
@@ -282,7 +291,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: 1.3.1
  requirements: []
- rubygems_version: 3.3.4
+ rubygems_version: 3.3.7
  signing_key:
  specification_version: 4
  summary: Ruby based framework for working with Apache Kafka
metadata.gz.sig CHANGED
Binary file
data/lib/karafka/active_job/routing_extensions.rb DELETED
@@ -1,18 +0,0 @@
- # frozen_string_literal: true
-
- module Karafka
- # ActiveJob related Karafka stuff
- module ActiveJob
- # Routing extensions for ActiveJob
- module RoutingExtensions
- # This method simplifies routes definition for ActiveJob topics / queues by auto-injecting
- # the consumer class
- # @param name [String, Symbol] name of the topic where ActiveJobs jobs should go
- def active_job_topic(name)
- topic(name) do
- consumer App.config.internal.active_job.consumer
- end
- end
- end
- end
- end
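The extension was not dropped but relocated to lib/karafka/active_job/routing/extensions.rb (entry 10 in the file list above), so routing usage stays the same. An illustrative karafka.rb routing block with a placeholder queue name:

    # active_job_topic auto-assigns the internal ActiveJob consumer, so no
    # consumer class has to be declared by hand for the :default queue.
    class KarafkaApp < Karafka::App
      routes.draw do
        active_job_topic :default
      end
    end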