karafka 2.3.4 → 2.4.0.beta1

Files changed (126)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +12 -38
  4. data/CHANGELOG.md +56 -2
  5. data/Gemfile +6 -3
  6. data/Gemfile.lock +25 -23
  7. data/bin/integrations +1 -1
  8. data/config/locales/errors.yml +21 -2
  9. data/config/locales/pro_errors.yml +16 -1
  10. data/karafka.gemspec +4 -2
  11. data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -0
  12. data/lib/karafka/admin/configs/config.rb +81 -0
  13. data/lib/karafka/admin/configs/resource.rb +88 -0
  14. data/lib/karafka/admin/configs.rb +103 -0
  15. data/lib/karafka/admin.rb +201 -100
  16. data/lib/karafka/base_consumer.rb +2 -2
  17. data/lib/karafka/cli/info.rb +9 -7
  18. data/lib/karafka/cli/server.rb +7 -7
  19. data/lib/karafka/cli/topics/align.rb +109 -0
  20. data/lib/karafka/cli/topics/base.rb +66 -0
  21. data/lib/karafka/cli/topics/create.rb +35 -0
  22. data/lib/karafka/cli/topics/delete.rb +30 -0
  23. data/lib/karafka/cli/topics/migrate.rb +31 -0
  24. data/lib/karafka/cli/topics/plan.rb +169 -0
  25. data/lib/karafka/cli/topics/repartition.rb +41 -0
  26. data/lib/karafka/cli/topics/reset.rb +18 -0
  27. data/lib/karafka/cli/topics.rb +13 -123
  28. data/lib/karafka/connection/client.rb +55 -37
  29. data/lib/karafka/connection/listener.rb +22 -17
  30. data/lib/karafka/connection/proxy.rb +93 -4
  31. data/lib/karafka/connection/status.rb +14 -2
  32. data/lib/karafka/contracts/config.rb +14 -1
  33. data/lib/karafka/contracts/topic.rb +1 -1
  34. data/lib/karafka/deserializers/headers.rb +15 -0
  35. data/lib/karafka/deserializers/key.rb +15 -0
  36. data/lib/karafka/deserializers/payload.rb +16 -0
  37. data/lib/karafka/embedded.rb +2 -0
  38. data/lib/karafka/helpers/async.rb +5 -2
  39. data/lib/karafka/helpers/colorize.rb +6 -0
  40. data/lib/karafka/instrumentation/callbacks/oauthbearer_token_refresh.rb +29 -0
  41. data/lib/karafka/instrumentation/logger_listener.rb +23 -3
  42. data/lib/karafka/instrumentation/notifications.rb +10 -0
  43. data/lib/karafka/instrumentation/vendors/appsignal/client.rb +16 -2
  44. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +20 -0
  45. data/lib/karafka/messages/batch_metadata.rb +1 -1
  46. data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
  47. data/lib/karafka/messages/builders/message.rb +10 -6
  48. data/lib/karafka/messages/message.rb +2 -1
  49. data/lib/karafka/messages/metadata.rb +20 -4
  50. data/lib/karafka/messages/parser.rb +1 -1
  51. data/lib/karafka/pro/base_consumer.rb +12 -23
  52. data/lib/karafka/pro/encryption/cipher.rb +7 -3
  53. data/lib/karafka/pro/encryption/contracts/config.rb +1 -0
  54. data/lib/karafka/pro/encryption/errors.rb +4 -1
  55. data/lib/karafka/pro/encryption/messages/middleware.rb +13 -11
  56. data/lib/karafka/pro/encryption/messages/parser.rb +22 -20
  57. data/lib/karafka/pro/encryption/setup/config.rb +5 -0
  58. data/lib/karafka/pro/iterator/expander.rb +2 -1
  59. data/lib/karafka/pro/iterator/tpl_builder.rb +38 -0
  60. data/lib/karafka/pro/iterator.rb +28 -2
  61. data/lib/karafka/pro/loader.rb +3 -0
  62. data/lib/karafka/pro/processing/coordinator.rb +15 -2
  63. data/lib/karafka/pro/processing/expansions_selector.rb +2 -0
  64. data/lib/karafka/pro/processing/jobs_queue.rb +122 -5
  65. data/lib/karafka/pro/processing/periodic_job/consumer.rb +67 -0
  66. data/lib/karafka/pro/processing/piping/consumer.rb +126 -0
  67. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +1 -1
  68. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +1 -1
  69. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +1 -1
  70. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +1 -1
  71. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +1 -1
  72. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +1 -1
  73. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +1 -1
  74. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +1 -1
  75. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +2 -0
  76. data/lib/karafka/pro/processing/strategies/default.rb +5 -1
  77. data/lib/karafka/pro/processing/strategies/dlq/default.rb +21 -5
  78. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -0
  79. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +2 -0
  80. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +52 -0
  81. data/lib/karafka/pro/routing/features/direct_assignments/config.rb +27 -0
  82. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +53 -0
  83. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +108 -0
  84. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +77 -0
  85. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +69 -0
  86. data/lib/karafka/pro/routing/features/direct_assignments.rb +25 -0
  87. data/lib/karafka/pro/routing/features/patterns/builder.rb +1 -1
  88. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +76 -0
  89. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +16 -5
  90. data/lib/karafka/pro/routing/features/swarm/topic.rb +25 -2
  91. data/lib/karafka/pro/routing/features/swarm.rb +11 -0
  92. data/lib/karafka/pro/swarm/liveness_listener.rb +20 -0
  93. data/lib/karafka/processing/coordinator.rb +17 -8
  94. data/lib/karafka/processing/coordinators_buffer.rb +5 -2
  95. data/lib/karafka/processing/executor.rb +6 -2
  96. data/lib/karafka/processing/executors_buffer.rb +5 -2
  97. data/lib/karafka/processing/jobs_queue.rb +9 -4
  98. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +1 -1
  99. data/lib/karafka/processing/strategies/default.rb +7 -1
  100. data/lib/karafka/processing/strategies/dlq.rb +17 -2
  101. data/lib/karafka/processing/workers_batch.rb +4 -1
  102. data/lib/karafka/routing/builder.rb +6 -2
  103. data/lib/karafka/routing/consumer_group.rb +2 -1
  104. data/lib/karafka/routing/features/dead_letter_queue/config.rb +5 -0
  105. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +8 -0
  106. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +10 -2
  107. data/lib/karafka/routing/features/deserializers/config.rb +18 -0
  108. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +31 -0
  109. data/lib/karafka/routing/features/deserializers/topic.rb +51 -0
  110. data/lib/karafka/routing/features/deserializers.rb +11 -0
  111. data/lib/karafka/routing/proxy.rb +9 -14
  112. data/lib/karafka/routing/router.rb +11 -2
  113. data/lib/karafka/routing/subscription_group.rb +9 -1
  114. data/lib/karafka/routing/topic.rb +0 -1
  115. data/lib/karafka/runner.rb +1 -1
  116. data/lib/karafka/setup/config.rb +50 -9
  117. data/lib/karafka/status.rb +7 -8
  118. data/lib/karafka/swarm/supervisor.rb +16 -2
  119. data/lib/karafka/templates/karafka.rb.erb +28 -1
  120. data/lib/karafka/version.rb +1 -1
  121. data.tar.gz.sig +0 -0
  122. metadata +38 -12
  123. metadata.gz.sig +0 -0
  124. data/lib/karafka/routing/consumer_mapper.rb +0 -23
  125. data/lib/karafka/serialization/json/deserializer.rb +0 -19
  126. data/lib/karafka/time_trackers/partition_usage.rb +0 -56
@@ -19,17 +19,36 @@ module Karafka
       #
       # Aside from the OSS queue capabilities it allows for jobless locking for advanced schedulers
       class JobsQueue < Karafka::Processing::JobsQueue
+        include Core::Helpers::Time
+
         attr_accessor :in_processing

+        # How long should we keep the async lock (31 years)
+        WAIT_TIMEOUT = 10_000_000_000
+
+        private_constant :WAIT_TIMEOUT
+
         # @return [Karafka::Pro::Processing::JobsQueue]
         def initialize
           super

           @in_waiting = Hash.new { |h, k| h[k] = [] }
+          @locks = Hash.new { |h, k| h[k] = {} }
+          @async_locking = false

           @statistics[:waiting] = 0
         end

+        # Registers a semaphore and a lock hash
+        #
+        # @param group_id [String]
+        def register(group_id)
+          super
+          @mutex.synchronize do
+            @locks[group_id]
+          end
+        end
+
         # Method that allows us to lock the queue on a given subscription group without enqueuing a
         # job. This can be used when building complex schedulers that want to postpone enqueuing
         # before certain conditions are met.
@@ -64,6 +83,48 @@ module Karafka
           end
         end

+        # Allows for explicit locking of the queue of a given subscription group.
+        #
+        # This can be used for cross-topic synchronization.
+        #
+        # @param group_id [String] id of the group we want to lock
+        # @param lock_id [Object] unique id we want to use to identify our lock
+        # @param timeout [Integer] number of ms this lock should remain valid for. Useful for
+        #   auto-expiring locks used to delay further processing without explicit pausing on
+        #   the consumer
+        #
+        # @note Unlike `#lock`, we do not raise `Errors::JobsQueueSynchronizationError` here
+        #   because we want to be able to prolong time-limited locks
+        def lock_async(group_id, lock_id, timeout: WAIT_TIMEOUT)
+          return if @queue.closed?
+
+          @async_locking = true
+
+          @mutex.synchronize do
+            @locks[group_id][lock_id] = monotonic_now + timeout
+
+            # We need to tick so our new time-sensitive lock can reload time constraints on sleep
+            tick(group_id)
+          end
+        end
+
+        # Allows for explicit unlocking of a locked queue of a group
+        #
+        # @param group_id [String] id of the group we want to unlock
+        # @param lock_id [Object] unique id we want to use to identify our lock
+        #
+        def unlock_async(group_id, lock_id)
+          @mutex.synchronize do
+            if @locks[group_id].delete(lock_id)
+              tick(group_id)
+
+              return
+            end
+
+            raise(Errors::JobsQueueSynchronizationError, [group_id, lock_id])
+          end
+        end
+
         # Clears the processing states for a provided group. Useful when a recovery happens and we
         # need to clean up state but only for a given subscription group.
         #
@@ -74,6 +135,8 @@ module Karafka

           @statistics[:waiting] -= @in_waiting[group_id].size
           @in_waiting[group_id].clear
+          @locks[group_id].clear
+          @async_locking = false

           # We unlock it just in case it was blocked when clearing started
           tick(group_id)
@@ -87,21 +150,75 @@ module Karafka
         def empty?(group_id)
           @mutex.synchronize do
             @in_processing[group_id].empty? &&
-              @in_waiting[group_id].empty?
+              @in_waiting[group_id].empty? &&
+              !locked_async?(group_id)
+          end
+        end
+
+        # Blocks when there are things in the queue in a given group and waits until all the
+        # blocking jobs from a given group are completed or any of the locks times out
+        # @param group_id [String] id of the group in which jobs we're interested.
+        # @see `Karafka::Processing::JobsQueue`
+        #
+        # @note Because the check that async locking is on happens on regular ticking, the first
+        #   lock on a group can take up to one tick. That is expected.
+        #
+        # @note This implementation takes into consideration temporary async locks that can happen.
+        #   Thanks to the fact that we use the minimum lock time as a timeout, we do not have to
+        #   wait a whole ticking period to unlock async locks.
+        def wait(group_id)
+          return super unless @async_locking
+
+          # We do not generalize this flow because this one is more expensive as it has to allocate
+          # extra objects. That's why we only use it when locks are actually in use
+          base_interval = tick_interval / 1_000.0
+
+          while wait?(group_id)
+            yield if block_given?
+
+            now = monotonic_now
+
+            wait_times = @locks[group_id].values.map! do |lock_time|
+              # Convert ms to seconds, as seconds are required by the Ruby queue engine
+              (lock_time - now) / 1_000
+            end
+
+            wait_times.delete_if(&:negative?)
+            wait_times << base_interval
+
+            @semaphores.fetch(group_id).pop(timeout: wait_times.min)
           end
         end

         private

+        # Tells us if a given group is locked
+        #
+        # @param group_id [String] id of the group in which we're interested.
+        # @return [Boolean] true if there are any active locks on the group, otherwise false
+        def locked_async?(group_id)
+          return false unless @async_locking
+
+          group = @locks[group_id]
+
+          return false if group.empty?
+
+          now = monotonic_now
+
+          group.delete_if { |_, wait_timeout| wait_timeout < now }
+
+          !group.empty?
+        end
+
         # @param group_id [String] id of the group in which jobs we're interested.
         # @return [Boolean] should we keep waiting or not
         # @note We do not wait for non-blocking jobs. Their flow should allow for `poll` running
         #   as they may exceed `max.poll.interval`
         def wait?(group_id)
-          !(
-            @in_processing[group_id].all?(&:non_blocking?) &&
-            @in_waiting[group_id].all?(&:non_blocking?)
-          )
+          return true unless @in_processing[group_id].all?(&:non_blocking?)
+          return true unless @in_waiting[group_id].all?(&:non_blocking?)
+
+          locked_async?(group_id)
         end
       end
     end
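
The locking API above (in data/lib/karafka/pro/processing/jobs_queue.rb) is what enables pausing a subscription group without enqueuing a job. A minimal usage sketch, assuming `Karafka::Server.jobs_queue` returns this Pro queue (as `SubscriptionGroupsCoordinator` below relies on) and that the group id was registered earlier; the id value is illustrative:

    # Hedged sketch: hold a subscription group for up to 5 seconds without
    # enqueuing any job; the lock auto-expires thanks to the timeout
    queue = Karafka::Server.jobs_queue
    group_id = 'subscription_group_0' # illustrative id

    queue.lock_async(group_id, :cooldown, timeout: 5_000)

    # Release earlier once the external condition is met; raises
    # Errors::JobsQueueSynchronizationError if the lock is no longer held
    queue.unlock_async(group_id, :cooldown)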
@@ -0,0 +1,67 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      # Namespace for periodic jobs related processing APIs
+      module PeriodicJob
+        # Consumer extra methods useful only when periodic jobs are in use
+        module Consumer
+          class << self
+            # Defines an empty `#tick` method if not present
+            #
+            # We define it that way due to our injection strategy flow.
+            #
+            # @param consumer_singleton_class [Karafka::BaseConsumer] consumer singleton class
+            #   that is being enriched with the periodic jobs API
+            def included(consumer_singleton_class)
+              # Do not define an empty tick method on the consumer if one already exists.
+              # We only define it when it does not exist, to get empty periodic ticking.
+              #
+              # We need to check both cases (public and private) since the user is not
+              # expected to have this method public
+              return if consumer_singleton_class.instance_methods.include?(:tick)
+              return if consumer_singleton_class.private_instance_methods.include?(:tick)
+
+              # Create empty ticking method
+              consumer_singleton_class.class_eval do
+                def tick; end
+              end
+            end
+          end
+
+          # Runs the on-schedule tick periodic operations.
+          # This method is an alias but is part of the naming convention used for other flows;
+          # this is why we do not reference `handle_before_schedule_tick` directly
+          def on_before_schedule_tick
+            handle_before_schedule_tick
+          end
+
+          # Used by the executor to trigger consumer tick
+          # @private
+          def on_tick
+            handle_tick
+          rescue StandardError => e
+            Karafka.monitor.instrument(
+              'error.occurred',
+              error: e,
+              caller: self,
+              type: 'consumer.tick.error'
+            )
+          end
+        end
+      end
+    end
+  end
+end
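
With this module included, a consumer only needs to override `#tick` to run periodic work; otherwise the injected empty `#tick` keeps ticking as a no-op. A hedged sketch of a consumer using this flow; the `periodic true` routing flag follows the Pro periodic jobs convention and is an assumption here, as are the class names:

    # Illustrative consumer; Karafka invokes #tick on schedule even when no
    # new messages arrived for the assignment
    class MaintenanceConsumer < ApplicationConsumer
      def consume
        messages.each { |message| puts message.raw_payload }
      end

      # Without this override, the injected empty #tick would be used
      def tick
        Karafka.logger.info("Periodic tick on #{topic.name}")
      end
    end

    # Assumed routing flag enabling periodic jobs for this topic
    topic :maintenance do
      consumer MaintenanceConsumer
      periodic true
    end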
@@ -0,0 +1,126 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      # All code needed for messages piping in Karafka
+      module Piping
+        # Consumer piping functionality
+        #
+        # It provides a way to pipe data in a consistent way, with extra traceability headers
+        # similar to those in the enhanced DLQ.
+        module Consumer
+          # Empty hash to save on memory allocations
+          EMPTY_HASH = {}.freeze
+
+          private_constant :EMPTY_HASH
+
+          # Pipes a given message to the provided topic with expected details. Useful for
+          # pass-through operations where deserialization is not needed. Upon usage it will
+          # include all the original headers + meta headers about the source of the message.
+          #
+          # @param topic [String, Symbol] where we want to send the message
+          # @param message [Karafka::Messages::Message] original message to pipe
+          #
+          # @note It will NOT deserialize the payload, so it is fast
+          #
+          # @note We assume that there can be a different number of partitions in the target
+          #   topic. This is why we use a `key` based on the original topic partition number
+          #   and not the partition id itself. This will not utilize partitions beyond the
+          #   number of partitions of the original topic, but will accommodate topics with
+          #   fewer partitions.
+          def pipe_async(topic:, message:)
+            produce_async(
+              build_pipe_message(topic: topic, message: message)
+            )
+          end
+
+          # Sync version of pipe for one message
+          #
+          # @param topic [String, Symbol] where we want to send the message
+          # @param message [Karafka::Messages::Message] original message to pipe
+          # @see [#pipe_async]
+          def pipe_sync(topic:, message:)
+            produce_sync(
+              build_pipe_message(topic: topic, message: message)
+            )
+          end
+
+          # Async multi-message pipe
+          #
+          # @param topic [String, Symbol] where we want to send the messages
+          # @param messages [Array<Karafka::Messages::Message>] original messages to pipe
+          #
+          # @note If a transactional producer is in use and the dispatch is not wrapped with a
+          #   transaction, it will automatically wrap the dispatch with a transaction
+          def pipe_many_async(topic:, messages:)
+            messages = messages.map do |message|
+              build_pipe_message(topic: topic, message: message)
+            end
+
+            produce_many_async(messages)
+          end
+
+          # Sync multi-message pipe
+          #
+          # @param topic [String, Symbol] where we want to send the messages
+          # @param messages [Array<Karafka::Messages::Message>] original messages to pipe
+          #
+          # @note If a transactional producer is in use and the dispatch is not wrapped with a
+          #   transaction, it will automatically wrap the dispatch with a transaction
+          def pipe_many_sync(topic:, messages:)
+            messages = messages.map do |message|
+              build_pipe_message(topic: topic, message: message)
+            end
+
+            produce_many_sync(messages)
+          end
+
+          private
+
+          # @param topic [String, Symbol] where we want to send the message
+          # @param message [Karafka::Messages::Message] original message to pipe
+          # @return [Hash] hash with the message to pipe
+          #
+          # @note If you need to alter this, please define the `#enhance_pipe_message` method
+          def build_pipe_message(topic:, message:)
+            original_partition = message.partition.to_s
+
+            pipe_message = {
+              topic: topic,
+              key: original_partition,
+              payload: message.raw_payload,
+              headers: message.headers.merge(
+                'original_topic' => message.topic,
+                'original_partition' => original_partition,
+                'original_offset' => message.offset.to_s,
+                'original_consumer_group' => self.topic.consumer_group.id
+              )
+            }
+
+            # Optional method the user can define in the consumer to enhance the pipe message
+            # hash with some extra details if needed, or to replace the payload, etc.
+            if respond_to?(:enhance_pipe_message, true)
+              enhance_pipe_message(
+                pipe_message,
+                message
+              )
+            end
+
+            pipe_message
+          end
+        end
+      end
+    end
+  end
+end
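
In a consumer this allows raw pass-through forwarding: the traceability headers are attached by `build_pipe_message`, and the optional `#enhance_pipe_message` hook shown above can mutate the hash before dispatch. A minimal sketch with illustrative class and topic names:

    class EventsConsumer < ApplicationConsumer
      def consume
        # Forward every message to an audit topic without deserializing payloads
        messages.each do |message|
          pipe_async(topic: 'events_audit', message: message)
        end
      end

      private

      # Optional hook detected via respond_to? in build_pipe_message
      def enhance_pipe_message(pipe_message, _original_message)
        pipe_message[:headers]['forwarded_by'] = Karafka::App.config.client_id
      end
    end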
@@ -55,7 +55,7 @@ module Karafka
           apply_dlq_flow do
             skippable_message, = find_skippable_message
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -61,7 +61,7 @@ module Karafka
           apply_dlq_flow do
             skippable_message, = find_skippable_message
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -57,7 +57,7 @@ module Karafka
             # We can commit the offset here because we know that we skip it "forever" and
             # since AJ consumer commits the offset after each job, we also know that the
            # previous job was successful
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -55,7 +55,7 @@ module Karafka
             # We can commit the offset here because we know that we skip it "forever" and
             # since AJ consumer commits the offset after each job, we also know that the
             # previous job was successful
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -51,7 +51,7 @@ module Karafka
           apply_dlq_flow do
             skippable_message, = find_skippable_message
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -55,7 +55,7 @@ module Karafka
           apply_dlq_flow do
             skippable_message, = find_skippable_message
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -49,7 +49,7 @@ module Karafka
             # We can commit the offset here because we know that we skip it "forever" and
             # since AJ consumer commits the offset after each job, we also know that the
             # previous job was successful
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -54,7 +54,7 @@ module Karafka
             # Aj::DlqMom
             skippable_message, = find_skippable_message
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end
         end
       end
@@ -73,6 +73,8 @@ module Karafka
           Karafka.monitor.instrument('consumer.revoked', caller: self) do
             revoked
           end
+        ensure
+          coordinator.decrement(:revoked)
         end
       end
     end
@@ -220,7 +220,7 @@ module Karafka
         ensure
           # We need to decrease the number of jobs that this coordinator coordinates as it has
           # finished
-          coordinator.decrement
+          coordinator.decrement(:consume)
         end

         # Standard flow without any features
@@ -254,6 +254,8 @@ module Karafka
           Karafka.monitor.instrument('consumer.revoked', caller: self) do
             revoked
           end
+        ensure
+          coordinator.decrement(:revoked)
         end

         # No action needed for the tick standard flow
@@ -269,6 +271,8 @@ module Karafka
           Karafka.monitor.instrument('consumer.ticked', caller: self) do
             tick
           end
+        ensure
+          coordinator.decrement(:periodic)
         end
       end
     end
@@ -111,7 +111,8 @@ module Karafka
           # should not be cleaned as it should go to the DLQ
           raise(Cleaner::Errors::MessageCleanedError) if skippable_message.cleaned?

-          producer.produce_async(
+          producer.public_send(
+            topic.dead_letter_queue.dispatch_method,
             build_dlq_message(
               skippable_message
             )
@@ -134,7 +135,7 @@ module Karafka

           dispatch = lambda do
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-            mark_as_consumed(skippable_message)
+            mark_dispatched_to_dlq(skippable_message)
           end

           if dispatch_in_a_transaction?
@@ -157,7 +158,8 @@ module Karafka
               'original_topic' => topic.name,
               'original_partition' => original_partition,
               'original_offset' => skippable_message.offset.to_s,
-              'original_consumer_group' => topic.consumer_group.id
+              'original_consumer_group' => topic.consumer_group.id,
+              'original_attempts' => attempt.to_s
             )
           }

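With the attempt count added, a DLQ consumer gets full tracing metadata from headers alone. A short sketch with an illustrative consumer class reading the keys set above:

    class DeadMessagesConsumer < ApplicationConsumer
      def consume
        messages.each do |message|
          h = message.headers

          Karafka.logger.warn(
            "DLQ: #{h['original_topic']}/#{h['original_partition']} " \
            "offset #{h['original_offset']} after #{h['original_attempts']} attempts"
          )
        end
      end
    end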
@@ -210,14 +212,28 @@ module Karafka
             raise Karafka::UnsupportedCaseError, flow
           end

+          yield
+
           # We reset the pause to indicate we will now consider it as "ok".
           coordinator.pause_tracker.reset

-          yield
-
           # Always backoff after DLQ dispatch even on skip to prevent overloads on errors
           pause(coordinator.seek_offset, nil, false)
         end
+
+        # Marks the message that went to the DLQ (if applicable) based on the requested method
+        # @param skippable_message [Karafka::Messages::Message]
+        def mark_dispatched_to_dlq(skippable_message)
+          case topic.dead_letter_queue.marking_method
+          when :mark_as_consumed
+            mark_as_consumed(skippable_message)
+          when :mark_as_consumed!
+            mark_as_consumed!(skippable_message)
+          else
+            # This should never happen. Bug if encountered. Please report
+            raise Karafka::Errors::UnsupportedCaseError
+          end
+        end
       end
     end
   end
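
Both knobs are read from the DLQ routing config: `dispatch_method` selects the producer call, `marking_method` the offset-marking flavor. A hedged routing sketch; the option names follow the attributes referenced in this diff, while the defaults and value sets are assumptions:

    class KarafkaApp < Karafka::App
      routes.draw do
        topic :orders_states do
          consumer OrdersStatesConsumer

          dead_letter_queue(
            topic: 'dead_messages',
            max_retries: 2,
            # assumed: :produce_async (default) or :produce_sync
            dispatch_method: :produce_sync,
            # assumed: :mark_as_consumed (default) or :mark_as_consumed!
            marking_method: :mark_as_consumed!
          )
        end
      end
    end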
@@ -76,6 +76,8 @@ module Karafka
           Karafka.monitor.instrument('consumer.revoked', caller: self) do
             revoked
           end
+        ensure
+          coordinator.decrement(:revoked)
         end

         # Allows for LRJ to synchronize its work. It may be needed because LRJ can run
@@ -68,6 +68,8 @@ module Karafka
           Karafka.monitor.instrument('consumer.revoked', caller: self) do
             revoked
           end
+        ensure
+          coordinator.decrement(:revoked)
         end
       end
     end
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      # Uses the jobs queue API to lock (pause) and unlock (resume) operations of a given
+      # subscription group. It is abstracted away from the jobs queue on this layer because we
+      # do not want to introduce the jobs queue as a concept to the consumers layer
+      class SubscriptionGroupsCoordinator
+        include Singleton
+
+        # @param subscription_group [Karafka::Routing::SubscriptionGroup] subscription group we
+        #   want to pause
+        # @param lock_id [Object] key we want to use if we want to set multiple locks on the
+        #   same subscription group
+        # @param kwargs [Object] any keyword arguments accepted by the jobs queue lock
+        def pause(subscription_group, lock_id = nil, **kwargs)
+          jobs_queue.lock_async(
+            subscription_group.id,
+            lock_id,
+            **kwargs
+          )
+        end
+
+        # @param subscription_group [Karafka::Routing::SubscriptionGroup] subscription group we
+        #   want to resume
+        # @param lock_id [Object] lock id (if it was used to pause)
+        def resume(subscription_group, lock_id = nil)
+          jobs_queue.unlock_async(subscription_group.id, lock_id)
+        end
+
+        private
+
+        # @return [Karafka::Pro::Processing::JobsQueue]
+        def jobs_queue
+          @jobs_queue ||= Karafka::Server.jobs_queue
+        end
+      end
+    end
+  end
+end
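
Because it is a singleton, the coordinator can be reached from anywhere, including consumers. A hedged sketch, assuming the consumer can resolve its routing subscription group via `topic.subscription_group` (an assumption here, as is the class name):

    class ThrottledConsumer < ApplicationConsumer
      def consume
        messages.each { |message| puts message.offset }

        # Hold scheduling for this subscription group for 5 seconds; the lock
        # auto-expires via lock_async's timeout, so no explicit resume is needed
        Karafka::Pro::Processing::SubscriptionGroupsCoordinator.instance.pause(
          topic.subscription_group,
          :cooldown,
          timeout: 5_000
        )
      end
    end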
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Routing
+      module Features
+        class DirectAssignments < Base
+          # Direct assignments feature configuration
+          Config = Struct.new(:active, :partitions, keyword_init: true) do
+            alias_method :active?, :active
+          end
+        end
+      end
+    end
+  end
+end
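
The `partitions` field of this struct carries the explicit assignment map built in routing, and `active?` tells the subscription group builder to bypass the consumer group rebalance protocol. A hedged routing sketch; the `assign` DSL name mirrors the feature's `topic.rb` extension in this diff, but its exact signature is an assumption:

    class KarafkaApp < Karafka::App
      routes.draw do
        topic :events do
          consumer EventsConsumer
          # assumed DSL: consume only partitions 0 and 1 via direct assignment
          assign(0, 1)
        end
      end
    end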