karafka 2.3.1 → 2.3.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (70)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.rspec +2 -0
  4. data/CHANGELOG.md +12 -0
  5. data/Gemfile.lock +6 -6
  6. data/bin/integrations +2 -1
  7. data/bin/rspecs +6 -2
  8. data/config/locales/errors.yml +30 -8
  9. data/config/locales/pro_errors.yml +2 -0
  10. data/docker-compose.yml +1 -1
  11. data/lib/karafka/app.rb +14 -0
  12. data/lib/karafka/cli/base.rb +19 -0
  13. data/lib/karafka/cli/server.rb +62 -76
  14. data/lib/karafka/cli/swarm.rb +30 -0
  15. data/lib/karafka/constraints.rb +3 -3
  16. data/lib/karafka/contracts/config.rb +19 -0
  17. data/lib/karafka/errors.rb +12 -0
  18. data/lib/karafka/helpers/config_importer.rb +30 -0
  19. data/lib/karafka/instrumentation/logger_listener.rb +31 -0
  20. data/lib/karafka/instrumentation/notifications.rb +9 -0
  21. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +2 -0
  22. data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +72 -0
  23. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +11 -40
  24. data/lib/karafka/instrumentation/vendors/kubernetes/swarm_liveness_listener.rb +54 -0
  25. data/lib/karafka/pro/active_job/job_options_contract.rb +1 -1
  26. data/lib/karafka/pro/base_consumer.rb +16 -0
  27. data/lib/karafka/pro/connection/manager.rb +6 -1
  28. data/lib/karafka/pro/processing/coordinator.rb +13 -3
  29. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +74 -0
  30. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +107 -0
  31. data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +180 -0
  32. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +5 -7
  33. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +5 -7
  34. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +8 -10
  35. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +8 -16
  36. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +5 -7
  37. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +5 -7
  38. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +8 -10
  39. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +7 -9
  40. data/lib/karafka/pro/processing/strategies/dlq/default.rb +36 -10
  41. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +3 -7
  42. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +4 -8
  43. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +6 -9
  44. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +5 -15
  45. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +4 -8
  46. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +6 -9
  47. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +10 -20
  48. data/lib/karafka/pro/processing/strategies/vp/default.rb +7 -0
  49. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +6 -0
  50. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +39 -0
  51. data/lib/karafka/pro/swarm/liveness_listener.rb +171 -0
  52. data/lib/karafka/process.rb +27 -1
  53. data/lib/karafka/routing/features/dead_letter_queue/config.rb +2 -0
  54. data/lib/karafka/routing/subscription_group.rb +31 -9
  55. data/lib/karafka/server.rb +11 -13
  56. data/lib/karafka/setup/config.rb +41 -2
  57. data/lib/karafka/status.rb +4 -2
  58. data/lib/karafka/swarm/liveness_listener.rb +55 -0
  59. data/lib/karafka/swarm/manager.rb +217 -0
  60. data/lib/karafka/swarm/node.rb +179 -0
  61. data/lib/karafka/swarm/pidfd.rb +131 -0
  62. data/lib/karafka/swarm/supervisor.rb +184 -0
  63. data/lib/karafka/swarm.rb +27 -0
  64. data/lib/karafka/version.rb +1 -1
  65. data/lib/karafka.rb +1 -1
  66. data.tar.gz.sig +0 -0
  67. metadata +17 -4
  68. metadata.gz.sig +0 -0
  69. data/lib/karafka/pro/processing/filters_applier.rb +0 -105
  70. data/lib/karafka/pro/processing/virtual_offset_manager.rb +0 -177
@@ -51,14 +51,12 @@ module Karafka
51
51
  else
52
52
  resume
53
53
  end
54
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
55
- retry_after_pause
56
54
  else
57
- coordinator.pause_tracker.reset
58
- skippable_message, = find_skippable_message
59
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
60
- mark_as_consumed(skippable_message)
61
- pause(coordinator.seek_offset, nil, false)
55
+ apply_dlq_flow do
56
+ skippable_message, = find_skippable_message
57
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
58
+ mark_as_consumed(skippable_message)
59
+ end
62
60
  end
63
61
  end
64
62
  end
@@ -57,14 +57,12 @@ module Karafka
57
57
  else
58
58
  resume
59
59
  end
60
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
61
- retry_after_pause
62
60
  else
63
- coordinator.pause_tracker.reset
64
- skippable_message, = find_skippable_message
65
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
66
- mark_as_consumed(skippable_message)
67
- pause(coordinator.seek_offset, nil, false)
61
+ apply_dlq_flow do
62
+ skippable_message, = find_skippable_message
63
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
64
+ mark_as_consumed(skippable_message)
65
+ end
68
66
  end
69
67
  end
70
68
  end
@@ -44,8 +44,6 @@ module Karafka
44
44
  return if coordinator.manual_pause?
45
45
 
46
46
  handle_post_filtering
47
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
48
- retry_after_pause
49
47
  # If we've reached number of retries that we could, we need to skip the first
50
48
  # message that was not marked as consumed, pause and continue, while also moving
51
49
  # this message to the dead topic.
@@ -53,14 +51,14 @@ module Karafka
53
51
  # For a Mom setup, this means, that user has to manage the checkpointing by
54
52
  # himself. If no checkpointing is ever done, we end up with an endless loop.
55
53
  else
56
- coordinator.pause_tracker.reset
57
- skippable_message, = find_skippable_message
58
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
59
- # We can commit the offset here because we know that we skip it "forever" and
60
- # since AJ consumer commits the offset after each job, we also know that the
61
- # previous job was successful
62
- mark_as_consumed(skippable_message)
63
- pause(coordinator.seek_offset, nil, false)
54
+ apply_dlq_flow do
55
+ skippable_message, = find_skippable_message
56
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
57
+ # We can commit the offset here because we know that we skip it "forever" and
58
+ # since AJ consumer commits the offset after each job, we also know that the
59
+ # previous job was successful
60
+ mark_as_consumed(skippable_message)
61
+ end
64
62
  end
65
63
  end
66
64
  end
@@ -48,23 +48,15 @@ module Karafka
48
48
  mark_as_consumed(last_group_message)
49
49
 
50
50
  handle_post_filtering
51
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
52
- retry_after_pause
53
- # If we've reached number of retries that we could, we need to skip the first
54
- # message that was not marked as consumed, pause and continue, while also moving
55
- # this message to the dead topic.
56
- #
57
- # For a Mom setup, this means, that user has to manage the checkpointing by
58
- # himself. If no checkpointing is ever done, we end up with an endless loop.
59
51
  else
60
- coordinator.pause_tracker.reset
61
- skippable_message, = find_skippable_message
62
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
63
- # We can commit the offset here because we know that we skip it "forever" and
64
- # since AJ consumer commits the offset after each job, we also know that the
65
- # previous job was successful
66
- mark_as_consumed(skippable_message)
67
- pause(coordinator.seek_offset, nil, false)
52
+ apply_dlq_flow do
53
+ skippable_message, = find_skippable_message
54
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
55
+ # We can commit the offset here because we know that we skip it "forever" and
56
+ # since AJ consumer commits the offset after each job, we also know that the
57
+ # previous job was successful
58
+ mark_as_consumed(skippable_message)
59
+ end
68
60
  end
69
61
  end
70
62
  end
@@ -47,14 +47,12 @@ module Karafka
47
47
  seek(coordinator.seek_offset, false) unless revoked?
48
48
 
49
49
  resume
50
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
51
- retry_after_pause
52
50
  else
53
- coordinator.pause_tracker.reset
54
- skippable_message, = find_skippable_message
55
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
56
- mark_as_consumed(skippable_message)
57
- pause(coordinator.seek_offset, nil, false)
51
+ apply_dlq_flow do
52
+ skippable_message, = find_skippable_message
53
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
54
+ mark_as_consumed(skippable_message)
55
+ end
58
56
  end
59
57
  end
60
58
  end
@@ -51,14 +51,12 @@ module Karafka
51
51
  seek(coordinator.seek_offset, false) unless revoked?
52
52
 
53
53
  resume
54
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
55
- retry_after_pause
56
54
  else
57
- coordinator.pause_tracker.reset
58
- skippable_message, = find_skippable_message
59
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
60
- mark_as_consumed(skippable_message)
61
- pause(coordinator.seek_offset, nil, false)
55
+ apply_dlq_flow do
56
+ skippable_message, = find_skippable_message
57
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
58
+ mark_as_consumed(skippable_message)
59
+ end
62
60
  end
63
61
  end
64
62
  end
@@ -42,17 +42,15 @@ module Karafka
42
42
  if coordinator.success?
43
43
  # Do NOT commit offsets, they are committed after each job in the AJ consumer.
44
44
  coordinator.pause_tracker.reset
45
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
46
- retry_after_pause
47
45
  else
48
- coordinator.pause_tracker.reset
49
- skippable_message, = find_skippable_message
50
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
51
- # We can commit the offset here because we know that we skip it "forever" and
52
- # since AJ consumer commits the offset after each job, we also know that the
53
- # previous job was successful
54
- mark_as_consumed(skippable_message)
55
- pause(coordinator.seek_offset, nil, false)
46
+ apply_dlq_flow do
47
+ skippable_message, = find_skippable_message
48
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
49
+ # We can commit the offset here because we know that we skip it "forever" and
50
+ # since AJ consumer commits the offset after each job, we also know that the
51
+ # previous job was successful
52
+ mark_as_consumed(skippable_message)
53
+ end
56
54
  end
57
55
  end
58
56
  end
@@ -48,16 +48,14 @@ module Karafka
48
48
  return if revoked?
49
49
 
50
50
  mark_as_consumed(last_group_message)
51
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
52
- retry_after_pause
53
51
  else
54
- # Here we are in a collapsed state, hence we can apply the same logic as
55
- # Aj::DlqMom
56
- coordinator.pause_tracker.reset
57
- skippable_message, = find_skippable_message
58
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
59
- mark_as_consumed(skippable_message)
60
- pause(coordinator.seek_offset, nil, false)
52
+ apply_dlq_flow do
53
+ # Here we are in a collapsed state, hence we can apply the same logic as
54
+ # Aj::DlqMom
55
+ skippable_message, = find_skippable_message
56
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
57
+ mark_as_consumed(skippable_message)
58
+ end
61
59
  end
62
60
  end
63
61
  end
@@ -76,16 +76,10 @@ module Karafka
76
76
  return if coordinator.manual_pause?
77
77
 
78
78
  mark_as_consumed(last_group_message)
79
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
80
- retry_after_pause
81
- # If we've reached number of retries that we could, we need to skip the first
82
- # message that was not marked as consumed, pause and continue, while also moving
83
- # this message to the dead topic
84
79
  else
85
- # We reset the pause to indicate we will now consider it as "ok".
86
- coordinator.pause_tracker.reset
87
- dispatch_if_needed_and_mark_as_consumed
88
- pause(coordinator.seek_offset, nil, false)
80
+ apply_dlq_flow do
81
+ dispatch_if_needed_and_mark_as_consumed
82
+ end
89
83
  end
90
84
  end
91
85
  end
@@ -183,7 +177,10 @@ module Karafka
183
177
  # topic is set to false, we will skip the dispatch, effectively ignoring the broken
184
178
  # message without taking any action.
185
179
  def dispatch_to_dlq?
186
- topic.dead_letter_queue.topic
180
+ return false unless topic.dead_letter_queue.topic
181
+ return false unless @_dispatch_to_dlq
182
+
183
+ true
187
184
  end
188
185
 
189
186
  # @return [Boolean] should we use a transaction to move the data to the DLQ.
@@ -192,6 +189,35 @@ module Karafka
192
189
  def dispatch_in_a_transaction?
193
190
  producer.transactional? && topic.dead_letter_queue.transactional?
194
191
  end
192
+
193
+ # Runs the DLQ strategy and based on it it performs certain operations
194
+ #
195
+ # In case of `:skip` and `:dispatch` will run the exact flow provided in a block
196
+ # In case of `:retry` always `#retry_after_pause` is applied
197
+ def apply_dlq_flow
198
+ flow = topic.dead_letter_queue.strategy.call(errors_tracker, attempt)
199
+
200
+ case flow
201
+ when :retry
202
+ retry_after_pause
203
+
204
+ return
205
+ when :skip
206
+ @_dispatch_to_dlq = false
207
+ when :dispatch
208
+ @_dispatch_to_dlq = true
209
+ else
210
+ raise Karafka::UnsupportedCaseError, flow
211
+ end
212
+
213
+ # We reset the pause to indicate we will now consider it as "ok".
214
+ coordinator.pause_tracker.reset
215
+
216
+ yield
217
+
218
+ # Always backoff after DLQ dispatch even on skip to prevent overloads on errors
219
+ pause(coordinator.seek_offset, nil, false)
220
+ end
195
221
  end
196
222
  end
197
223
  end
@@ -42,14 +42,10 @@ module Karafka
42
42
  mark_as_consumed(last_group_message)
43
43
 
44
44
  handle_post_filtering
45
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
46
- retry_after_pause
47
45
  else
48
- coordinator.pause_tracker.reset
49
-
50
- dispatch_if_needed_and_mark_as_consumed
51
-
52
- pause(coordinator.seek_offset, nil, false)
46
+ apply_dlq_flow do
47
+ dispatch_if_needed_and_mark_as_consumed
48
+ end
53
49
  end
54
50
  end
55
51
  end
@@ -53,16 +53,12 @@ module Karafka
53
53
  else
54
54
  resume
55
55
  end
56
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
57
- retry_after_pause
58
56
  else
59
- coordinator.pause_tracker.reset
60
-
61
- return resume if revoked?
57
+ apply_dlq_flow do
58
+ return resume if revoked?
62
59
 
63
- dispatch_if_needed_and_mark_as_consumed
64
-
65
- pause(coordinator.seek_offset, nil, false)
60
+ dispatch_if_needed_and_mark_as_consumed
61
+ end
66
62
  end
67
63
  end
68
64
  end
@@ -48,18 +48,15 @@ module Karafka
48
48
  else
49
49
  resume
50
50
  end
51
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
52
- retry_after_pause
53
51
  else
54
- coordinator.pause_tracker.reset
55
-
56
- return resume if revoked?
52
+ apply_dlq_flow do
53
+ return resume if revoked?
57
54
 
58
- skippable_message, _marked = find_skippable_message
59
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
55
+ skippable_message, _marked = find_skippable_message
56
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
60
57
 
61
- coordinator.seek_offset = skippable_message.offset + 1
62
- pause(coordinator.seek_offset, nil, false)
58
+ coordinator.seek_offset = skippable_message.offset + 1
59
+ end
63
60
  end
64
61
  end
65
62
  end
@@ -41,23 +41,13 @@ module Karafka
41
41
  return if coordinator.manual_pause?
42
42
 
43
43
  handle_post_filtering
44
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
45
- retry_after_pause
46
- # If we've reached number of retries that we could, we need to skip the first
47
- # message that was not marked as consumed, pause and continue, while also moving
48
- # this message to the dead topic.
49
- #
50
- # For a Mom setup, this means, that user has to manage the checkpointing by
51
- # himself. If no checkpointing is ever done, we end up with an endless loop.
52
44
  else
53
- # We reset the pause to indicate we will now consider it as "ok".
54
- coordinator.pause_tracker.reset
55
-
56
- skippable_message, _marked = find_skippable_message
57
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
45
+ apply_dlq_flow do
46
+ skippable_message, _marked = find_skippable_message
47
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
58
48
 
59
- coordinator.seek_offset = skippable_message.offset + 1
60
- pause(coordinator.seek_offset, nil, false)
49
+ coordinator.seek_offset = skippable_message.offset + 1
50
+ end
61
51
  end
62
52
  end
63
53
  end
@@ -42,16 +42,12 @@ module Karafka
42
42
  seek(coordinator.seek_offset, false) unless revoked? || coordinator.manual_seek?
43
43
 
44
44
  resume
45
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
46
- retry_after_pause
47
45
  else
48
- coordinator.pause_tracker.reset
49
-
50
- return resume if revoked?
51
-
52
- dispatch_if_needed_and_mark_as_consumed
46
+ apply_dlq_flow do
47
+ return resume if revoked?
53
48
 
54
- pause(coordinator.seek_offset, nil, false)
49
+ dispatch_if_needed_and_mark_as_consumed
50
+ end
55
51
  end
56
52
  end
57
53
  end
@@ -42,18 +42,15 @@ module Karafka
42
42
  end
43
43
 
44
44
  resume
45
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
46
- retry_after_pause
47
45
  else
48
- coordinator.pause_tracker.reset
49
-
50
- return resume if revoked?
46
+ apply_dlq_flow do
47
+ return resume if revoked?
51
48
 
52
- skippable_message, _marked = find_skippable_message
53
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
49
+ skippable_message, _marked = find_skippable_message
50
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
54
51
 
55
- coordinator.seek_offset = skippable_message.offset + 1
56
- pause(coordinator.seek_offset, nil, false)
52
+ coordinator.seek_offset = skippable_message.offset + 1
53
+ end
57
54
  end
58
55
  end
59
56
  end
@@ -35,28 +35,18 @@ module Karafka
35
35
 
36
36
  if coordinator.success?
37
37
  coordinator.pause_tracker.reset
38
- elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
39
- retry_after_pause
40
- # If we've reached number of retries that we could, we need to skip the first
41
- # message that was not marked as consumed, pause and continue, while also moving
42
- # this message to the dead topic.
43
- #
44
- # For a Mom setup, this means, that user has to manage the checkpointing by
45
- # himself. If no checkpointing is ever done, we end up with an endless loop.
46
38
  else
47
- # We reset the pause to indicate we will now consider it as "ok".
48
- coordinator.pause_tracker.reset
49
-
50
- skippable_message, = find_skippable_message
51
- dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
39
+ apply_dlq_flow do
40
+ skippable_message, = find_skippable_message
41
+ dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
52
42
 
53
- # Save the next offset we want to go with after moving given message to DLQ
54
- # Without this, we would not be able to move forward and we would end up
55
- # in an infinite loop trying to un-pause from the message we've already processed
56
- # Of course, since it's a MoM a rebalance or kill, will move it back as no
57
- # offsets are being committed
58
- coordinator.seek_offset = skippable_message.offset + 1
59
- pause(coordinator.seek_offset, nil, false)
43
+ # Save the next offset we want to go with after moving given message to DLQ
44
+ # Without this, we would not be able to move forward and we would end up
45
+ # in an infinite loop trying to un-pause from the message we've already
46
+ # processed. Of course, since it's a MoM a rebalance or kill, will move it back
47
+ # as no offsets are being committed
48
+ coordinator.seek_offset = skippable_message.offset + 1
49
+ end
60
50
  end
61
51
  end
62
52
  end
@@ -155,6 +155,13 @@ module Karafka
155
155
  def handle_before_schedule_consume
156
156
  super
157
157
 
158
+ # We should not register offsets in virtual manager when in collapse as virtual
159
+ # manager is not used then for offsets materialization.
160
+ #
161
+ # If we would do so, it would cause increased storage in cases of endless errors
162
+ # that are being retried in collapse without a DLQ.
163
+ return if collapsed?
164
+
158
165
  coordinator.virtual_offset_manager.register(
159
166
  messages.map(&:offset)
160
167
  )
@@ -28,6 +28,12 @@ module Karafka
28
28
  ).fetch('en').fetch('validations').fetch('topic')
29
29
  end
30
30
 
31
+ nested(:dead_letter_queue) do
32
+ # We use strategy based DLQ for every case in Pro
33
+ # For default (when no strategy) a default `max_retries` based strategy is used
34
+ required(:strategy) { |val| val.respond_to?(:call) }
35
+ end
36
+
31
37
  # Make sure that when we use virtual partitions with DLQ, at least one retry is set
32
38
  # We cannot use VP with DLQ without retries as we in order to provide ordering
33
39
  # warranties on errors with VP, we need to collapse the VPs concurrency and retry
@@ -0,0 +1,39 @@
1
+ # frozen_string_literal: true
2
+
3
+ # This Karafka component is a Pro component under a commercial license.
4
+ # This Karafka component is NOT licensed under LGPL.
5
+ #
6
+ # All of the commercial components are present in the lib/karafka/pro directory of this
7
+ # repository and their usage requires commercial license agreement.
8
+ #
9
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
10
+ #
11
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
12
+ # your code to Maciej Mensfeld.
13
+
14
+ module Karafka
15
+ module Pro
16
+ module Routing
17
+ module Features
18
+ class DeadLetterQueue < Base
19
+ # Expansions to the topic API in DLQ
20
+ module Topic
21
+ # @param strategy [#call, nil] Strategy we want to use or nil if a default strategy
22
+ # (same as in OSS) should be applied
23
+ # @param args [Hash] OSS DLQ arguments
24
+ def dead_letter_queue(strategy: nil, **args)
25
+ return @dead_letter_queue if @dead_letter_queue
26
+
27
+ super(**args).tap do |config|
28
+ # If explicit strategy is not provided, use the default approach from OSS
29
+ config.strategy = strategy || lambda do |_errors_tracker, attempt|
30
+ attempt > config.max_retries ? :dispatch : :retry
31
+ end
32
+ end
33
+ end
34
+ end
35
+ end
36
+ end
37
+ end
38
+ end
39
+ end