karafka 2.0.38 → 2.0.39

Files changed (113)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +28 -0
  6. data/Gemfile.lock +4 -4
  7. data/bin/integrations +1 -1
  8. data/config/locales/errors.yml +0 -7
  9. data/config/locales/pro_errors.yml +18 -0
  10. data/lib/karafka/base_consumer.rb +35 -55
  11. data/lib/karafka/connection/listener.rb +15 -10
  12. data/lib/karafka/errors.rb +0 -3
  13. data/lib/karafka/instrumentation/logger_listener.rb +44 -3
  14. data/lib/karafka/instrumentation/notifications.rb +4 -0
  15. data/lib/karafka/pro/active_job/consumer.rb +10 -1
  16. data/lib/karafka/pro/processing/coordinator.rb +13 -4
  17. data/lib/karafka/pro/processing/filters/base.rb +61 -0
  18. data/lib/karafka/pro/processing/filters/delayer.rb +70 -0
  19. data/lib/karafka/pro/processing/filters/expirer.rb +51 -0
  20. data/lib/karafka/pro/processing/filters/throttler.rb +84 -0
  21. data/lib/karafka/pro/processing/filters_applier.rb +100 -0
  22. data/lib/karafka/pro/processing/jobs_builder.rb +7 -3
  23. data/lib/karafka/pro/processing/scheduler.rb +24 -7
  24. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +68 -0
  25. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +74 -0
  26. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +72 -0
  27. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +76 -0
  28. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +62 -0
  29. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +68 -0
  30. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +64 -0
  31. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +69 -0
  32. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +38 -0
  33. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +64 -0
  34. data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +38 -0
  35. data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +58 -0
  36. data/lib/karafka/pro/processing/strategies/{dlq_lrj_vp.rb → aj/lrj_mom.rb} +14 -13
  37. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +77 -0
  38. data/lib/karafka/pro/processing/strategies/aj/mom.rb +36 -0
  39. data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +52 -0
  40. data/lib/karafka/pro/processing/strategies/dlq/default.rb +131 -0
  41. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +61 -0
  42. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +75 -0
  43. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +74 -0
  44. data/lib/karafka/pro/processing/strategies/{mom.rb → dlq/ftr_lrj_vp.rb} +16 -19
  45. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +73 -0
  46. data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +39 -0
  47. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +63 -0
  48. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +66 -0
  49. data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +38 -0
  50. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +67 -0
  51. data/lib/karafka/pro/processing/strategies/dlq/vp.rb +39 -0
  52. data/lib/karafka/pro/processing/strategies/ftr/default.rb +104 -0
  53. data/lib/karafka/pro/processing/strategies/ftr/vp.rb +40 -0
  54. data/lib/karafka/pro/processing/strategies/lrj/default.rb +85 -0
  55. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +69 -0
  56. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +67 -0
  57. data/lib/karafka/pro/processing/strategies/{vp.rb → lrj/ftr_vp.rb} +15 -13
  58. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +78 -0
  59. data/lib/karafka/pro/processing/strategies/{aj_lrj_mom.rb → lrj/vp.rb} +13 -12
  60. data/lib/karafka/pro/processing/strategies/mom/default.rb +46 -0
  61. data/lib/karafka/pro/processing/strategies/mom/ftr.rb +53 -0
  62. data/lib/karafka/pro/processing/strategies/vp/default.rb +53 -0
  63. data/lib/karafka/pro/processing/{strategies/lrj_vp.rb → strategies.rb} +1 -13
  64. data/lib/karafka/pro/processing/strategy_selector.rb +44 -18
  65. data/lib/karafka/pro/{processing/strategies/aj_mom.rb → routing/features/delaying/config.rb} +7 -13
  66. data/lib/karafka/pro/routing/features/delaying/contract.rb +38 -0
  67. data/lib/karafka/pro/routing/features/delaying/topic.rb +59 -0
  68. data/lib/karafka/pro/routing/features/delaying.rb +29 -0
  69. data/lib/karafka/pro/routing/features/expiring/config.rb +27 -0
  70. data/lib/karafka/pro/routing/features/expiring/contract.rb +38 -0
  71. data/lib/karafka/pro/routing/features/expiring/topic.rb +59 -0
  72. data/lib/karafka/pro/routing/features/expiring.rb +27 -0
  73. data/lib/karafka/pro/routing/features/filtering/config.rb +40 -0
  74. data/lib/karafka/pro/routing/features/filtering/contract.rb +41 -0
  75. data/lib/karafka/pro/routing/features/filtering/topic.rb +51 -0
  76. data/lib/karafka/pro/routing/features/filtering.rb +27 -0
  77. data/lib/karafka/pro/routing/features/long_running_job/contract.rb +1 -1
  78. data/lib/karafka/pro/routing/features/throttling/config.rb +32 -0
  79. data/lib/karafka/pro/routing/features/throttling/contract.rb +41 -0
  80. data/lib/karafka/pro/routing/features/throttling/topic.rb +69 -0
  81. data/lib/karafka/pro/routing/features/throttling.rb +30 -0
  82. data/lib/karafka/processing/coordinator.rb +60 -30
  83. data/lib/karafka/processing/coordinators_buffer.rb +5 -1
  84. data/lib/karafka/processing/executor.rb +23 -16
  85. data/lib/karafka/processing/executors_buffer.rb +10 -26
  86. data/lib/karafka/processing/jobs/consume.rb +2 -4
  87. data/lib/karafka/processing/jobs/idle.rb +24 -0
  88. data/lib/karafka/processing/jobs_builder.rb +2 -3
  89. data/lib/karafka/processing/result.rb +5 -0
  90. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +1 -1
  91. data/lib/karafka/processing/strategies/base.rb +5 -0
  92. data/lib/karafka/processing/strategies/default.rb +50 -0
  93. data/lib/karafka/processing/strategies/dlq.rb +13 -4
  94. data/lib/karafka/processing/strategies/dlq_mom.rb +8 -3
  95. data/lib/karafka/processing/strategy_selector.rb +27 -10
  96. data/lib/karafka/version.rb +1 -1
  97. data/renovate.json +6 -0
  98. data.tar.gz.sig +0 -0
  99. metadata +66 -22
  100. metadata.gz.sig +0 -0
  101. data/lib/karafka/pro/processing/strategies/aj_dlq_lrj_mom.rb +0 -42
  102. data/lib/karafka/pro/processing/strategies/aj_dlq_lrj_mom_vp.rb +0 -70
  103. data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb +0 -62
  104. data/lib/karafka/pro/processing/strategies/aj_dlq_mom_vp.rb +0 -68
  105. data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb +0 -75
  106. data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb +0 -62
  107. data/lib/karafka/pro/processing/strategies/dlq.rb +0 -120
  108. data/lib/karafka/pro/processing/strategies/dlq_lrj.rb +0 -65
  109. data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb +0 -62
  110. data/lib/karafka/pro/processing/strategies/dlq_mom.rb +0 -62
  111. data/lib/karafka/pro/processing/strategies/dlq_vp.rb +0 -37
  112. data/lib/karafka/pro/processing/strategies/lrj.rb +0 -83
  113. data/lib/karafka/pro/processing/strategies/lrj_mom.rb +0 -73
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 1b9653385cf5a3b1e27eae06d53b9761c9a1f265252f721773258459eb3df1e7
-  data.tar.gz: c0af983ab0539e8463bf2612068a6b261de1325078c3e8600b0d6df0f596d100
+  metadata.gz: 12fe8a47dc0ab16b0f7783424cd1aa043c2d2b228b4f4164f1cecefe604269d9
+  data.tar.gz: 9fa3bae282770dd67503c41ef4b73a27a38bfcff3bf472ddd63753d14d03614f
 SHA512:
-  metadata.gz: d9000a8f71d7fff762db5f567956f6ea68e436b428014c509ae233730c9f75fd6ac311e51b0022999dfdce64362c86dab6912ce549378d9def231e5749961140
-  data.tar.gz: f980261b5ada2f46efbf919aac86ab63da5bccce26639b9e7d98c07c6012cc3c727189a548627687092ee2802aca8df3d5459bcdcc8d9d29b35f2d6da92a64fc
+  metadata.gz: 9e6536c90a411a0b42337f73c00d9f454028366f42eabb1b7f40902181bcbcfd43258741d6fc51c6e29046b9ee1f8598755440d28a00ca96104a61a8095c20c2
+  data.tar.gz: be75dd1bfa744187f770f2e1f0deeedfba4f3fb1b824d6bab91f056f96e60a33498429e35ea22841404def0935e584db0df4289d2818631ace2e597d28785960
checksums.yaml.gz.sig CHANGED
Binary file
data/.github/workflows/ci.yml CHANGED
@@ -62,7 +62,7 @@ jobs:
         run: \curl -sSL https://api.coditsu.io/run/ci | bash
 
   specs:
-    timeout-minutes: 30
+    timeout-minutes: 45
     runs-on: ubuntu-latest
     needs: diffend
     strategy:
data/.ruby-version CHANGED
@@ -1 +1 @@
-3.2.1
+3.2.2
data/CHANGELOG.md CHANGED
@@ -1,5 +1,33 @@
 # Karafka framework changelog
 
+## 2.0.39 (2023-04-11)
+- **[Feature]** Provide ability to throttle/limit number of messages processed in a time unit (#1203)
+- **[Feature]** Provide Delayed Topics (#1000)
+- **[Feature]** Provide ability to expire messages (expiring topics)
+- **[Feature]** Provide ability to apply filters after messages are polled and before enqueued. This is a generic filter API for any usage.
+- [Improvement] When using ActiveJob with Virtual Partitions, Karafka will stop if collectively VPs are failing. This minimizes number of jobs that will be collectively re-processed.
+- [Improvement] `#retrying?` method has been added to consumers to provide ability to check, that we're reprocessing data after a failure. This is useful for branching out processing based on errors.
+- [Improvement] Track active_job_id in instrumentation (#1372)
+- [Improvement] Introduce new housekeeping job type called `Idle` for non-consumption execution flows.
+- [Improvement] Change how a manual offset management works with Long-Running Jobs. Use the last message offset to move forward instead of relying on the last message marked as consumed for a scenario where no message is marked.
+- [Improvement] Prioritize in Pro non-consumption jobs execution over consumption despite LJF. This will ensure, that housekeeping as well as other non-consumption events are not saturated when running a lot of work.
+- [Improvement] Normalize the DLQ behaviour with MoM. Always pause on dispatch for all the strategies.
+- [Improvement] Improve the manual offset management and DLQ behaviour when no markings occur for OSS.
+- [Improvement] Do not early stop ActiveJob work running under virtual partitions to prevent extensive reprocessing.
+- [Improvement] Drastically increase number of scenarios covered by integration specs (OSS and Pro).
+- [Improvement] Introduce a `Coordinator#synchronize` lock for cross virtual partitions operations.
+- [Fix] Do not resume partition that is not paused.
+- [Fix] Fix `LoggerListener` cases where logs would not include caller id (when available)
+- [Fix] Fix not working benchmark tests.
+- [Fix] Fix a case where when using manual offset management with a user pause would ignore the pause and seek to the next message.
+- [Fix] Fix a case where dead letter queue would go into an infinite loop on message with first ever offset if the first ever offset would not recover.
+- [Fix] Make sure to resume always for all LRJ strategies on revocation.
+- [Refactor] Make sure that coordinator is topic aware. Needed for throttling, delayed processing and expired jobs.
+- [Refactor] Put Pro strategies into namespaces to better organize multiple combinations.
+- [Refactor] Do not rely on messages metadata for internal topic and partition operations like `#seek` so they can run independently from the consumption flow.
+- [Refactor] Hold a single topic/partition reference on a coordinator instead of in executor, coordinator and consumer.
+- [Refactor] Move `#mark_as_consumed` and `#mark_as_consumed!` into `Strategies::Default` to be able to introduce marking for virtual partitions.
+
 ## 2.0.38 (2023-03-27)
 - [Improvement] Introduce `Karafka::Admin#read_watermark_offsets` to get low and high watermark offsets values.
 - [Improvement] Track active_job_id in instrumentation (#1372)
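
The four new features above are all opt-in, per-topic routing settings. A hypothetical configuration sketch (not part of this diff; the method names follow the DSL suggested by the added `topic.rb` files and should be verified against the Karafka Pro documentation):

  class KarafkaApp < Karafka::App
    routes.draw do
      topic :orders do
        consumer OrdersConsumer
        # Delayed Topics: only process messages that are at least 60s old
        delay_by(60_000)
        # Expiring: skip messages older than 5 minutes instead of processing them
        expire_in(300_000)
        # Throttling: at most 100 messages per 60s window
        throttle(limit: 100, interval: 60_000)
        # Generic filtering: the factory is invoked to build a filter per topic partition
        filter ->(*) { MyCustomFilter.new }
      end
    end
  end
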
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka (2.0.38)
+    karafka (2.0.39)
       karafka-core (>= 2.0.12, < 3.0.0)
       thor (>= 0.20)
       waterdrop (>= 2.4.10, < 3.0.0)
@@ -48,7 +48,7 @@ GEM
     rspec-expectations (3.12.2)
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.12.0)
-    rspec-mocks (3.12.3)
+    rspec-mocks (3.12.5)
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.12.0)
     rspec-support (3.12.0)
@@ -61,7 +61,7 @@ GEM
     thor (1.2.1)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
-    waterdrop (2.5.0)
+    waterdrop (2.5.1)
       karafka-core (>= 2.0.12, < 3.0.0)
       zeitwerk (~> 2.3)
     zeitwerk (2.6.7)
@@ -79,4 +79,4 @@ DEPENDENCIES
   simplecov
 
 BUNDLED WITH
-   2.4.7
+   2.4.10
data/bin/integrations CHANGED
@@ -25,7 +25,7 @@ ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../
 # we limit it. Locally we can run a lot of those, as many of them have sleeps and do not use a lot
 # of CPU. Locally we also cannot go beyond certain limit due to how often and how many topics we
 # create in Kafka. With an overloaded system, we start getting timeouts.
-CONCURRENCY = ENV.key?('CI') ? 4 : Etc.nprocessors * 2
+CONCURRENCY = ENV.key?('CI') ? 5 : Etc.nprocessors * 3
 
 # How many bytes do we want to keep from the stdout in the buffer for when we need to print it
 MAX_BUFFER_OUTPUT = 51_200
data/config/locales/errors.yml CHANGED
@@ -72,10 +72,3 @@ en:
       test:
         missing: needs to be present
         id_format: needs to be a String
-
-      pro_topic:
-        virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
-        virtual_partitions.max_partitions_format: needs to be equal or more than 1
-        manual_offset_management_not_with_virtual_partitions: cannot be used together with Virtual Partitions
-        long_running_job.active_format: needs to be either true or false
-        dead_letter_queue_not_with_virtual_partitions: cannot be used together with Virtual Partitions
data/config/locales/pro_errors.yml CHANGED
@@ -3,10 +3,28 @@ en:
   topic:
     virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
     virtual_partitions.max_partitions_format: needs to be equal or more than 1
+
     manual_offset_management_not_with_virtual_partitions: cannot be used together with Virtual Partitions
+
     long_running_job.active_format: needs to be either true or false
+
     dead_letter_queue_with_virtual_partitions: when using Dead Letter Queue with Virtual Partitions, at least one retry is required.
 
+    throttling.active_format: needs to be either true or false
+    throttling.limit_format: needs to be equal or more than 1
+    throttling.interval_format: needs to be equal or more than 1
+
+    filtering.active_missing: needs to be present
+    filtering.factory_format: 'needs to respond to #call'
+    filtering.factories_format: 'needs to contain only factories responding to #call'
+    filtering.active_format: 'needs to be boolean'
+
+    expiring.ttl_format: 'needs to be equal or more than 0 and an integer'
+    expiring.active_format: 'needs to be boolean'
+
+    delaying.delay_format: 'needs to be equal or more than 0 and an integer'
+    delaying.active_format: 'needs to be boolean'
+
   config:
     encryption.active_format: 'needs to be either true or false'
     encryption.public_key_invalid: 'is not a valid public RSA key'
data/lib/karafka/base_consumer.rb CHANGED
@@ -7,11 +7,13 @@ module Karafka
     # Allow for consumer instance tagging for instrumentation
     include ::Karafka::Core::Taggable
 
+    extend Forwardable
+
+    def_delegators :@coordinator, :topic, :partition
+
     # @return [String] id of the current consumer
     attr_reader :id
-    # @return [Karafka::Routing::Topic] topic to which a given consumer is subscribed
-    attr_accessor :topic
     # @return [Karafka::Messages::Messages] current messages batch
     attr_accessor :messages
     # @return [Karafka::Connection::Client] kafka connection client
     attr_accessor :client
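
The delegation above means `consumer.topic` and `consumer.partition` now read through to the coordinator instead of being stored on the consumer itself. For readers unfamiliar with `Forwardable`, a minimal standalone illustration of the same pattern (not Karafka code):

  require 'forwardable'

  Coordinator = Struct.new(:topic, :partition)

  class Consumer
    extend Forwardable

    # Defines #topic and #partition that forward to @coordinator.topic / @coordinator.partition
    def_delegators :@coordinator, :topic, :partition

    def initialize(coordinator)
      @coordinator = coordinator
    end
  end

  Consumer.new(Coordinator.new('orders', 0)).topic #=> "orders"
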
@@ -97,6 +99,20 @@ module Karafka
       )
     end
 
+    # Trigger method for running on idle runs without messages
+    #
+    # @private
+    def on_idle
+      handle_idle
+    rescue StandardError => e
+      Karafka.monitor.instrument(
+        'error.occurred',
+        error: e,
+        caller: self,
+        type: 'consumer.idle.error'
+      )
+    end
+
     # Trigger method for running on partition revocation.
     #
     # @private
@@ -143,51 +159,6 @@ module Karafka
     # some teardown procedures (closing file handler, etc).
     def shutdown; end
 
-    # Marks message as consumed in an async way.
-    #
-    # @param message [Messages::Message] last successfully processed message.
-    # @return [Boolean] true if we were able to mark the offset, false otherwise. False indicates
-    #   that we were not able and that we have lost the partition.
-    #
-    # @note We keep track of this offset in case we would mark as consumed and got error when
-    #   processing another message. In case like this we do not pause on the message we've already
-    #   processed but rather at the next one. This applies to both sync and async versions of this
-    #   method.
-    def mark_as_consumed(message)
-      # Ignore earlier offsets than the one we already committed
-      return true if coordinator.seek_offset > message.offset
-
-      unless client.mark_as_consumed(message)
-        coordinator.revoke
-
-        return false
-      end
-
-      coordinator.seek_offset = message.offset + 1
-
-      true
-    end
-
-    # Marks message as consumed in a sync way.
-    #
-    # @param message [Messages::Message] last successfully processed message.
-    # @return [Boolean] true if we were able to mark the offset, false otherwise. False indicates
-    #   that we were not able and that we have lost the partition.
-    def mark_as_consumed!(message)
-      # Ignore earlier offsets than the one we already committed
-      return true if coordinator.seek_offset > message.offset
-
-      unless client.mark_as_consumed!(message)
-        coordinator.revoke
-
-        return false
-      end
-
-      coordinator.seek_offset = message.offset + 1
-
-      true
-    end
-
     # Pauses processing on a given offset for the current topic partition
     #
     # After given partition is resumed, it will continue processing from the given offset
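
Per the changelog, `#mark_as_consumed` and `#mark_as_consumed!` were moved into `Strategies::Default` rather than removed, so consumer-facing code keeps working unchanged. A sketch (with a hypothetical `persist` helper):

  class EventsConsumer < Karafka::BaseConsumer
    def consume
      messages.each do |message|
        persist(message.payload)
        # Still available on consumers; now provided by the processing strategy
        mark_as_consumed(message)
      end
    end
  end
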
@@ -201,8 +172,8 @@ module Karafka
       timeout ? coordinator.pause_tracker.pause(timeout) : coordinator.pause_tracker.pause
 
       client.pause(
-        messages.metadata.topic,
-        messages.metadata.partition,
+        topic.name,
+        partition,
         offset
       )
 
@@ -213,8 +184,8 @@ module Karafka
         'consumer.consuming.pause',
         caller: self,
         manual: manual_pause,
-        topic: messages.metadata.topic,
-        partition: messages.metadata.partition,
+        topic: topic.name,
+        partition: partition,
         offset: offset,
         timeout: coordinator.pause_tracker.current_timeout,
         attempt: coordinator.pause_tracker.attempt
@@ -223,6 +194,8 @@ module Karafka
 
     # Resumes processing of the current topic partition
     def resume
+      return unless coordinator.pause_tracker.paused?
+
       # This is sufficient to expire a partition pause, as with it will be resumed by the listener
       # thread before the next poll.
       coordinator.pause_tracker.expire
@@ -234,8 +207,8 @@ module Karafka
     def seek(offset)
       client.seek(
         Karafka::Messages::Seek.new(
-          messages.metadata.topic,
-          messages.metadata.partition,
+          topic.name,
+          partition,
           offset
         )
       )
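
Both `#pause` and `#seek` now resolve the topic and partition from the coordinator instead of the current messages batch, so they can run on flows without a consumed batch (idle jobs, filtering). Their public usage is unchanged; a sketch with a hypothetical `RateLimitError` and `import` helper:

  class ImportsConsumer < Karafka::BaseConsumer
    def consume
      messages.each { |message| import(message) }
    rescue RateLimitError
      # Pause this topic partition on the offset after the batch for 5 seconds
      pause(messages.last.offset + 1, 5_000)
    end
  end
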
@@ -248,6 +221,13 @@ module Karafka
       coordinator.revoked?
     end
 
+    # @return [Boolean] are we retrying processing after an error. This can be used to provide a
+    #   different flow after there is an error, for example for resources cleanup, small manual
+    #   backoff or different instrumentation tracking.
+    def retrying?
+      coordinator.pause_tracker.attempt.positive?
+    end
+
     # Pauses the processing from the last offset to retry on given message
     # @private
     def retry_after_pause
@@ -258,8 +238,8 @@ module Karafka
       Karafka.monitor.instrument(
         'consumer.consuming.retry',
         caller: self,
-        topic: messages.metadata.topic,
-        partition: messages.metadata.partition,
+        topic: topic.name,
+        partition: partition,
         offset: coordinator.seek_offset,
         timeout: coordinator.pause_tracker.current_timeout,
         attempt: coordinator.pause_tracker.attempt
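
The new `#retrying?` helper makes the retry flow branchable inside `#consume`. A usage sketch (`cleanup_partial_state` and `charge` are hypothetical application methods):

  class PaymentsConsumer < Karafka::BaseConsumer
    def consume
      # True when pause_tracker.attempt is positive, i.e. this batch is being
      # re-processed after an error-triggered pause
      cleanup_partial_state if retrying?

      messages.each { |message| charge(message) }
    end
  end
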
data/lib/karafka/connection/listener.rb CHANGED
@@ -25,7 +25,7 @@ module Karafka
         @consumer_group_coordinator = consumer_group_coordinator
         @subscription_group = subscription_group
         @jobs_queue = jobs_queue
-        @coordinators = Processing::CoordinatorsBuffer.new
+        @coordinators = Processing::CoordinatorsBuffer.new(subscription_group.topics)
         @client = Client.new(@subscription_group)
         @executors = Processing::ExecutorsBuffer.new(@client, subscription_group)
         @jobs_builder = proc_config.jobs_builder
@@ -234,7 +234,7 @@ module Karafka
       def build_and_schedule_shutdown_jobs
         jobs = []
 
-        @executors.each do |_, _, executor|
+        @executors.each do |executor|
           job = @jobs_builder.shutdown(executor)
           job.before_enqueue
           jobs << job
@@ -263,20 +263,25 @@ module Karafka
 
         @messages_buffer.each do |topic, partition, messages|
           coordinator = @coordinators.find_or_create(topic, partition)
-
           # Start work coordination for this topic partition
           coordinator.start(messages)
 
-          @partitioner.call(topic, messages, coordinator) do |group_id, partition_messages|
-            # Count the job we're going to create here
-            coordinator.increment
-            executor = @executors.find_or_create(topic, partition, group_id)
-            job = @jobs_builder.consume(executor, partition_messages, coordinator)
-            job.before_enqueue
-            jobs << job
+          # We do not increment coordinator for idle job because it's not a user related one
+          # and it will not go through a standard lifecycle. Same applies to revoked and shutdown
+          if messages.empty?
+            executor = @executors.find_or_create(topic, partition, 0, coordinator)
+            jobs << @jobs_builder.idle(executor)
+          else
+            @partitioner.call(topic, messages, coordinator) do |group_id, partition_messages|
+              executor = @executors.find_or_create(topic, partition, group_id, coordinator)
+              coordinator.increment
+              jobs << @jobs_builder.consume(executor, partition_messages)
+            end
           end
         end
 
+        jobs.each(&:before_enqueue)
+
         @scheduler.schedule_consumption(@jobs_queue, jobs)
       end
 
data/lib/karafka/errors.rb CHANGED
@@ -46,8 +46,5 @@ module Karafka
 
     # This should never happen. Please open an issue if it does.
     StrategyNotFoundError = Class.new(BaseError)
-
-    # This should never happen. Please open an issue if it does.
-    SkipMessageNotFoundError = Class.new(BaseError)
   end
 end
data/lib/karafka/instrumentation/logger_listener.rb CHANGED
@@ -170,13 +170,51 @@ module Karafka
       #
       # @param event [Karafka::Core::Monitoring::Event] event details including payload
       def on_dead_letter_queue_dispatched(event)
+        consumer = event[:caller]
+        topic = consumer.topic.name
         message = event[:message]
         offset = message.offset
-        topic = event[:caller].topic.name
-        dlq_topic = event[:caller].topic.dead_letter_queue.topic
+        dlq_topic = consumer.topic.dead_letter_queue.topic
         partition = message.partition
 
-        info "Dispatched message #{offset} from #{topic}/#{partition} to DLQ topic: #{dlq_topic}"
+        info <<~MSG.tr("\n", ' ').strip!
+          [#{consumer.id}] Dispatched message #{offset}
+          from #{topic}/#{partition}
+          to DLQ topic: #{dlq_topic}
+        MSG
+      end
+
+      # Logs info about throttling event
+      #
+      # @param event [Karafka::Core::Monitoring::Event] event details including payload
+      def on_filtering_throttled(event)
+        consumer = event[:caller]
+        topic = consumer.topic.name
+        # Here we get last message before throttle
+        message = event[:message]
+        partition = message.partition
+        offset = message.offset
+
+        info <<~MSG.tr("\n", ' ').strip!
+          [#{consumer.id}] Throttled and will resume
+          from message #{offset}
+          on #{topic}/#{partition}
+        MSG
+      end
+
+      # @param event [Karafka::Core::Monitoring::Event] event details including payload
+      def on_filtering_seek(event)
+        consumer = event[:caller]
+        topic = consumer.topic.name
+        # Message to which we seek
+        message = event[:message]
+        partition = message.partition
+        offset = message.offset
+
+        info <<~MSG.tr("\n", ' ').strip!
+          [#{consumer.id}] Post-filtering seeking to message #{offset}
+          on #{topic}/#{partition}
+        MSG
       end
 
       # There are many types of errors that can occur in many places, but we provide a single
@@ -203,6 +241,9 @@ module Karafka
       when 'consumer.after_consume.error'
         error "Consumer after consume failed due to an error: #{error}"
         error details
+      when 'consumer.idle.error'
+        error "Consumer idle failed due to an error: #{error}"
+        error details
       when 'consumer.shutdown.error'
         error "Consumer on shutdown failed due to an error: #{error}"
         error details
data/lib/karafka/instrumentation/notifications.rb CHANGED
@@ -39,6 +39,7 @@ module Karafka
         consumer.consumed
         consumer.consuming.pause
         consumer.consuming.retry
+        consumer.idle
         consumer.revoke
         consumer.revoked
         consumer.shutting_down
@@ -46,6 +47,9 @@ module Karafka
 
         dead_letter_queue.dispatched
 
+        filtering.throttled
+        filtering.seek
+
         process.notice_signal
 
         statistics.emitted
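
The new `consumer.idle`, `filtering.throttled` and `filtering.seek` events can be subscribed to like any other Karafka notification. A minimal sketch, assuming the payload shape used by the `LoggerListener` above:

  Karafka.monitor.subscribe('filtering.throttled') do |event|
    message = event[:message]

    puts "Throttled; will resume from offset #{message.offset} on partition #{message.partition}"
  end
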
data/lib/karafka/pro/active_job/consumer.rb CHANGED
@@ -29,7 +29,16 @@ module Karafka
           # If for any reason we've lost this partition, not worth iterating over new messages
           # as they are no longer ours
           break if revoked?
-          break if Karafka::App.stopping?
+
+          # We cannot early stop when running virtual partitions because the intermediate state
+          # would force us not to commit the offsets. This would cause extensive
+          # double-processing
+          break if Karafka::App.stopping? && !topic.virtual_partitions?
+
+          # Break if we already know, that one of virtual partitions has failed and we will
+          # be restarting processing all together after all VPs are done. This will minimize
+          # number of jobs that will be re-processed
+          break if topic.virtual_partitions? && failing?
 
           consume_job(message)
 
data/lib/karafka/pro/processing/coordinator.rb CHANGED
@@ -17,6 +17,8 @@ module Karafka
       # Pro coordinator that provides extra orchestration methods useful for parallel processing
       # within the same partition
      class Coordinator < ::Karafka::Processing::Coordinator
+        attr_reader :filter
+
        # @param args [Object] anything the base coordinator accepts
        def initialize(*args)
          super
@@ -24,6 +26,7 @@ module Karafka
          @executed = []
          @flow_lock = Mutex.new
          @collapser = Collapser.new
+          @filter = FiltersApplier.new(self)
        end
 
        # Starts the coordination process
@@ -34,10 +37,10 @@ module Karafka
 
          @collapser.refresh!(messages.first.offset)
 
-          @mutex.synchronize do
-            @executed.clear
-            @last_message = messages.last
-          end
+          @filter.apply!(messages)
+
+          @executed.clear
+          @last_message = messages.last
        end
 
        # Sets the consumer failure status and additionally starts the collapse until
@@ -54,6 +57,12 @@ module Karafka
          @collapser.collapsed?
        end
 
+        # @return [Boolean] did any of the filters apply any logic that would cause us to run
+        #   the filtering flow
+        def filtered?
+          @filter.applied?
+        end
+
        # @return [Boolean] is the coordinated work finished or not
        def finished?
          @running_jobs.zero?
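
The `@flow_lock` mutex above also backs the `Coordinator#synchronize` API mentioned in the changelog for cross-virtual-partition operations. A hedged usage sketch (the shared `TOTALS` accumulator and JSON payloads are hypothetical):

  class AggregatingConsumer < Karafka::BaseConsumer
    TOTALS = Hash.new(0)

    def consume
      sum = messages.payloads.sum { |payload| payload['amount'] }

      # Several virtual partition consumers of one partition may run in parallel;
      # synchronize guards partition-wide shared state
      coordinator.synchronize do
        TOTALS[partition] += sum
      end
    end
  end
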
data/lib/karafka/pro/processing/filters/base.rb ADDED
@@ -0,0 +1,61 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      module Filters
+        # Base for all the filters.
+        # All filters (including custom) need to use this API.
+        #
+        # Due to the fact, that filters can limit data in such a way, that we need to pause or
+        # seek (throttling for example), the api is not just "remove some things from batch" but
+        # also provides ways to control the post-filtering operations that may be needed.
+        class Base
+          # @return [Karafka::Messages::Message, nil] the message that we want to use as a cursor
+          #   one to pause or seek or nil if not applicable.
+          attr_reader :cursor
+
+          include Karafka::Core::Helpers::Time
+
+          def initialize
+            @applied = false
+            @cursor = nil
+          end
+
+          # @param messages [Array<Karafka::Messages::Message>] array with messages. Please keep
+          #   in mind, this may already be partial due to execution of previous filters.
+          def apply!(messages)
+            raise NotImplementedError, 'Implement in a subclass'
+          end
+
+          # @return [Symbol] filter post-execution action on consumer. Either `:skip`, `:pause` or
+          #   `:seek`.
+          def action
+            :skip
+          end
+
+          # @return [Boolean] did this filter change messages in any way
+          def applied?
+            @applied
+          end
+
+          # @return [Integer] default timeout for pausing (if applicable)
+          def timeout
+            0
+          end
+        end
+      end
+    end
+  end
+end
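
Custom filters are expected to follow this same API. A minimal illustrative subclass (registered via the `filter` routing factory) that drops even-offset messages and relies on the inherited `:skip` action:

  class OddOffsetsOnly < Karafka::Pro::Processing::Filters::Base
    # @param messages [Array<Karafka::Messages::Message>]
    def apply!(messages)
      @applied = false

      messages.delete_if do |message|
        even = message.offset.even?
        @applied = true if even
        even
      end
    end
  end
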
data/lib/karafka/pro/processing/filters/delayer.rb ADDED
@@ -0,0 +1,70 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      module Filters
+        # A filter that allows us to delay processing by pausing until time is right.
+        class Delayer < Base
+          # @param delay [Integer] ms delay / minimum age of each message we want to process
+          def initialize(delay)
+            super()
+
+            @delay = delay
+          end
+
+          # Removes too young messages
+          #
+          # @param messages [Array<Karafka::Messages::Message>]
+          def apply!(messages)
+            @applied = false
+            @cursor = nil
+
+            # Time on message is in seconds with ms precision, so we need to convert the ttl that
+            # is in ms to this format
+            border = ::Time.now.utc - @delay / 1_000.to_f
+
+            messages.delete_if do |message|
+              too_young = message.timestamp > border
+
+              if too_young
+                @applied = true
+
+                @cursor ||= message
+              end
+
+              @applied
+            end
+          end
+
+          # @return [Integer] timeout delay in ms
+          def timeout
+            return 0 unless @cursor
+
+            timeout = (@delay / 1_000.to_f) - (::Time.now.utc - @cursor.timestamp)
+
+            timeout <= 0 ? 0 : timeout * 1_000
+          end
+
+          # @return [Symbol] action to take on post-filtering
+          def action
+            return :skip unless applied?
+
+            timeout <= 0 ? :seek : :pause
+          end
+        end
+      end
+    end
+  end
+end
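
A worked example of the interplay above: with a 60_000 ms delay and a cursor (first too-young) message that is 10 seconds old, `timeout` returns (60 - 10) * 1_000 = 50_000 ms, so `action` is `:pause` and the partition pauses for roughly 50 seconds; by resume time the cursor message has crossed the age border and gets processed. Had the computed timeout already elapsed, `action` would be `:seek` back to the cursor message instead.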