karafka 2.2.12 → 2.2.14

Files changed (57)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/CHANGELOG.md +141 -121
  4. data/Gemfile.lock +10 -10
  5. data/config/locales/errors.yml +2 -1
  6. data/docker-compose.yml +2 -0
  7. data/lib/karafka/admin.rb +109 -3
  8. data/lib/karafka/app.rb +7 -0
  9. data/lib/karafka/base_consumer.rb +23 -30
  10. data/lib/karafka/connection/client.rb +13 -10
  11. data/lib/karafka/connection/consumer_group_coordinator.rb +3 -3
  12. data/lib/karafka/connection/listener.rb +18 -10
  13. data/lib/karafka/connection/listeners_batch.rb +6 -1
  14. data/lib/karafka/contracts/config.rb +2 -1
  15. data/lib/karafka/instrumentation/assignments_tracker.rb +96 -0
  16. data/lib/karafka/instrumentation/callbacks/rebalance.rb +10 -7
  17. data/lib/karafka/instrumentation/logger_listener.rb +0 -9
  18. data/lib/karafka/instrumentation/notifications.rb +6 -3
  19. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +2 -2
  20. data/lib/karafka/pro/instrumentation/performance_tracker.rb +85 -0
  21. data/lib/karafka/pro/loader.rb +3 -2
  22. data/lib/karafka/pro/processing/coordinator.rb +12 -6
  23. data/lib/karafka/pro/processing/jobs_queue.rb +109 -0
  24. data/lib/karafka/pro/processing/schedulers/base.rb +127 -0
  25. data/lib/karafka/pro/processing/schedulers/default.rb +109 -0
  26. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +1 -1
  27. data/lib/karafka/pro/processing/strategies/default.rb +3 -1
  28. data/lib/karafka/pro/processing/strategies/lrj/default.rb +10 -1
  29. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +1 -1
  30. data/lib/karafka/pro/processing/strategies/vp/default.rb +9 -5
  31. data/lib/karafka/processing/coordinator.rb +13 -7
  32. data/lib/karafka/processing/executor.rb +27 -3
  33. data/lib/karafka/processing/executors_buffer.rb +3 -3
  34. data/lib/karafka/processing/jobs/base.rb +19 -2
  35. data/lib/karafka/processing/jobs/consume.rb +3 -3
  36. data/lib/karafka/processing/jobs/idle.rb +5 -0
  37. data/lib/karafka/processing/jobs/revoked.rb +5 -0
  38. data/lib/karafka/processing/jobs/shutdown.rb +5 -0
  39. data/lib/karafka/processing/jobs_queue.rb +45 -17
  40. data/lib/karafka/processing/schedulers/default.rb +41 -0
  41. data/lib/karafka/processing/strategies/base.rb +13 -4
  42. data/lib/karafka/processing/strategies/default.rb +17 -5
  43. data/lib/karafka/processing/worker.rb +4 -1
  44. data/lib/karafka/routing/builder.rb +32 -17
  45. data/lib/karafka/routing/proxy.rb +4 -3
  46. data/lib/karafka/routing/subscription_group.rb +11 -6
  47. data/lib/karafka/routing/topics.rb +1 -1
  48. data/lib/karafka/runner.rb +1 -1
  49. data/lib/karafka/setup/config.rb +5 -1
  50. data/lib/karafka/version.rb +1 -1
  51. data/lib/karafka.rb +0 -1
  52. data.tar.gz.sig +0 -0
  53. metadata +8 -5
  54. metadata.gz.sig +0 -0
  55. data/lib/karafka/pro/performance_tracker.rb +0 -84
  56. data/lib/karafka/pro/processing/scheduler.rb +0 -75
  57. data/lib/karafka/processing/scheduler.rb +0 -22
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2ee648826503a1b1841a97e368ec2894a16eadc3509270d2a5dbbbe9ee703b3a
- data.tar.gz: 915285e224ab6dcaa4b2f75e2b6aa52f4f6eb0f613b8b06efe78e2ef4ff3f514
+ metadata.gz: 69d8242fa695121f63b2e582d7a0b97f090d58f82047513c450f3b21107703b3
+ data.tar.gz: a9fb3db88cc6fbb3a25db24e95b8010a0b01f7ab09fc2f54d201e311581db9a5
  SHA512:
- metadata.gz: 1f7f109c533a98a46306be62a2172432f0d18af7003e401d3a894aa356bc2cae2622ba4c323bfdd230a66f0ae544a7cfb61ee0168b396e2e809e408a657eecb6
- data.tar.gz: d10de0ca361236c35bed27ca3c5db13e9e245805412f85c2d8d4e6a140fe088025403be7a65e1d97831613f02032bfe3fb2194c5ec7f6a880bc7ddc67a112813
+ metadata.gz: 83e22a8317f10328c11f3f4ac4c90109ecebb7f1ca0b089da2875c4e0700b58338adfb6b7c70e30df6fedecb26e2aaa4a11df347cc0bd898781adf709ad7a87c
+ data.tar.gz: 15eb23000600be7d2f2c49316ae8d3355ddef4ab2d9f75585a5b63ea0f8b27a87f473d8fe5fcf926a5815f319413541f6822fa15660fc7962bb5baf31771f00a
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,19 +1,39 @@
  # Karafka framework changelog

+ ## 2.2.14 (2023-12-07)
+ - **[Feature]** Provide `Karafka::Admin#delete_consumer_group` and `Karafka::Admin#seek_consumer_group`.
+ - **[Feature]** Provide `Karafka::App.assignments` that will return real-time assignments tracking.
+ - [Enhancement] Make sure that the Scheduling API is thread-safe by default and allow for lock-less schedulers when schedulers are stateless.
+ - [Enhancement] "Blockless" topics with defaults
+ - [Enhancement] Provide a `finished?` method to the jobs for advanced reference based job schedulers.
+ - [Enhancement] Provide `client.reset` notification event.
+ - [Enhancement] Remove all usage of concurrent-ruby from Karafka
+ - [Change] Replace single #before_schedule with appropriate methods and events for scheduling various types of work. This is needed as we may run different framework logic on those and, second, for accurate job tracking with advanced schedulers.
+ - [Change] Rename `before_enqueue` to `before_schedule` to reflect what it does and when (internal).
+ - [Change] Remove not needed error catchers for strategies code. This code if errors, should be considered critical and should not be silenced.
+ - [Change] Remove not used notifications events.
+
+ ## 2.2.13 (2023-11-17)
+ - **[Feature]** Introduce low-level extended Scheduling API for granular control of schedulers and jobs execution [Pro].
+ - [Enhancement] Use separate lock for user-facing synchronization.
+ - [Enhancement] Instrument `consumer.before_enqueue`.
+ - [Enhancement] Limit usage of `concurrent-ruby` (plan to remove it as a dependency fully)
+ - [Enhancement] Provide `#synchronize` API same as in VPs for LRJs to allow for lifecycle events and consumption synchronization.
+
  ## 2.2.12 (2023-11-09)
- - [Improvement] Rewrite the polling engine to update statistics and error callbacks despite longer non LRJ processing or long `max_wait_time` setups. This change provides stability to the statistics and background error emitting making them time-reliable.
- - [Improvement] Auto-update Inline Insights if new insights are present for all consumers and not only LRJ (OSS and Pro).
- - [Improvement] Alias `#insights` with `#inline_insights` and `#insights?` with `#inline_insights?`
+ - [Enhancement] Rewrite the polling engine to update statistics and error callbacks despite longer non LRJ processing or long `max_wait_time` setups. This change provides stability to the statistics and background error emitting making them time-reliable.
+ - [Enhancement] Auto-update Inline Insights if new insights are present for all consumers and not only LRJ (OSS and Pro).
+ - [Enhancement] Alias `#insights` with `#inline_insights` and `#insights?` with `#inline_insights?`

  ## 2.2.11 (2023-11-03)
- - [Improvement] Allow marking as consumed in the user `#synchronize` block.
- - [Improvement] Make whole Pro VP marking as consumed concurrency safe for both async and sync scenarios.
- - [Improvement] Provide new alias to `karafka server`, that is: `karafka consumer`.
+ - [Enhancement] Allow marking as consumed in the user `#synchronize` block.
+ - [Enhancement] Make whole Pro VP marking as consumed concurrency safe for both async and sync scenarios.
+ - [Enhancement] Provide new alias to `karafka server`, that is: `karafka consumer`.

  ## 2.2.10 (2023-11-02)
- - [Improvement] Allow for running `#pause` without specifying the offset (provide offset or `:consecutive`). This allows for pausing on the consecutive message (last received + 1), so after resume we will get last message received + 1 effectively not using `#seek` and not purging `librdafka` buffer preserving on networking. Please be mindful that this uses notion of last message passed from **librdkafka**, and not the last one available in the consumer (`messages.last`). While for regular cases they will be the same, when using things like DLQ, LRJs, VPs or Filtering API, those may not be the same.
- - [Improvement] **Drastically** improve network efficiency of operating with LRJ by using the `:consecutive` offset as default strategy for running LRJs without moving the offset in place and purging the data.
- - [Improvement] Do not "seek in place". When pausing and/or seeking to the same location as the current position, do nothing not to purge buffers and not to move to the same place where we are.
+ - [Enhancement] Allow for running `#pause` without specifying the offset (provide offset or `:consecutive`). This allows for pausing on the consecutive message (last received + 1), so after resume we will get last message received + 1 effectively not using `#seek` and not purging `librdafka` buffer preserving on networking. Please be mindful that this uses notion of last message passed from **librdkafka**, and not the last one available in the consumer (`messages.last`). While for regular cases they will be the same, when using things like DLQ, LRJs, VPs or Filtering API, those may not be the same.
+ - [Enhancement] **Drastically** improve network efficiency of operating with LRJ by using the `:consecutive` offset as default strategy for running LRJs without moving the offset in place and purging the data.
+ - [Enhancement] Do not "seek in place". When pausing and/or seeking to the same location as the current position, do nothing not to purge buffers and not to move to the same place where we are.
  - [Fix] Pattern regexps should not be part of declaratives even when configured.
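The 2.2.14 entries at the top of this hunk name two new admin calls and the assignments tracker. Below is a minimal Ruby sketch of how they might be used; the method names come from the changelog entries, while the argument shapes (a consumer group id string, a topic => { partition => offset } map) and the structure of the returned assignments are assumptions, not a confirmed API reference.

```ruby
# Sketch only: names taken from the 2.2.14 entries above; argument and return
# shapes are assumptions.
require 'karafka'

# Remove a consumer group that is no longer in use (it should not be active).
Karafka::Admin.delete_consumer_group('obsolete_group')

# Reposition a (stopped) consumer group before restarting it.
Karafka::Admin.seek_consumer_group(
  'example_app_group',
  { 'orders_states' => { 0 => 10, 1 => 10 } }
)

# Inspect the real-time assignments tracking introduced alongside the above.
Karafka::App.assignments.each do |topic, partitions|
  # Keys may be routing topic objects; partitions are expected to be partition ids.
  puts "#{topic}: #{partitions.inspect}"
end
```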

  ### Upgrade Notes
@@ -21,13 +41,13 @@
  In the latest Karafka release, there are no breaking changes. However, please note the updates to #pause and #seek. If you spot any issues, please report them immediately. Your feedback is crucial.

  ## 2.2.9 (2023-10-24)
- - [Improvement] Allow using negative offset references in `Karafka::Admin#read_topic`.
+ - [Enhancement] Allow using negative offset references in `Karafka::Admin#read_topic`.
  - [Change] Make sure that WaterDrop `2.6.10` or higher is used with this release to support transactions fully and the Web-UI.

  ## 2.2.8 (2023-10-20)
  - **[Feature]** Introduce Appsignal integration for errors and metrics tracking.
- - [Improvement] Expose `#synchronize` for VPs to allow for locks when cross-VP consumers work is needed.
- - [Improvement] Provide `#collapse_until!` direct consumer API to allow for collapsed virtual partitions consumer operations together with the Filtering API for advanced use-cases.
+ - [Enhancement] Expose `#synchronize` for VPs to allow for locks when cross-VP consumers work is needed.
+ - [Enhancement] Provide `#collapse_until!` direct consumer API to allow for collapsed virtual partitions consumer operations together with the Filtering API for advanced use-cases.
  - [Refactor] Reorganize how rebalance events are propagated from `librdkafka` to Karafka. Replace `connection.client.rebalance_callback` with `rebalance.partitions_assigned` and `rebalance.partitions_revoked`. Introduce two extra events: `rebalance.partitions_assign` and `rebalance.partitions_revoke` to handle pre-rebalance future work.
  - [Refactor] Remove `thor` as a CLI layer and rely on Ruby `OptParser`
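For the 2.2.9 negative offset entry above, here is a hedged sketch of such a lookup. The positional arguments (topic, partition, count, offset) follow the public `Karafka::Admin.read_topic` call, while the exact semantics of the negative reference (counting back from the high watermark) are an assumption based on the wording.

```ruby
# Hedged sketch: read roughly the last 10 messages of partition 0 by using a
# negative offset reference, as allowed since 2.2.9.
messages = Karafka::Admin.read_topic('orders_states', 0, 10, -10)

messages.each do |message|
  puts "#{message.offset}: #{message.raw_payload}"
end
```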
@@ -129,31 +149,31 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,

  ## 2.1.9 (2023-08-06)
  - **[Feature]** Introduce ability to customize pause strategy on a per topic basis (Pro).
- - [Improvement] Disable the extensive messages logging in the default `karafka.rb` template.
+ - [Enhancement] Disable the extensive messages logging in the default `karafka.rb` template.
  - [Change] Require `waterdrop` `>= 2.6.6` due to extra `LoggerListener` API.

  ## 2.1.8 (2023-07-29)
- - [Improvement] Introduce `Karafka::BaseConsumer#used?` method to indicate, that at least one invocation of `#consume` took or will take place. This can be used as a replacement to the non-direct `messages.count` check for shutdown and revocation to ensure, that the consumption took place or is taking place (in case of running LRJ).
- - [Improvement] Make `messages#to_a` return copy of the underlying array to prevent scenarios, where the mutation impacts offset management.
- - [Improvement] Mitigate a librdkafka `cooperative-sticky` rebalance crash issue.
- - [Improvement] Provide ability to overwrite `consumer_persistence` per subscribed topic. This is mostly useful for plugins and extensions developers.
+ - [Enhancement] Introduce `Karafka::BaseConsumer#used?` method to indicate, that at least one invocation of `#consume` took or will take place. This can be used as a replacement to the non-direct `messages.count` check for shutdown and revocation to ensure, that the consumption took place or is taking place (in case of running LRJ).
+ - [Enhancement] Make `messages#to_a` return copy of the underlying array to prevent scenarios, where the mutation impacts offset management.
+ - [Enhancement] Mitigate a librdkafka `cooperative-sticky` rebalance crash issue.
+ - [Enhancement] Provide ability to overwrite `consumer_persistence` per subscribed topic. This is mostly useful for plugins and extensions developers.
  - [Fix] Fix a case where the performance tracker would crash in case of mutation of messages to an empty state.

  ## 2.1.7 (2023-07-22)
- - [Improvement] Always query for watermarks in the Iterator to improve the initial response time.
- - [Improvement] Add `max_wait_time` option to the Iterator.
+ - [Enhancement] Always query for watermarks in the Iterator to improve the initial response time.
+ - [Enhancement] Add `max_wait_time` option to the Iterator.
  - [Fix] Fix a case where `Admin#read_topic` would wait for poll interval on non-existing messages instead of early exit.
  - [Fix] Fix a case where Iterator with per partition offsets with negative lookups would go below the number of available messages.
  - [Fix] Remove unused constant from Admin module.
  - [Fix] Add missing `connection.client.rebalance_callback.error` to the `LoggerListener` instrumentation hook.

  ## 2.1.6 (2023-06-29)
- - [Improvement] Provide time support for iterator
- - [Improvement] Provide time support for admin `#read_topic`
- - [Improvement] Provide time support for consumer `#seek`.
- - [Improvement] Remove no longer needed locks for client operations.
- - [Improvement] Raise `Karafka::Errors::TopicNotFoundError` when trying to iterate over non-existing topic.
- - [Improvement] Ensure that Kafka multi-command operations run under mutex together.
+ - [Enhancement] Provide time support for iterator
+ - [Enhancement] Provide time support for admin `#read_topic`
+ - [Enhancement] Provide time support for consumer `#seek`.
+ - [Enhancement] Remove no longer needed locks for client operations.
+ - [Enhancement] Raise `Karafka::Errors::TopicNotFoundError` when trying to iterate over non-existing topic.
+ - [Enhancement] Ensure that Kafka multi-command operations run under mutex together.
  - [Change] Require `waterdrop` `>= 2.6.2`
  - [Change] Require `karafka-core` `>= 2.1.1`
  - [Refactor] Clean-up iterator code.
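The 2.1.6 entries above add time-based lookups to the iterator, `#read_topic`, and the consumer `#seek`. A small sketch under those assumptions follows; the consumer class, topic names, and the `replay?` predicate are illustrative, and the rounding of a `Time` to a concrete offset is handled by Karafka/librdkafka, not by this snippet.

```ruby
# Hedged sketch of time-based seeking and reading; only the call shapes are shown.
class EventsConsumer < Karafka::BaseConsumer
  def consume
    # Rewind this topic partition to roughly five minutes ago.
    seek(Time.now - 5 * 60) if replay?
  end

  private

  # Hypothetical application-level switch, not part of Karafka.
  def replay?
    false
  end
end

# Reading a topic starting from a point in time instead of a numeric offset.
Karafka::Admin.read_topic('events', 0, 100, Time.now - 3600)
```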
@@ -165,13 +185,13 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,
  - [Fix] Make sure, that `#pause` and `#resume` with one underlying connection do not race-condition.

  ## 2.1.5 (2023-06-19)
- - [Improvement] Drastically improve `#revoked?` response quality by checking the real time assignment lost state on librdkafka.
- - [Improvement] Improve eviction of saturated jobs that would run on already revoked assignments.
- - [Improvement] Expose `#commit_offsets` and `#commit_offsets!` methods in the consumer to provide ability to commit offsets directly to Kafka without having to mark new messages as consumed.
- - [Improvement] No longer skip offset commit when no messages marked as consumed as `librdkafka` has fixed the crashes there.
- - [Improvement] Remove no longer needed patches.
- - [Improvement] Ensure, that the coordinator revocation status is switched upon revocation detection when using `#revoked?`
- - [Improvement] Add benchmarks for marking as consumed (sync and async).
+ - [Enhancement] Drastically improve `#revoked?` response quality by checking the real time assignment lost state on librdkafka.
+ - [Enhancement] Improve eviction of saturated jobs that would run on already revoked assignments.
+ - [Enhancement] Expose `#commit_offsets` and `#commit_offsets!` methods in the consumer to provide ability to commit offsets directly to Kafka without having to mark new messages as consumed.
+ - [Enhancement] No longer skip offset commit when no messages marked as consumed as `librdkafka` has fixed the crashes there.
+ - [Enhancement] Remove no longer needed patches.
+ - [Enhancement] Ensure, that the coordinator revocation status is switched upon revocation detection when using `#revoked?`
+ - [Enhancement] Add benchmarks for marking as consumed (sync and async).
  - [Change] Require `karafka-core` `>= 2.1.0`
  - [Change] Require `waterdrop` `>= 2.6.1`
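A short sketch of the 2.1.5 consumer additions above (`#commit_offsets!` and the sharper `#revoked?`). The consumer class and the printed "domain logic" are illustrative stand-ins, not Karafka API.

```ruby
# Hedged sketch: commit stored offsets explicitly and skip the commit once the
# assignment is known to be lost.
class SyncedConsumer < Karafka::BaseConsumer
  def consume
    messages.each do |message|
      puts "processing #{message.offset}" # stand-in for real domain logic
      mark_as_consumed(message)
    end

    # Synchronous commit of what was marked above, unless the partition was revoked.
    commit_offsets! unless revoked?
  end
end
```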
@@ -195,12 +215,12 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,
  - **[Feature]** Provide ability to use CurrentAttributes with ActiveJob's Karafka adapter (federicomoretti).
  - **[Feature]** Introduce collective Virtual Partitions offset management.
  - **[Feature]** Use virtual offsets to filter out messages that would be re-processed upon retries.
- - [Improvement] No longer break processing on failing parallel virtual partitions in ActiveJob because it is compensated by virtual marking.
- - [Improvement] Always use Virtual offset management for Pro ActiveJobs.
- - [Improvement] Do not attempt to mark offsets on already revoked partitions.
- - [Improvement] Make sure, that VP components are not injected into non VP strategies.
- - [Improvement] Improve complex strategies inheritance flow.
- - [Improvement] Optimize offset management for DLQ + MoM feature combinations.
+ - [Enhancement] No longer break processing on failing parallel virtual partitions in ActiveJob because it is compensated by virtual marking.
+ - [Enhancement] Always use Virtual offset management for Pro ActiveJobs.
+ - [Enhancement] Do not attempt to mark offsets on already revoked partitions.
+ - [Enhancement] Make sure, that VP components are not injected into non VP strategies.
+ - [Enhancement] Improve complex strategies inheritance flow.
+ - [Enhancement] Optimize offset management for DLQ + MoM feature combinations.
  - [Change] Removed `Karafka::Pro::BaseConsumer` in favor of `Karafka::BaseConsumer`. (#1345)
  - [Fix] Fix for `max_messages` and `max_wait_time` not having reference in errors.yml (#1443)

@@ -212,16 +232,16 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,

  ## 2.0.41 (2023-04-19)
  - **[Feature]** Provide `Karafka::Pro::Iterator` for anonymous topic/partitions iterations and messages lookups (#1389 and #1427).
- - [Improvement] Optimize topic lookup for `read_topic` admin method usage.
- - [Improvement] Report via `LoggerListener` information about the partition on which a given job has started and finished.
- - [Improvement] Slightly normalize the `LoggerListener` format. Always report partition related operations as followed: `TOPIC_NAME/PARTITION`.
- - [Improvement] Do not retry recovery from `unknown_topic_or_part` when Karafka is shutting down as there is no point and no risk of any data losses.
- - [Improvement] Report `client.software.name` and `client.software.version` according to `librdkafka` recommendation.
- - [Improvement] Report ten longest integration specs after the suite execution.
- - [Improvement] Prevent user originating errors related to statistics processing after listener loop crash from potentially crashing the listener loop and hanging Karafka process.
+ - [Enhancement] Optimize topic lookup for `read_topic` admin method usage.
+ - [Enhancement] Report via `LoggerListener` information about the partition on which a given job has started and finished.
+ - [Enhancement] Slightly normalize the `LoggerListener` format. Always report partition related operations as followed: `TOPIC_NAME/PARTITION`.
+ - [Enhancement] Do not retry recovery from `unknown_topic_or_part` when Karafka is shutting down as there is no point and no risk of any data losses.
+ - [Enhancement] Report `client.software.name` and `client.software.version` according to `librdkafka` recommendation.
+ - [Enhancement] Report ten longest integration specs after the suite execution.
+ - [Enhancement] Prevent user originating errors related to statistics processing after listener loop crash from potentially crashing the listener loop and hanging Karafka process.

  ## 2.0.40 (2023-04-13)
- - [Improvement] Introduce `Karafka::Messages::Messages#empty?` method to handle Idle related cases where shutdown or revocation would be called on an empty messages set. This method allows for checking if there are any messages in the messages batch.
+ - [Enhancement] Introduce `Karafka::Messages::Messages#empty?` method to handle Idle related cases where shutdown or revocation would be called on an empty messages set. This method allows for checking if there are any messages in the messages batch.
  - [Refactor] Require messages builder to accept partition and do not fetch it from messages.
  - [Refactor] Use empty messages set for internal APIs (Idle) (so there always is `Karafka::Messages::Messages`)
  - [Refactor] Allow for empty messages set initialization with -1001 and -1 on metadata (similar to `librdkafka`)
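The 2.0.41 entry above introduces `Karafka::Pro::Iterator`. A minimal sketch, assuming a plain topic name is an accepted subscription form and that iteration yields regular Karafka messages:

```ruby
# Hedged sketch of anonymous (consumer-group-less) iteration with the Pro Iterator.
iterator = Karafka::Pro::Iterator.new('orders_states')

iterator.each do |message|
  puts "#{message.partition}/#{message.offset}: #{message.raw_payload}"
end
```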
@@ -231,17 +251,17 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,
  - **[Feature]** Provide Delayed Topics (#1000)
  - **[Feature]** Provide ability to expire messages (expiring topics)
  - **[Feature]** Provide ability to apply filters after messages are polled and before enqueued. This is a generic filter API for any usage.
- - [Improvement] When using ActiveJob with Virtual Partitions, Karafka will stop if collectively VPs are failing. This minimizes number of jobs that will be collectively re-processed.
- - [Improvement] `#retrying?` method has been added to consumers to provide ability to check, that we're reprocessing data after a failure. This is useful for branching out processing based on errors.
- - [Improvement] Track active_job_id in instrumentation (#1372)
- - [Improvement] Introduce new housekeeping job type called `Idle` for non-consumption execution flows.
- - [Improvement] Change how a manual offset management works with Long-Running Jobs. Use the last message offset to move forward instead of relying on the last message marked as consumed for a scenario where no message is marked.
- - [Improvement] Prioritize in Pro non-consumption jobs execution over consumption despite LJF. This will ensure, that housekeeping as well as other non-consumption events are not saturated when running a lot of work.
- - [Improvement] Normalize the DLQ behaviour with MoM. Always pause on dispatch for all the strategies.
- - [Improvement] Improve the manual offset management and DLQ behaviour when no markings occur for OSS.
- - [Improvement] Do not early stop ActiveJob work running under virtual partitions to prevent extensive reprocessing.
- - [Improvement] Drastically increase number of scenarios covered by integration specs (OSS and Pro).
- - [Improvement] Introduce a `Coordinator#synchronize` lock for cross virtual partitions operations.
+ - [Enhancement] When using ActiveJob with Virtual Partitions, Karafka will stop if collectively VPs are failing. This minimizes number of jobs that will be collectively re-processed.
+ - [Enhancement] `#retrying?` method has been added to consumers to provide ability to check, that we're reprocessing data after a failure. This is useful for branching out processing based on errors.
+ - [Enhancement] Track active_job_id in instrumentation (#1372)
+ - [Enhancement] Introduce new housekeeping job type called `Idle` for non-consumption execution flows.
+ - [Enhancement] Change how a manual offset management works with Long-Running Jobs. Use the last message offset to move forward instead of relying on the last message marked as consumed for a scenario where no message is marked.
+ - [Enhancement] Prioritize in Pro non-consumption jobs execution over consumption despite LJF. This will ensure, that housekeeping as well as other non-consumption events are not saturated when running a lot of work.
+ - [Enhancement] Normalize the DLQ behaviour with MoM. Always pause on dispatch for all the strategies.
+ - [Enhancement] Improve the manual offset management and DLQ behaviour when no markings occur for OSS.
+ - [Enhancement] Do not early stop ActiveJob work running under virtual partitions to prevent extensive reprocessing.
+ - [Enhancement] Drastically increase number of scenarios covered by integration specs (OSS and Pro).
+ - [Enhancement] Introduce a `Coordinator#synchronize` lock for cross virtual partitions operations.
  - [Fix] Do not resume partition that is not paused.
  - [Fix] Fix `LoggerListener` cases where logs would not include caller id (when available)
  - [Fix] Fix not working benchmark tests.
@@ -255,10 +275,10 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,
  - [Refactor] Move `#mark_as_consumed` and `#mark_as_consumed!`into `Strategies::Default` to be able to introduce marking for virtual partitions.

  ## 2.0.38 (2023-03-27)
- - [Improvement] Introduce `Karafka::Admin#read_watermark_offsets` to get low and high watermark offsets values.
- - [Improvement] Track active_job_id in instrumentation (#1372)
- - [Improvement] Improve `#read_topic` reading in case of a compacted partition where the offset is below the low watermark offset. This should optimize reading and should not go beyond the low watermark offset.
- - [Improvement] Allow `#read_topic` to accept instance settings to overwrite any settings needed to customize reading behaviours.
+ - [Enhancement] Introduce `Karafka::Admin#read_watermark_offsets` to get low and high watermark offsets values.
+ - [Enhancement] Track active_job_id in instrumentation (#1372)
+ - [Enhancement] Improve `#read_topic` reading in case of a compacted partition where the offset is below the low watermark offset. This should optimize reading and should not go beyond the low watermark offset.
+ - [Enhancement] Allow `#read_topic` to accept instance settings to overwrite any settings needed to customize reading behaviours.

  ## 2.0.37 (2023-03-20)
  - [Fix] Declarative topics execution on a secondary cluster run topics creation on the primary one (#1365)
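For the 2.0.38 admin entries above, a hedged sketch; the `[low, high]` return shape of `#read_watermark_offsets` and passing per-call settings as a trailing hash to `#read_topic` are assumptions based on the wording of the entries.

```ruby
# Hedged sketch of the watermark lookup and a per-call settings overwrite.
low, high = Karafka::Admin.read_watermark_offsets('orders_states', 0)
puts "partition 0 spans offsets #{low}..#{high}"

# Overwrite instance settings for a single read (larger fetch size as an example).
Karafka::Admin.read_topic(
  'orders_states', 0, 10, -1,
  { 'fetch.message.max.bytes': 10 * 1_048_576 }
)
```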
@@ -273,7 +293,7 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,
  - **[Feature]** Allow for full topics reset and topics repartitioning via the CLI.

  ## 2.0.34 (2023-03-04)
- - [Improvement] Attach an `embedded` tag to Karafka processes started using the embedded API.
+ - [Enhancement] Attach an `embedded` tag to Karafka processes started using the embedded API.
  - [Change] Renamed `Datadog::Listener` to `Datadog::MetricsListener` for consistency. (#1124)

  ### Upgrade Notes
@@ -284,10 +304,10 @@ If you want to maintain the `2.1` behavior, that is `karafka_admin` admin group,
  - **[Feature]** Support `perform_all_later` in ActiveJob adapter for Rails `7.1+`
  - **[Feature]** Introduce ability to assign and re-assign tags in consumer instances. This can be used for extra instrumentation that is context aware.
  - **[Feature]** Introduce ability to assign and reassign tags to the `Karafka::Process`.
- - [Improvement] When using `ActiveJob` adapter, automatically tag jobs with the name of the `ActiveJob` class that is running inside of the `ActiveJob` consumer.
- - [Improvement] Make `::Karafka::Instrumentation::Notifications::EVENTS` list public for anyone wanting to re-bind those into a different notification bus.
- - [Improvement] Set `fetch.message.max.bytes` for `Karafka::Admin` to `5MB` to make sure that all data is fetched correctly for Web UI under heavy load (many consumers).
- - [Improvement] Introduce a `strict_topics_namespacing` config option to enable/disable the strict topics naming validations. This can be useful when working with pre-existing topics which we cannot or do not want to rename.
+ - [Enhancement] When using `ActiveJob` adapter, automatically tag jobs with the name of the `ActiveJob` class that is running inside of the `ActiveJob` consumer.
+ - [Enhancement] Make `::Karafka::Instrumentation::Notifications::EVENTS` list public for anyone wanting to re-bind those into a different notification bus.
+ - [Enhancement] Set `fetch.message.max.bytes` for `Karafka::Admin` to `5MB` to make sure that all data is fetched correctly for Web UI under heavy load (many consumers).
+ - [Enhancement] Introduce a `strict_topics_namespacing` config option to enable/disable the strict topics naming validations. This can be useful when working with pre-existing topics which we cannot or do not want to rename.
  - [Fix] Karafka monitor is prematurely cached (#1314)

  ### Upgrade Notes
@@ -318,39 +338,39 @@ end

  ## 2.0.32 (2023-02-13)
  - [Fix] Many non-existing topic subscriptions propagate poll errors beyond client
- - [Improvement] Ignore `unknown_topic_or_part` errors in dev when `allow.auto.create.topics` is on.
- - [Improvement] Optimize temporary errors handling in polling for a better backoff policy
+ - [Enhancement] Ignore `unknown_topic_or_part` errors in dev when `allow.auto.create.topics` is on.
+ - [Enhancement] Optimize temporary errors handling in polling for a better backoff policy

  ## 2.0.31 (2023-02-12)
  - [Feature] Allow for adding partitions via `Admin#create_partitions` API.
  - [Fix] Do not ignore admin errors upon invalid configuration (#1254)
  - [Fix] Topic name validation (#1300) - CandyFet
- - [Improvement] Increase the `max_wait_timeout` on admin operations to five minutes to make sure no timeout on heavily loaded clusters.
+ - [Enhancement] Increase the `max_wait_timeout` on admin operations to five minutes to make sure no timeout on heavily loaded clusters.
  - [Maintenance] Require `karafka-core` >= `2.0.11` and switch to shared RSpec locator.
  - [Maintenance] Require `karafka-rdkafka` >= `0.12.1`

  ## 2.0.30 (2023-01-31)
- - [Improvement] Alias `--consumer-groups` with `--include-consumer-groups`
- - [Improvement] Alias `--subscription-groups` with `--include-subscription-groups`
- - [Improvement] Alias `--topics` with `--include-topics`
- - [Improvement] Introduce `--exclude-consumer-groups` for ability to exclude certain consumer groups from running
- - [Improvement] Introduce `--exclude-subscription-groups` for ability to exclude certain subscription groups from running
- - [Improvement] Introduce `--exclude-topics` for ability to exclude certain topics from running
+ - [Enhancement] Alias `--consumer-groups` with `--include-consumer-groups`
+ - [Enhancement] Alias `--subscription-groups` with `--include-subscription-groups`
+ - [Enhancement] Alias `--topics` with `--include-topics`
+ - [Enhancement] Introduce `--exclude-consumer-groups` for ability to exclude certain consumer groups from running
+ - [Enhancement] Introduce `--exclude-subscription-groups` for ability to exclude certain subscription groups from running
+ - [Enhancement] Introduce `--exclude-topics` for ability to exclude certain topics from running

  ## 2.0.29 (2023-01-30)
- - [Improvement] Make sure, that the `Karafka#producer` instance has the `LoggerListener` enabled in the install template, so Karafka by default prints both consumer and producer info.
- - [Improvement] Extract the code loading capabilities of Karafka console from the executable, so web can use it to provide CLI commands.
+ - [Enhancement] Make sure, that the `Karafka#producer` instance has the `LoggerListener` enabled in the install template, so Karafka by default prints both consumer and producer info.
+ - [Enhancement] Extract the code loading capabilities of Karafka console from the executable, so web can use it to provide CLI commands.
  - [Fix] Fix for: running karafka console results in NameError with Rails (#1280)
  - [Fix] Make sure, that the `caller` for async errors is being published.
  - [Change] Make sure that WaterDrop `2.4.10` or higher is used with this release to support Web-UI.

  ## 2.0.28 (2023-01-25)
  - **[Feature]** Provide the ability to use Dead Letter Queue with Virtual Partitions.
- - [Improvement] Collapse Virtual Partitions upon retryable error to a single partition. This allows dead letter queue to operate and mitigate issues arising from work virtualization. This removes uncertainties upon errors that can be retried and processed. Affects given topic partition virtualization only for multi-topic and multi-partition parallelization. It also minimizes potential "flickering" where given data set has potentially many corrupted messages. The collapse will last until all the messages from the collective corrupted batch are processed. After that, virtualization will resume.
- - [Improvement] Introduce `#collapsed?` consumer method available for consumers using Virtual Partitions.
- - [Improvement] Allow for customization of DLQ dispatched message details in Pro (#1266) via the `#enhance_dlq_message` consumer method.
- - [Improvement] Include `original_consumer_group` in the DLQ dispatched messages in Pro.
- - [Improvement] Use Karafka `client_id` as kafka `client.id` value by default
+ - [Enhancement] Collapse Virtual Partitions upon retryable error to a single partition. This allows dead letter queue to operate and mitigate issues arising from work virtualization. This removes uncertainties upon errors that can be retried and processed. Affects given topic partition virtualization only for multi-topic and multi-partition parallelization. It also minimizes potential "flickering" where given data set has potentially many corrupted messages. The collapse will last until all the messages from the collective corrupted batch are processed. After that, virtualization will resume.
+ - [Enhancement] Introduce `#collapsed?` consumer method available for consumers using Virtual Partitions.
+ - [Enhancement] Allow for customization of DLQ dispatched message details in Pro (#1266) via the `#enhance_dlq_message` consumer method.
+ - [Enhancement] Include `original_consumer_group` in the DLQ dispatched messages in Pro.
+ - [Enhancement] Use Karafka `client_id` as kafka `client.id` value by default

  ### Upgrade Notes

@@ -371,14 +391,14 @@ class KarafkaApp < Karafka::App

  ## 2.0.26 (2023-01-10)
  - **[Feature]** Allow for disabling given topics by setting `active` to false. It will exclude them from consumption but will allow to have their definitions for using admin APIs, etc.
- - [Improvement] Early terminate on `read_topic` when reaching the last offset available on the request time.
- - [Improvement] Introduce a `quiet` state that indicates that Karafka is not only moving to quiet mode but actually that it reached it and no work will happen anymore in any of the consumer groups.
- - [Improvement] Use Karafka defined routes topics when possible for `read_topic` admin API.
- - [Improvement] Introduce `client.pause` and `client.resume` instrumentation hooks for tracking client topic partition pausing and resuming. This is alongside of `consumer.consuming.pause` that can be used to track both manual and automatic pausing with more granular consumer related details. The `client.*` should be used for low level tracking.
- - [Improvement] Replace `LoggerListener` pause notification with one based on `client.pause` instead of `consumer.consuming.pause`.
- - [Improvement] Expand `LoggerListener` with `client.resume` notification.
- - [Improvement] Replace random anonymous subscription groups ids with stable once.
- - [Improvement] Add `consumer.consume`, `consumer.revoke` and `consumer.shutting_down` notification events and move the revocation logic calling to strategies.
+ - [Enhancement] Early terminate on `read_topic` when reaching the last offset available on the request time.
+ - [Enhancement] Introduce a `quiet` state that indicates that Karafka is not only moving to quiet mode but actually that it reached it and no work will happen anymore in any of the consumer groups.
+ - [Enhancement] Use Karafka defined routes topics when possible for `read_topic` admin API.
+ - [Enhancement] Introduce `client.pause` and `client.resume` instrumentation hooks for tracking client topic partition pausing and resuming. This is alongside of `consumer.consuming.pause` that can be used to track both manual and automatic pausing with more granular consumer related details. The `client.*` should be used for low level tracking.
+ - [Enhancement] Replace `LoggerListener` pause notification with one based on `client.pause` instead of `consumer.consuming.pause`.
+ - [Enhancement] Expand `LoggerListener` with `client.resume` notification.
+ - [Enhancement] Replace random anonymous subscription groups ids with stable once.
+ - [Enhancement] Add `consumer.consume`, `consumer.revoke` and `consumer.shutting_down` notification events and move the revocation logic calling to strategies.
  - [Change] Rename job queue statistics `processing` key to `busy`. No changes needed because naming in the DataDog listener stays the same.
  - [Fix] Fix proctitle listener state changes reporting on new states.
  - [Fix] Make sure all files descriptors are closed in the integration specs.
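The `client.pause` / `client.resume` hooks listed for 2.0.26 above are regular notification events, so they can be observed like any other instrumentation event. The payload keys printed here are assumptions; inspect the payload to see what is actually published.

```ruby
# Subscribe to the low-level pause/resume events for tracking purposes.
Karafka.monitor.subscribe('client.pause') do |event|
  puts "paused: #{event.payload.inspect}"
end

Karafka.monitor.subscribe('client.resume') do |event|
  puts "resumed: #{event.payload.inspect}"
end
```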
@@ -391,17 +411,17 @@ class KarafkaApp < Karafka::App

  ## 2.0.24 (2022-12-19)
  - **[Feature]** Provide out of the box encryption support for Pro.
- - [Improvement] Add instrumentation upon `#pause`.
- - [Improvement] Add instrumentation upon retries.
- - [Improvement] Assign `#id` to consumers similar to other entities for ease of debugging.
- - [Improvement] Add retries and pausing to the default `LoggerListener`.
- - [Improvement] Introduce a new final `terminated` state that will kick in prior to exit but after all the instrumentation and other things are done.
- - [Improvement] Ensure that state transitions are thread-safe and ensure state transitions can occur in one direction.
- - [Improvement] Optimize status methods proxying to `Karafka::App`.
- - [Improvement] Allow for easier state usage by introducing explicit `#to_s` for reporting.
- - [Improvement] Change auto-generated id from `SecureRandom#uuid` to `SecureRandom#hex(6)`
- - [Improvement] Emit statistic every 5 seconds by default.
- - [Improvement] Introduce general messages parser that can be swapped when needed.
+ - [Enhancement] Add instrumentation upon `#pause`.
+ - [Enhancement] Add instrumentation upon retries.
+ - [Enhancement] Assign `#id` to consumers similar to other entities for ease of debugging.
+ - [Enhancement] Add retries and pausing to the default `LoggerListener`.
+ - [Enhancement] Introduce a new final `terminated` state that will kick in prior to exit but after all the instrumentation and other things are done.
+ - [Enhancement] Ensure that state transitions are thread-safe and ensure state transitions can occur in one direction.
+ - [Enhancement] Optimize status methods proxying to `Karafka::App`.
+ - [Enhancement] Allow for easier state usage by introducing explicit `#to_s` for reporting.
+ - [Enhancement] Change auto-generated id from `SecureRandom#uuid` to `SecureRandom#hex(6)`
+ - [Enhancement] Emit statistic every 5 seconds by default.
+ - [Enhancement] Introduce general messages parser that can be swapped when needed.
  - [Fix] Do not trigger code reloading when `consumer_persistence` is enabled.
  - [Fix] Shutdown producer after all the consumer components are down and the status is stopped. This will ensure, that any instrumentation related Kafka messaging can still operate.

@@ -422,17 +442,17 @@ end

  ## 2.0.23 (2022-12-07)
  - [Maintenance] Align with `waterdrop` and `karafka-core`
- - [Improvement] Provide `Admin#read_topic` API to get topic data without subscribing.
- - [Improvement] Upon an end user `#pause`, do not commit the offset in automatic offset management mode. This will prevent from a scenario where pause is needed but during it a rebalance occurs and a different assigned process starts not from the pause location but from the automatic offset that may be different. This still allows for using the `#mark_as_consumed`.
+ - [Enhancement] Provide `Admin#read_topic` API to get topic data without subscribing.
+ - [Enhancement] Upon an end user `#pause`, do not commit the offset in automatic offset management mode. This will prevent from a scenario where pause is needed but during it a rebalance occurs and a different assigned process starts not from the pause location but from the automatic offset that may be different. This still allows for using the `#mark_as_consumed`.
  - [Fix] Fix a scenario where manual `#pause` would be overwritten by a resume initiated by the strategy.
  - [Fix] Fix a scenario where manual `#pause` in LRJ would cause infinite pause.

  ## 2.0.22 (2022-12-02)
- - [Improvement] Load Pro components upon Karafka require so they can be altered prior to setup.
- - [Improvement] Do not run LRJ jobs that were added to the jobs queue but were revoked meanwhile.
- - [Improvement] Allow running particular named subscription groups similar to consumer groups.
- - [Improvement] Allow running particular topics similar to consumer groups.
- - [Improvement] Raise configuration error when trying to run Karafka with options leading to no subscriptions.
+ - [Enhancement] Load Pro components upon Karafka require so they can be altered prior to setup.
+ - [Enhancement] Do not run LRJ jobs that were added to the jobs queue but were revoked meanwhile.
+ - [Enhancement] Allow running particular named subscription groups similar to consumer groups.
+ - [Enhancement] Allow running particular topics similar to consumer groups.
+ - [Enhancement] Raise configuration error when trying to run Karafka with options leading to no subscriptions.
  - [Fix] Fix `karafka info` subscription groups count reporting as it was misleading.
  - [Fix] Allow for defining subscription groups with symbols similar to consumer groups and topics to align the API.
  - [Fix] Do not allow for an explicit `nil` as a `subscription_group` block argument.
@@ -442,23 +462,23 @@ end
  - [Fix] Duplicated logs in development environment for Rails when logger set to `$stdout`.

  ## 20.0.21 (2022-11-25)
- - [Improvement] Make revocation jobs for LRJ topics non-blocking to prevent blocking polling when someone uses non-revocation aware LRJ jobs and revocation happens.
+ - [Enhancement] Make revocation jobs for LRJ topics non-blocking to prevent blocking polling when someone uses non-revocation aware LRJ jobs and revocation happens.

  ## 2.0.20 (2022-11-24)
- - [Improvement] Support `group.instance.id` assignment (static group membership) for a case where a single consumer group has multiple subscription groups (#1173).
+ - [Enhancement] Support `group.instance.id` assignment (static group membership) for a case where a single consumer group has multiple subscription groups (#1173).

  ## 2.0.19 (2022-11-20)
  - **[Feature]** Provide ability to skip failing messages without dispatching them to an alternative topic (DLQ).
- - [Improvement] Improve the integration with Ruby on Rails by preventing double-require of components.
- - [Improvement] Improve stability of the shutdown process upon critical errors.
- - [Improvement] Improve stability of the integrations spec suite.
+ - [Enhancement] Improve the integration with Ruby on Rails by preventing double-require of components.
+ - [Enhancement] Improve stability of the shutdown process upon critical errors.
+ - [Enhancement] Improve stability of the integrations spec suite.
  - [Fix] Fix an issue where upon fast startup of multiple subscription groups from the same consumer group, a ghost queue would be created due to problems in `Concurrent::Hash`.

  ## 2.0.18 (2022-11-18)
  - **[Feature]** Support quiet mode via `TSTP` signal. When used, Karafka will finish processing current messages, run `shutdown` jobs, and switch to a quiet mode where no new work is being accepted. At the same time, it will keep the consumer group quiet, and thus no rebalance will be triggered. This can be particularly useful during deployments.
- - [Improvement] Trigger `#revoked` for jobs in case revocation would happen during shutdown when jobs are still running. This should ensure, we get a notion of revocation for Pro LRJ jobs even when revocation happening upon shutdown (#1150).
- - [Improvement] Stabilize the shutdown procedure for consumer groups with many subscription groups that have non-aligned processing cost per batch.
- - [Improvement] Remove double loading of Karafka via Rails railtie.
+ - [Enhancement] Trigger `#revoked` for jobs in case revocation would happen during shutdown when jobs are still running. This should ensure, we get a notion of revocation for Pro LRJ jobs even when revocation happening upon shutdown (#1150).
+ - [Enhancement] Stabilize the shutdown procedure for consumer groups with many subscription groups that have non-aligned processing cost per batch.
+ - [Enhancement] Remove double loading of Karafka via Rails railtie.
  - [Fix] Fix invalid class references in YARD docs.
  - [Fix] prevent parallel closing of many clients.
  - [Fix] fix a case where information about revocation for a combination of LRJ + VP would not be dispatched until all VP work is done.
@@ -487,11 +507,11 @@ end
  ## 2.0.16 (2022-11-09)
  - **[Breaking]** Disable the root `manual_offset_management` setting and require it to be configured per topic. This is part of "topic features" configuration extraction for better code organization.
  - **[Feature]** Introduce **Dead Letter Queue** feature and Pro **Enhanced Dead Letter Queue** feature
- - [Improvement] Align attributes available in the instrumentation bus for listener related events.
- - [Improvement] Include consumer group id in consumption related events (#1093)
- - [Improvement] Delegate pro components loading to Zeitwerk
- - [Improvement] Include `Datadog::LoggerListener` for tracking logger data with DataDog (@bruno-b-martins)
- - [Improvement] Include `seek_offset` in the `consumer.consume.error` event payload (#1113)
+ - [Enhancement] Align attributes available in the instrumentation bus for listener related events.
+ - [Enhancement] Include consumer group id in consumption related events (#1093)
+ - [Enhancement] Delegate pro components loading to Zeitwerk
+ - [Enhancement] Include `Datadog::LoggerListener` for tracking logger data with DataDog (@bruno-b-martins)
+ - [Enhancement] Include `seek_offset` in the `consumer.consume.error` event payload (#1113)
  - [Refactor] Remove unused logger listener event handler.
  - [Refactor] Internal refactoring of routing validations flow.
  - [Refactor] Reorganize how routing related features are represented internally to simplify features management.
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- karafka (2.2.12)
+ karafka (2.2.14)
  karafka-core (>= 2.2.7, < 2.3.0)
  waterdrop (>= 2.6.11, < 3.0.0)
  zeitwerk (~> 2.3)
@@ -9,10 +9,10 @@ PATH
  GEM
  remote: https://rubygems.org/
  specs:
- activejob (7.1.1)
- activesupport (= 7.1.1)
+ activejob (7.1.2)
+ activesupport (= 7.1.2)
  globalid (>= 0.3.6)
- activesupport (7.1.1)
+ activesupport (7.1.2)
  base64
  bigdecimal
  concurrent-ruby (~> 1.0, >= 1.0.2)
@@ -22,17 +22,17 @@ GEM
  minitest (>= 5.1)
  mutex_m
  tzinfo (~> 2.0)
- base64 (0.1.1)
+ base64 (0.2.0)
  bigdecimal (3.1.4)
  byebug (11.1.3)
  concurrent-ruby (1.2.2)
  connection_pool (2.4.1)
  diff-lcs (1.5.0)
  docile (1.4.0)
- drb (2.1.1)
+ drb (2.2.0)
  ruby2_keywords
  erubi (1.12.0)
- factory_bot (6.3.0)
+ factory_bot (6.4.2)
  activesupport (>= 5.0.0)
  ffi (1.16.3)
  globalid (1.2.1)
@@ -42,7 +42,7 @@ GEM
  karafka-core (2.2.7)
  concurrent-ruby (>= 1.1)
  karafka-rdkafka (>= 0.13.9, < 0.15.0)
- karafka-rdkafka (0.13.9)
+ karafka-rdkafka (0.14.1)
  ffi (~> 1.15)
  mini_portile2 (~> 2.6)
  rake (> 12)
@@ -54,10 +54,10 @@ GEM
  tilt (~> 2.0)
  mini_portile2 (2.8.5)
  minitest (5.20.0)
- mutex_m (0.1.2)
+ mutex_m (0.2.0)
  rack (3.0.8)
  rake (13.1.0)
- roda (3.73.0)
+ roda (3.74.0)
  rack
  rspec (3.12.0)
  rspec-core (~> 3.12.0)
data/config/locales/errors.yml CHANGED
@@ -16,7 +16,8 @@ en:
  max_wait_time_format: needs to be an integer bigger than 0
  kafka_format: needs to be a filled hash
  internal.processing.jobs_builder_format: cannot be nil
- internal.processing.scheduler_format: cannot be nil
+ internal.processing.jobs_queue_class_format: cannot be nil
+ internal.processing.scheduler_class_format: cannot be nil
  internal.processing.coordinator_class_format: cannot be nil
  internal.processing.partitioner_class_format: cannot be nil
  internal.processing.strategy_selector_format: cannot be nil
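The renamed keys above suggest matching internal settings (`internal.processing.scheduler_class` and `internal.processing.jobs_queue_class`). The sketch below is an assumption-level illustration of pointing them at custom subclasses; treat the setting paths as inferred from the error keys rather than documented configuration, and `MyScheduler` / `MyJobsQueue` as hypothetical placeholders.

```ruby
# Hypothetical customizations; real schedulers/queues would override behaviour.
class MyScheduler < Karafka::Processing::Schedulers::Default; end
class MyJobsQueue < Karafka::Processing::JobsQueue; end

class KarafkaApp < Karafka::App
  setup do |config|
    config.client_id = 'example_app'
    config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }
    # Inferred internal setting paths (see the error keys above).
    config.internal.processing.scheduler_class = MyScheduler
    config.internal.processing.jobs_queue_class = MyJobsQueue
  end
end
```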
data/docker-compose.yml CHANGED
@@ -23,3 +23,5 @@ services:
  KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
  KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
  KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
+ KAFKA_AUTHORIZER_CLASS_NAME: org.apache.kafka.metadata.authorizer.StandardAuthorizer