karafka 2.3.2 → 2.4.0.beta1

Files changed (132)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +12 -38
  4. data/CHANGELOG.md +65 -0
  5. data/Gemfile +6 -3
  6. data/Gemfile.lock +25 -23
  7. data/README.md +2 -2
  8. data/bin/integrations +1 -1
  9. data/config/locales/errors.yml +24 -2
  10. data/config/locales/pro_errors.yml +19 -0
  11. data/karafka.gemspec +4 -2
  12. data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -0
  13. data/lib/karafka/admin/configs/config.rb +81 -0
  14. data/lib/karafka/admin/configs/resource.rb +88 -0
  15. data/lib/karafka/admin/configs.rb +103 -0
  16. data/lib/karafka/admin.rb +200 -89
  17. data/lib/karafka/base_consumer.rb +2 -2
  18. data/lib/karafka/cli/info.rb +9 -7
  19. data/lib/karafka/cli/server.rb +7 -7
  20. data/lib/karafka/cli/topics/align.rb +109 -0
  21. data/lib/karafka/cli/topics/base.rb +66 -0
  22. data/lib/karafka/cli/topics/create.rb +35 -0
  23. data/lib/karafka/cli/topics/delete.rb +30 -0
  24. data/lib/karafka/cli/topics/migrate.rb +31 -0
  25. data/lib/karafka/cli/topics/plan.rb +169 -0
  26. data/lib/karafka/cli/topics/repartition.rb +41 -0
  27. data/lib/karafka/cli/topics/reset.rb +18 -0
  28. data/lib/karafka/cli/topics.rb +13 -123
  29. data/lib/karafka/connection/client.rb +62 -37
  30. data/lib/karafka/connection/listener.rb +22 -17
  31. data/lib/karafka/connection/proxy.rb +93 -4
  32. data/lib/karafka/connection/status.rb +14 -2
  33. data/lib/karafka/contracts/config.rb +36 -1
  34. data/lib/karafka/contracts/topic.rb +1 -1
  35. data/lib/karafka/deserializers/headers.rb +15 -0
  36. data/lib/karafka/deserializers/key.rb +15 -0
  37. data/lib/karafka/deserializers/payload.rb +16 -0
  38. data/lib/karafka/embedded.rb +2 -0
  39. data/lib/karafka/helpers/async.rb +5 -2
  40. data/lib/karafka/helpers/colorize.rb +6 -0
  41. data/lib/karafka/instrumentation/callbacks/oauthbearer_token_refresh.rb +29 -0
  42. data/lib/karafka/instrumentation/logger_listener.rb +23 -3
  43. data/lib/karafka/instrumentation/notifications.rb +10 -0
  44. data/lib/karafka/instrumentation/vendors/appsignal/client.rb +16 -2
  45. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +34 -4
  46. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +20 -0
  47. data/lib/karafka/messages/batch_metadata.rb +1 -1
  48. data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
  49. data/lib/karafka/messages/builders/message.rb +10 -6
  50. data/lib/karafka/messages/message.rb +2 -1
  51. data/lib/karafka/messages/metadata.rb +20 -4
  52. data/lib/karafka/messages/parser.rb +1 -1
  53. data/lib/karafka/pro/base_consumer.rb +12 -23
  54. data/lib/karafka/pro/encryption/cipher.rb +7 -3
  55. data/lib/karafka/pro/encryption/contracts/config.rb +1 -0
  56. data/lib/karafka/pro/encryption/errors.rb +4 -1
  57. data/lib/karafka/pro/encryption/messages/middleware.rb +13 -11
  58. data/lib/karafka/pro/encryption/messages/parser.rb +22 -20
  59. data/lib/karafka/pro/encryption/setup/config.rb +5 -0
  60. data/lib/karafka/pro/iterator/expander.rb +2 -1
  61. data/lib/karafka/pro/iterator/tpl_builder.rb +38 -0
  62. data/lib/karafka/pro/iterator.rb +28 -2
  63. data/lib/karafka/pro/loader.rb +3 -0
  64. data/lib/karafka/pro/processing/coordinator.rb +15 -2
  65. data/lib/karafka/pro/processing/expansions_selector.rb +2 -0
  66. data/lib/karafka/pro/processing/jobs_queue.rb +122 -5
  67. data/lib/karafka/pro/processing/periodic_job/consumer.rb +67 -0
  68. data/lib/karafka/pro/processing/piping/consumer.rb +126 -0
  69. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +1 -1
  70. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +1 -1
  71. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +1 -1
  72. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +1 -1
  73. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +1 -1
  74. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +1 -1
  75. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +1 -1
  76. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +1 -1
  77. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +2 -0
  78. data/lib/karafka/pro/processing/strategies/default.rb +5 -1
  79. data/lib/karafka/pro/processing/strategies/dlq/default.rb +21 -5
  80. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -0
  81. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +2 -0
  82. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +52 -0
  83. data/lib/karafka/pro/routing/features/direct_assignments/config.rb +27 -0
  84. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +53 -0
  85. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +108 -0
  86. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +77 -0
  87. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +69 -0
  88. data/lib/karafka/pro/routing/features/direct_assignments.rb +25 -0
  89. data/lib/karafka/pro/routing/features/patterns/builder.rb +1 -1
  90. data/lib/karafka/pro/routing/features/swarm/config.rb +31 -0
  91. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +76 -0
  92. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +78 -0
  93. data/lib/karafka/pro/routing/features/swarm/topic.rb +77 -0
  94. data/lib/karafka/pro/routing/features/swarm.rb +36 -0
  95. data/lib/karafka/pro/swarm/liveness_listener.rb +20 -0
  96. data/lib/karafka/processing/coordinator.rb +17 -8
  97. data/lib/karafka/processing/coordinators_buffer.rb +5 -2
  98. data/lib/karafka/processing/executor.rb +6 -2
  99. data/lib/karafka/processing/executors_buffer.rb +5 -2
  100. data/lib/karafka/processing/jobs_queue.rb +9 -4
  101. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +1 -1
  102. data/lib/karafka/processing/strategies/default.rb +7 -1
  103. data/lib/karafka/processing/strategies/dlq.rb +17 -2
  104. data/lib/karafka/processing/workers_batch.rb +4 -1
  105. data/lib/karafka/routing/builder.rb +6 -2
  106. data/lib/karafka/routing/consumer_group.rb +2 -1
  107. data/lib/karafka/routing/features/dead_letter_queue/config.rb +5 -0
  108. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +8 -0
  109. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +10 -2
  110. data/lib/karafka/routing/features/deserializers/config.rb +18 -0
  111. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +31 -0
  112. data/lib/karafka/routing/features/deserializers/topic.rb +51 -0
  113. data/lib/karafka/routing/features/deserializers.rb +11 -0
  114. data/lib/karafka/routing/proxy.rb +9 -14
  115. data/lib/karafka/routing/router.rb +11 -2
  116. data/lib/karafka/routing/subscription_group.rb +22 -1
  117. data/lib/karafka/routing/topic.rb +0 -1
  118. data/lib/karafka/runner.rb +1 -1
  119. data/lib/karafka/setup/config.rb +51 -10
  120. data/lib/karafka/status.rb +7 -8
  121. data/lib/karafka/swarm/manager.rb +15 -3
  122. data/lib/karafka/swarm/node.rb +3 -3
  123. data/lib/karafka/swarm/pidfd.rb +20 -4
  124. data/lib/karafka/swarm/supervisor.rb +25 -8
  125. data/lib/karafka/templates/karafka.rb.erb +28 -1
  126. data/lib/karafka/version.rb +1 -1
  127. data.tar.gz.sig +0 -0
  128. metadata +42 -12
  129. metadata.gz.sig +0 -0
  130. data/lib/karafka/routing/consumer_mapper.rb +0 -23
  131. data/lib/karafka/serialization/json/deserializer.rb +0 -19
  132. data/lib/karafka/time_trackers/partition_usage.rb +0 -56
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: cb47082224d857f3029f9bb8e1b04a35e6b8ed2f7ae75bbe52bf1b778ff56226
- data.tar.gz: 53d59fd7e140f5b3e9b89dd3e4af28469bc534074110e2e93fae24c59bf81b88
+ metadata.gz: 7c1493774aae6fa07d816d8e76b9d5577f5483e9d93301dc10a8e7d7a86c7045
+ data.tar.gz: 32251813e9e3b9a9f7b75554a7b0175c2e1cf30456acb3207464a36587073259
  SHA512:
- metadata.gz: 00e09a345122ad2facaf8adcbb52fae3ce87374083d9e6785a1f07a74c87e53c6b0b3dd32b82566e846d160569aafc55c614013d8c9f95664612150fb51d07b1
- data.tar.gz: 283e50a6b3b25579419bdc9b947e5ba802e22a1cd6d0097ab8929c5394d3858461bda15c3e70a0d2d466a8705466ef1d5b24e1a8bfe5ac8e356c64790049c7b0
+ metadata.gz: 614cc7507dd865a8efde56e40a77f694efb268c4a877917834a7e23d78175cb9ddb4574ccaac75db86fea1e1de1eae17f4df694251268d90f2822a4868cddf0d
+ data.tar.gz: f6ec801b2e47e531bcd4be2fd6e9e8bc01837b6218ffea1b3e187edc450fa519d2fb5f0fa8a69f5b0e67aabff8496aac1dbbc156bb45c2dd02b1ea495fb5710f
checksums.yaml.gz.sig CHANGED
Binary file
data/.github/workflows/ci.yml CHANGED
@@ -80,8 +80,6 @@ jobs:
  - '3.1'
  - '3.1.0'
  - '3.0'
- - '3.0.0'
- - '2.7'
  include:
  - ruby: '3.3'
  coverage: 'true'
@@ -90,9 +88,9 @@ jobs:
  - name: Install package dependencies
  run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"

- - name: Start Kafka with docker-compose
+ - name: Start Kafka with docker compose
  run: |
- docker-compose up -d || (sleep 5 && docker-compose up -d)
+ docker compose up -d || (sleep 5 && docker compose up -d)

  - name: Set up Ruby
  uses: ruby/setup-ruby@v1
@@ -122,7 +120,6 @@ jobs:
  - '3.2'
  - '3.1'
  - '3.0'
- - '2.7'
  steps:
  - uses: actions/checkout@v4
  - name: Install package dependencies
@@ -131,9 +128,9 @@ jobs:
  - name: Remove libzstd-dev to check no supported compressions
  run: sudo apt-get -y remove libzstd-dev

- - name: Start Kafka with docker-compose
+ - name: Start Kafka with docker compose
  run: |
- docker-compose up -d || (sleep 5 && docker-compose up -d)
+ docker compose up -d || (sleep 5 && docker compose up -d)

  - name: Set up Ruby
  uses: ruby/setup-ruby@v1
@@ -147,26 +144,15 @@ jobs:

  - name: Install latest Bundler
  run: |
- if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
- gem install bundler -v 2.4.22 --no-document
- bundle config set version 2.4.22
- gem update --system 3.4.22 --no-document
- else
- gem install bundler --no-document
- gem update --system --no-document
- fi
+ gem install bundler --no-document
+ gem update --system --no-document

  bundle config set without 'tools benchmarks docs'

  - name: Bundle install
  run: |
  bundle config set without development
-
- if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
- BUNDLER_VERSION=2.4.22 bundle install --jobs 4 --retry 3
- else
- bundle install --jobs 4 --retry 3
- fi
+ bundle install --jobs 4 --retry 3

  - name: Wait for Kafka
  run: |
@@ -187,15 +173,14 @@ jobs:
  - '3.2'
  - '3.1'
  - '3.0'
- - '2.7'
  steps:
  - uses: actions/checkout@v4
  - name: Install package dependencies
  run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"

- - name: Start Kafka with docker-compose
+ - name: Start Kafka with docker compose
  run: |
- docker-compose up -d || (sleep 5 && docker-compose up -d)
+ docker compose up -d || (sleep 5 && docker compose up -d)

  - name: Set up Ruby
  uses: ruby/setup-ruby@v1
@@ -205,26 +190,15 @@ jobs:

  - name: Install latest Bundler
  run: |
- if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
- gem install bundler -v 2.4.22 --no-document
- bundle config set version 2.4.22
- gem update --system 3.4.22 --no-document
- else
- gem install bundler --no-document
- gem update --system --no-document
- fi
+ gem install bundler --no-document
+ gem update --system --no-document

  bundle config set without 'tools benchmarks docs'

  - name: Bundle install
  run: |
  bundle config set without development
-
- if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
- BUNDLER_VERSION=2.4.22 bundle install --jobs 4 --retry 3
- else
- bundle install --jobs 4 --retry 3
- fi
+ bundle install --jobs 4 --retry 3

  - name: Wait for Kafka
  run: |
data/CHANGELOG.md CHANGED
@@ -1,5 +1,70 @@
  # Karafka framework changelog

+ ## 2.4.0 (Unreleased)
+
+ This release contains **BREAKING** changes. Make sure to read and apply the upgrade notes.
+
+ - **[Breaking]** Drop Ruby `2.7` support.
+ - **[Breaking]** Drop the concept of consumer group mapping.
+ - **[Breaking]** `karafka topics migrate` will now perform declarative topics configuration alignment.
+ - **[Breaking]** Replace the `deserializer` config with `#deserializers` in routing to support key and lazy header deserializers.
+ - **[Breaking]** Rename `Karafka::Serializers::JSON::Deserializer` to `Karafka::Deserializers::Payload` to reflect its role.
+ - **[Feature]** Support custom OAuth providers (with a lot of help from bruce-szalwinski-he and hotelengine.com).
+ - **[Feature]** Provide `karafka topics alter` for declarative topics alignment.
+ - **[Feature]** Introduce the ability to use direct assignments (Pro).
+ - **[Feature]** Provide a consumer piping API (Pro).
+ - **[Feature]** Introduce `karafka topics plan` to describe changes that will be applied when migrating.
+ - **[Feature]** Introduce the ability to use custom message key deserializers.
+ - **[Feature]** Introduce the ability to use custom message headers deserializers.
+ - [Enhancement] Assign names to internal threads for better debuggability on `TTIN`.
+ - [Enhancement] Provide a `log_polling` setting for `Karafka::Instrumentation::LoggerListener` to silence polling in any non-debug mode.
+ - [Enhancement] Provide `metadata#message` to be able to retrieve the message from metadata.
+ - [Enhancement] Include the number of attempts prior to a DLQ message being dispatched, including the dispatch one (Pro).
+ - [Enhancement] Provide the ability to decide how to dispatch from the DLQ (sync / async).
+ - [Enhancement] Provide the ability to decide how to mark as consumed from the DLQ (sync / async).
+ - [Enhancement] Allow usage of a custom AppSignal namespace when logging.
+ - [Enhancement] Do not run periodic jobs when an LRJ job is running despite polling (LRJ can still start when Periodic runs).
+ - [Enhancement] Improve the accuracy of periodic jobs and make sure they do not run too early after saturated work.
+ - [Enhancement] Introduce the ability to asynchronously lock polling of other subscription groups.
+ - [Enhancement] Improve shutdown when using a long-polling setup (high `max_wait_time`).
+ - [Enhancement] Provide `Karafka::Admin#read_lags_with_offsets` to query lags and offsets of a given consumer group.
+ - [Enhancement] Allow granular distribution of direct assignments in the Swarm (Pro).
+ - [Enhancement] Add a buffer to the supervisor supervision on shutdown to prevent a potential race condition when signal passing lags.
+ - [Enhancement] Provide the ability to automatically generate and validate fingerprints of encrypted payloads.
+ - [Enhancement] Support `enable.partition.eof` fast yielding.
+ - [Enhancement] Provide `#mark_as_consumed` and `#mark_as_consumed!` in the iterator.
+ - [Enhancement] Introduce a graceful `#stop` for the iterator instead of recommending the usage of `break`.
+ - [Enhancement] Do not run job schedulers and other interval-based operations on each job queue unlock.
+ - [Enhancement] Publish listener status lifecycle events.
+ - [Enhancement] Use the proxy wrapper for Admin metadata requests.
+ - [Enhancement] Use limited-scope topic info data when operating on direct topics instead of full cluster queries.
+ - [Enhancement] No longer raise `Karafka::UnsupportedCaseError` for unrecognized error types to support dynamic error reporting.
+ - [Change] Do not create a new proxy object to Rdkafka for certain low-level operations; re-use the existing one.
+ - [Change] Update the `karafka.erb` template with a placeholder for WaterDrop and Karafka error instrumentation.
+ - [Fix] Pro Swarm liveness listener can report an incorrect failure when dynamic multiplexing scales down.
+ - [Fix] K8s liveness listener can report an incorrect failure when dynamic multiplexing scales down.
+ - [Fix] Fix a case where the connection conductor would not be released during manager state changes.
+ - [Fix] Make sure that all `Admin` operations go through the stabilization proxy.
+ - [Fix] Fix an issue where coordinator running jobs would not count periodic jobs and revocations.
+ - [Fix] Fix a case where a critically crashed supervisor would raise an incorrect error.
+ - [Fix] Re-raise critical supervisor errors before shutdown.
+ - [Fix] Fix a case when right-open (infinite) swarm matching would not pass validations.
+ - [Fix] Make `#enqueue_all` output compatible with `ActiveJob.perform_all_later` (oozzal).
+
+ ### Upgrade Notes
+
+ **PLEASE MAKE SURE TO READ AND APPLY THEM!**
+
+ Available [here](https://karafka.io/docs/Upgrades-2.4/).
+
+ ## 2.3.3 (2024-02-26)
+ - [Enhancement] Routing-based topics allocation for swarm (Pro).
+ - [Enhancement] Publish the `-1` shutdown reason status for a non-responding node in swarm.
+ - [Enhancement] Allow using the `distribution` mode for DataDog listener histogram reporting (Aerdayne).
+ - [Change] Change `internal.swarm.node_report_timeout` to 60 seconds from 30 seconds to compensate for long pollings.
+ - [Fix] Static membership routing evaluation happens too early in swarm.
+ - [Fix] Close the producer in the supervisor prior to forking and warmup to prevent invalid memory states.
+
  ## 2.3.2 (2024-02-16)
  - **[Feature]** Provide swarm capabilities to OSS and Pro.
  - **[Feature]** Provide ability to use complex strategies in DLQ (Pro).
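The most impactful breaking change above is the routing switch from a single `deserializer` setting to a `#deserializers` group covering payload, key, and headers. A minimal sketch of what a migrated route may look like, assuming deserializer objects that respond to `#call` (as the new topic contracts in this diff require); the deserializer classes and the exact argument each `#call` receives are assumptions for illustration:

```ruby
require 'json'

# Hypothetical payload deserializer; per the new contracts it only needs
# to respond to #call
class JsonPayloadDeserializer
  def call(message)
    JSON.parse(message.raw_payload)
  end
end

# Hypothetical key deserializer, assumed to operate on message metadata
class StringKeyDeserializer
  def call(metadata)
    metadata.raw_key&.to_s
  end
end

class KarafkaApp < Karafka::App
  routes.draw do
    topic :events do
      consumer EventsConsumer
      # Replaces the pre-2.4 `deserializer SomeDeserializer.new` setting
      deserializers(
        payload: JsonPayloadDeserializer.new,
        key: StringKeyDeserializer.new
      )
    end
  end
end
```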
data/Gemfile CHANGED
@@ -6,11 +6,14 @@ plugin 'diffend'

  gemspec

- # Karafka gem does not require activejob nor karafka-web to work
+ # Karafka gem does not require activejob nor karafka-web to work
  # They are added here because they are part of the integration suite
+ # Since some of those are only needed for some specs, they should never be required automatically
  group :integrations do
-   gem 'activejob'
-   gem 'karafka-web', '>= 0.8.0'
+   gem 'activejob', require: false
+   gem 'karafka-testing', '>= 2.4.0.beta1', require: false
+   gem 'karafka-web', require: false
+   gem 'rspec', require: false
  end

  group :test do
data/Gemfile.lock CHANGED
@@ -1,18 +1,18 @@
  PATH
    remote: .
    specs:
-     karafka (2.3.2)
-       karafka-core (>= 2.3.0, < 2.4.0)
-       waterdrop (>= 2.6.12, < 3.0.0)
+     karafka (2.4.0.beta1)
+       karafka-core (>= 2.4.0.rc1, < 2.5.0)
+       waterdrop (>= 2.7.0.rc1, < 3.0.0)
        zeitwerk (~> 2.3)

  GEM
    remote: https://rubygems.org/
    specs:
-     activejob (7.1.3)
-       activesupport (= 7.1.3)
+     activejob (7.1.3.2)
+       activesupport (= 7.1.3.2)
        globalid (>= 0.3.6)
-     activesupport (7.1.3)
+     activesupport (7.1.3.2)
        base64
        bigdecimal
        concurrent-ruby (~> 1.0, >= 1.0.2)
@@ -23,28 +23,30 @@ GEM
        mutex_m
        tzinfo (~> 2.0)
      base64 (0.2.0)
-     bigdecimal (3.1.6)
+     bigdecimal (3.1.7)
      byebug (11.1.3)
      concurrent-ruby (1.2.3)
      connection_pool (2.4.1)
      diff-lcs (1.5.1)
      docile (1.4.0)
-     drb (2.2.0)
-       ruby2_keywords
+     drb (2.2.1)
      erubi (1.12.0)
-     factory_bot (6.4.5)
+     factory_bot (6.4.6)
        activesupport (>= 5.0.0)
      ffi (1.16.3)
      globalid (1.2.1)
        activesupport (>= 6.1)
-     i18n (1.14.1)
+     i18n (1.14.4)
        concurrent-ruby (~> 1.0)
-     karafka-core (2.3.0)
-       karafka-rdkafka (>= 0.14.8, < 0.15.0)
-     karafka-rdkafka (0.14.10)
+     karafka-core (2.4.0.rc1)
+       karafka-rdkafka (>= 0.15.0.rc1, < 0.16.0)
+     karafka-rdkafka (0.15.0.rc1)
        ffi (~> 1.15)
        mini_portile2 (~> 2.6)
        rake (> 12)
+     karafka-testing (2.4.0.beta1)
+       karafka (>= 2.3.0, < 2.5.0)
+       waterdrop (>= 2.7.0.beta2)
      karafka-web (0.8.2)
        erubi (~> 1.4)
        karafka (>= 2.3.0, < 2.4.0)
@@ -52,11 +54,11 @@ GEM
        roda (~> 3.68, >= 3.69)
        tilt (~> 2.0)
      mini_portile2 (2.8.5)
-     minitest (5.21.2)
+     minitest (5.22.3)
      mutex_m (0.2.0)
-     rack (3.0.9)
+     rack (3.0.10)
      rake (13.1.0)
-     roda (3.77.0)
+     roda (3.78.0)
        rack
      rspec (3.13.0)
        rspec-core (~> 3.13.0)
@@ -70,8 +72,7 @@ GEM
      rspec-mocks (3.13.0)
        diff-lcs (>= 1.2.0, < 2.0)
        rspec-support (~> 3.13.0)
-     rspec-support (3.13.0)
-     ruby2_keywords (0.0.5)
+     rspec-support (3.13.1)
      simplecov (0.22.0)
        docile (~> 1.1)
        simplecov-html (~> 0.11)
@@ -81,8 +82,8 @@ GEM
      tilt (2.3.0)
      tzinfo (2.0.6)
        concurrent-ruby (~> 1.0)
-     waterdrop (2.6.14)
-       karafka-core (>= 2.2.3, < 3.0.0)
+     waterdrop (2.7.0.rc1)
+       karafka-core (>= 2.4.0.rc1, < 3.0.0)
        zeitwerk (~> 2.3)
      zeitwerk (2.6.13)

@@ -95,9 +96,10 @@ DEPENDENCIES
    byebug
    factory_bot
    karafka!
-   karafka-web (>= 0.8.0)
+   karafka-testing (>= 2.4.0.beta1)
+   karafka-web
    rspec
    simplecov

  BUNDLED WITH
-    2.5.4
+    2.5.6
data/README.md CHANGED
@@ -9,13 +9,13 @@
  Karafka is a Ruby and Rails multi-threaded efficient Kafka processing framework that:

  - Has a built-in [Web UI](https://karafka.io/docs/Web-UI-Features/) providing a convenient way to monitor and manage Karafka-based applications.
- - Supports parallel processing in [multiple threads](https://karafka.io/docs/Concurrency-and-multithreading) (also for a [single topic partition](https://karafka.io/docs/Pro-Virtual-Partitions) work)
+ - Supports parallel processing in [multiple threads](https://karafka.io/docs/Concurrency-and-multithreading) (also for a [single topic partition](https://karafka.io/docs/Pro-Virtual-Partitions) work) and [processes](https://karafka.io/docs/Swarm-Multi-Process).
  - [Automatically integrates](https://karafka.io/docs/Integrating-with-Ruby-on-Rails-and-other-frameworks#integrating-with-ruby-on-rails) with Ruby on Rails
  - Has [ActiveJob backend](https://karafka.io/docs/Active-Job) support (including [ordered jobs](https://karafka.io/docs/Pro-Enhanced-Active-Job#ordered-jobs))
  - Has a seamless [Dead Letter Queue](https://karafka.io/docs/Dead-Letter-Queue/) functionality built-in
  - Supports in-development [code reloading](https://karafka.io/docs/Auto-reload-of-code-changes-in-development)
  - Is powered by [librdkafka](https://github.com/edenhill/librdkafka) (the Apache Kafka C/C++ client library)
- - Has an out-of the box [StatsD/DataDog monitoring](https://karafka.io/docs/Monitoring-and-logging) with a dashboard template.
+ - Has an out-of the box [AppSignal](https://karafka.io/docs/Monitoring-and-Logging/#appsignal-metrics-and-error-tracking) and [StatsD/DataDog](https://karafka.io/docs/Monitoring-and-Logging/#datadog-and-statsd-integration) monitoring with dashboard templates.

  ```ruby
  # Define what topics you want to consume with which consumers in karafka.rb
data/bin/integrations CHANGED
@@ -28,7 +28,7 @@ ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../
  CONCURRENCY = ENV.key?('CI') ? 5 : Etc.nprocessors * 3

  # How may bytes do we want to keep from the stdout in the buffer for when we need to print it
- MAX_BUFFER_OUTPUT = 102_400
+ MAX_BUFFER_OUTPUT = 307_200

  # Abstraction around a single test scenario execution process
  class Scenario
data/config/locales/errors.yml CHANGED
@@ -7,19 +7,25 @@ en:

  missing: needs to be present
  client_id_format: 'needs to be a string with a Kafka accepted format'
+ group_id_format: 'needs to be a string with a Kafka accepted format'
  concurrency_format: needs to be an integer bigger than 0
- consumer_mapper_format: needs to be present
  consumer_persistence_format: needs to be either true or false
  pause_timeout_format: needs to be an integer bigger than 0
  pause_max_timeout_format: needs to be an integer bigger than 0
  pause_with_exponential_backoff_format: needs to be either true or false
+ strict_topics_namespacing_format: needs to be either true or false
  shutdown_timeout_format: needs to be an integer bigger than 0
  max_wait_time_format: needs to be an integer bigger than 0
+ max_wait_time_max_wait_time_vs_swarm_node_report_timeout: >
+   cannot be more than 80% of internal.swarm.node_report_timeout.
+   Decrease max_wait_time or increase node_report_timeout
  kafka_format: needs to be a filled hash
  key_must_be_a_symbol: All keys under the kafka settings scope need to be symbols
  max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
  shutdown_timeout_vs_max_wait_time: shutdown_timeout must be more than max_wait_time

+ oauth.token_provider_listener_format: 'must be false or respond to #on_oauthbearer_token_refresh'
+
  internal.processing.jobs_builder_format: cannot be nil
  internal.processing.jobs_queue_class_format: cannot be nil
  internal.processing.scheduler_class_format: cannot be nil
@@ -52,6 +58,11 @@ en:
  internal.connection.proxy.committed.timeout_format: needs to be an integer bigger than 0
  internal.connection.proxy.committed.max_attempts_format: needs to be an integer bigger than 0
  internal.connection.proxy.committed.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.proxy.commit.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.commit.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.proxy.metadata.timeout_format: needs to be an integer bigger than 0
+ internal.connection.proxy.metadata.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.metadata.wait_time_format: needs to be an integer bigger than 0

  internal.swarm.manager_format: cannot be nil
  internal.swarm.orphaned_exit_code_format: needs to be an integer bigger or equal to 0
@@ -84,7 +95,7 @@ en:
  max_messages_format: 'needs to be an integer bigger than 0'
  max_wait_time_format: 'needs to be an integer bigger than 0'
  name_format: 'needs to be a string with a Kafka accepted format'
- deserializer_format: needs to be present
+ deserializers_format: needs to be present
  consumer_format: needs to be present
  id_format: 'needs to be a string with a Kafka accepted format'
  initial_offset_format: needs to be either earliest or latest
@@ -93,20 +104,31 @@ en:
  manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
  inline_insights.active_format: needs to be either true or false
  consumer_active_job_missing: ActiveJob needs to be available
+
  dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
  dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
  dead_letter_queue.active_format: needs to be either true or false
  dead_letter_queue.independent_format: needs to be either true or false
  dead_letter_queue.transactional_format: needs to be either true or false
+ dead_letter_queue.dispatch_method_format: 'needs to be either #produce_sync or #produce_async'
+ dead_letter_queue.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
+
  active_format: needs to be either true or false
+
  declaratives.partitions_format: needs to be more or equal to 1
  declaratives.active_format: needs to be true
  declaratives.replication_factor_format: needs to be more or equal to 1
  declaratives.details_format: needs to be a hash with only symbol keys
+
  inconsistent_namespacing: |
    needs to be consistent namespacing style
    disable this validation by setting config.strict_topics_namespacing to false

+ deserializers.active_format: 'needs to be true'
+ deserializers.payload_format: 'needs to respond to #call'
+ deserializers.key_format: 'needs to respond to #call'
+ deserializers.headers_format: 'needs to respond to #call'
+
  consumer_group:
    missing: needs to be present
    topics_names_not_unique: all topic names within a single consumer group must be unique
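The new `dead_letter_queue.dispatch_method_format` and `marking_method_format` messages back the sync/async DLQ controls listed in the changelog. A hedged routing sketch with the option names inferred from those validation keys; `OrdersConsumer` and the DLQ topic name are hypothetical:

```ruby
class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders do
      consumer OrdersConsumer
      dead_letter_queue(
        topic: 'orders_dlq',
        max_retries: 2,
        # Inferred from the keys above: dispatch the DLQ message via
        # #produce_async (non-blocking) or #produce_sync (blocking)
        dispatch_method: :produce_async,
        # ...and mark the failing message via #mark_as_consumed or the
        # blocking #mark_as_consumed!
        marking_method: :mark_as_consumed
      )
    end
  end
end
```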
data/config/locales/pro_errors.yml CHANGED
@@ -58,10 +58,24 @@ en:
  subscription_group_details.multiplexing_boot_format: 'needs to be an integer equal or more than 1'
  subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'

+ swarm.active_format: needs to be true
+ swarm.nodes_format: needs to be a range, array of nodes ids or a hash with direct assignments
+ swarm_nodes_with_non_existent_nodes: includes unreachable nodes ids
+
+ direct_assignments.active_missing: needs to be present
+ direct_assignments.active_format: 'needs to be boolean'
+ direct_assignments.partitions_missing: 'needs to be present'
+ direct_assignments.partitions_format: 'needs to be true, list of partitions or a range of partitions (finite)'
+ direct_assignments_active_but_empty: 'cannot be empty and active at the same time'
+ direct_assignments_swarm_not_complete: 'cannot have partitions that are assigned but not allocated'
+ direct_assignments_swarm_overbooked: 'cannot allocate partitions in swarm that were not assigned'
+ direct_assignments_patterns_active: 'patterns cannot be used with direct assignments'
+
  consumer_group:
    patterns_format: must be an array with hashes
    patterns_missing: needs to be present
    patterns_regexps_not_unique: 'must be unique within consumer group'
+   direct_assignments_homogenous: 'single consumer group cannot mix regular and direct assignments'

  pattern:
    regexp_format: must be a regular expression
@@ -78,6 +92,11 @@ en:
  encryption.version_format: must be a non-empty string
  encryption.public_key_format: 'is not a valid public RSA key'
  encryption.private_keys_invalid: 'contains an invalid private RSA key string'
+ encryption.fingerprinter_missing: 'needs to be false or respond to #hexdigest method'
+ encryption.fingerprinter_format: 'needs to be false or respond to #hexdigest method'

  patterns.ttl_format: needs to be an integer bigger than 0
  patterns.ttl_missing: needs to be present
+
+ routing:
+   swarm_nodes_not_used: 'At least one of the nodes has no assignments'
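The `swarm` and `direct_assignments` keys above validate the new Pro routing features from the changelog. A sketch of how such routing may look; the `#assign` and `#swarm` method names are assumptions inferred from these validation messages, which state that assignments accept `true`, a list, or a finite range of partitions, and that swarm nodes accept a range, an array of node ids, or a hash:

```ruby
class KarafkaApp < Karafka::App
  routes.draw do
    topic :metrics do
      consumer MetricsConsumer
      # Direct assignment (Pro): consume these partitions directly instead
      # of relying on consumer group rebalances; true, a list or a finite
      # range of partitions is accepted per the contract
      assign(0, 1, 2)
      # Swarm (Pro): restrict this topic's work to the given swarm nodes
      swarm(nodes: 0..1)
    end
  end
end
```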
data/karafka.gemspec CHANGED
@@ -21,10 +21,12 @@ Gem::Specification.new do |spec|
      without having to focus on things that are not your business domain.
    DESC

-   spec.add_dependency 'karafka-core', '>= 2.3.0', '< 2.4.0'
-   spec.add_dependency 'waterdrop', '>= 2.6.12', '< 3.0.0'
+   spec.add_dependency 'karafka-core', '>= 2.4.0.rc1', '< 2.5.0'
+   spec.add_dependency 'waterdrop', '>= 2.7.0.rc1', '< 3.0.0'
    spec.add_dependency 'zeitwerk', '~> 2.3'

+   spec.required_ruby_version = '>= 3.0.0'
+
    if $PROGRAM_NAME.end_with?('gem')
      spec.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
    end
data/lib/active_job/queue_adapters/karafka_adapter.rb CHANGED
@@ -16,8 +16,10 @@ module ActiveJob

    # Enqueues multiple jobs in one go
    # @param jobs [Array<Object>] jobs that we want to enqueue
+   # @return [Integer] number of jobs enqueued (required by Rails)
    def enqueue_all(jobs)
      ::Karafka::App.config.internal.active_job.dispatcher.dispatch_many(jobs)
+     jobs.size
    end

    # Raises info, that Karafka backend does not support scheduling jobs
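The added `jobs.size` return value aligns the adapter with `ActiveJob.perform_all_later`, which relies on the count an adapter's `#enqueue_all` returns when reporting how many jobs were enqueued. A short usage sketch; `WelcomeJob` is a hypothetical job class:

```ruby
# A hypothetical ActiveJob class dispatched through the Karafka adapter
class WelcomeJob < ActiveJob::Base
  queue_as :welcomes

  def perform(user_id)
    puts "Welcoming user #{user_id}"
  end
end

# Dispatches all jobs in one go via Karafka's dispatcher; the adapter
# now reports the enqueued count back to Rails
jobs = (1..10).map { |id| WelcomeJob.new(id) }
ActiveJob.perform_all_later(jobs)
```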
data/lib/karafka/admin/configs/config.rb ADDED
@@ -0,0 +1,81 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Admin
+     module Configs
+       # Represents a single config entry that is related to a resource
+       class Config
+         attr_reader :name, :value, :synonyms
+
+         class << self
+           # Creates a single config entry from the Rdkafka config result entry
+           #
+           # @param rd_kafka_config [Rdkafka::Admin::ConfigBindingResult]
+           # @return [Config]
+           def from_rd_kafka(rd_kafka_config)
+             new(
+               name: rd_kafka_config.name,
+               value: rd_kafka_config.value,
+               read_only: rd_kafka_config.read_only,
+               default: rd_kafka_config.default,
+               sensitive: rd_kafka_config.sensitive,
+               synonym: rd_kafka_config.synonym,
+               synonyms: rd_kafka_config.synonyms.map do |rd_kafka_synonym|
+                 from_rd_kafka(rd_kafka_synonym)
+               end
+             )
+           end
+         end
+
+         # Creates a new config instance either for reading or as part of an altering operation
+         #
+         # @param name [String] config name
+         # @param value [String] config value
+         # @param default [Integer] 1 if default
+         # @param read_only [Integer] 1 if read only
+         # @param sensitive [Integer] 1 if sensitive
+         # @param synonym [Integer] 1 if synonym
+         # @param synonyms [Array] given config synonyms (if any)
+         #
+         # @note For alter operations only `name` and `value` are needed
+         def initialize(
+           name:,
+           value:,
+           default: -1,
+           read_only: -1,
+           sensitive: -1,
+           synonym: -1,
+           synonyms: []
+         )
+           @name = name
+           @value = value
+           @default = default
+           @read_only = read_only
+           @sensitive = sensitive
+           @synonym = synonym
+           @synonyms = synonyms
+         end
+
+         # @return [Boolean] whether the config property is set to its default value on the broker
+         def default? = @default.positive?
+
+         # @return [Boolean] whether the config property is read-only on the broker
+         def read_only? = @read_only.positive?
+
+         # @return [Boolean] whether the config property contains sensitive information (such as
+         #   security configuration)
+         def sensitive? = @sensitive.positive?
+
+         # @return [Boolean] whether this entry is a synonym
+         def synonym? = @synonym.positive?
+
+         # @return [Hash] hash that we can use to operate with rdkafka
+         def to_native_hash = {
+           name: name,
+           value: value
+         }.freeze
+       end
+     end
+   end
+ end
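Together with the `Resource` class and `Configs` module added alongside this file (entries 13–15 in the file list), this models describe and alter flows for topic and broker configuration. A hedged usage sketch; the exact `describe` API shape and the `Resource` constructor arguments are assumptions based on these new files:

```ruby
# Assumed API based on the new Karafka::Admin::Configs files in this release
resource = Karafka::Admin::Configs::Resource.new(type: :topic, name: 'events')

# Describe the resource and walk the Config entries modeled above
Karafka::Admin::Configs.describe(resource).each do |described|
  described.configs.each do |config|
    # #default?, #read_only? and #sensitive? map the broker's integer flags
    # to booleans, as defined in the Config class
    puts "#{config.name} = #{config.value} (default: #{config.default?})"
  end
end
```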