karafka 2.3.3 → 2.4.0.beta2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +12 -38
- data/CHANGELOG.md +59 -0
- data/Gemfile +6 -3
- data/Gemfile.lock +29 -27
- data/bin/integrations +1 -1
- data/config/locales/errors.yml +21 -2
- data/config/locales/pro_errors.yml +16 -1
- data/karafka.gemspec +4 -2
- data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -0
- data/lib/karafka/admin/configs/config.rb +81 -0
- data/lib/karafka/admin/configs/resource.rb +88 -0
- data/lib/karafka/admin/configs.rb +103 -0
- data/lib/karafka/admin.rb +211 -90
- data/lib/karafka/base_consumer.rb +2 -2
- data/lib/karafka/cli/info.rb +9 -7
- data/lib/karafka/cli/server.rb +7 -7
- data/lib/karafka/cli/topics/align.rb +109 -0
- data/lib/karafka/cli/topics/base.rb +66 -0
- data/lib/karafka/cli/topics/create.rb +35 -0
- data/lib/karafka/cli/topics/delete.rb +30 -0
- data/lib/karafka/cli/topics/migrate.rb +31 -0
- data/lib/karafka/cli/topics/plan.rb +169 -0
- data/lib/karafka/cli/topics/repartition.rb +41 -0
- data/lib/karafka/cli/topics/reset.rb +18 -0
- data/lib/karafka/cli/topics.rb +13 -123
- data/lib/karafka/connection/client.rb +55 -37
- data/lib/karafka/connection/listener.rb +22 -17
- data/lib/karafka/connection/proxy.rb +93 -4
- data/lib/karafka/connection/status.rb +14 -2
- data/lib/karafka/constraints.rb +3 -3
- data/lib/karafka/contracts/config.rb +14 -1
- data/lib/karafka/contracts/topic.rb +1 -1
- data/lib/karafka/deserializers/headers.rb +15 -0
- data/lib/karafka/deserializers/key.rb +15 -0
- data/lib/karafka/deserializers/payload.rb +16 -0
- data/lib/karafka/embedded.rb +2 -0
- data/lib/karafka/helpers/async.rb +5 -2
- data/lib/karafka/helpers/colorize.rb +6 -0
- data/lib/karafka/instrumentation/callbacks/oauthbearer_token_refresh.rb +29 -0
- data/lib/karafka/instrumentation/logger_listener.rb +23 -3
- data/lib/karafka/instrumentation/notifications.rb +10 -0
- data/lib/karafka/instrumentation/vendors/appsignal/client.rb +16 -2
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +20 -0
- data/lib/karafka/messages/batch_metadata.rb +1 -1
- data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
- data/lib/karafka/messages/builders/message.rb +10 -6
- data/lib/karafka/messages/message.rb +2 -1
- data/lib/karafka/messages/metadata.rb +20 -4
- data/lib/karafka/messages/parser.rb +1 -1
- data/lib/karafka/pro/base_consumer.rb +12 -23
- data/lib/karafka/pro/encryption/cipher.rb +7 -3
- data/lib/karafka/pro/encryption/contracts/config.rb +1 -0
- data/lib/karafka/pro/encryption/errors.rb +4 -1
- data/lib/karafka/pro/encryption/messages/middleware.rb +13 -11
- data/lib/karafka/pro/encryption/messages/parser.rb +22 -20
- data/lib/karafka/pro/encryption/setup/config.rb +5 -0
- data/lib/karafka/pro/iterator/expander.rb +2 -1
- data/lib/karafka/pro/iterator/tpl_builder.rb +38 -0
- data/lib/karafka/pro/iterator.rb +28 -2
- data/lib/karafka/pro/loader.rb +3 -0
- data/lib/karafka/pro/processing/coordinator.rb +15 -2
- data/lib/karafka/pro/processing/expansions_selector.rb +2 -0
- data/lib/karafka/pro/processing/jobs_queue.rb +122 -5
- data/lib/karafka/pro/processing/periodic_job/consumer.rb +67 -0
- data/lib/karafka/pro/processing/piping/consumer.rb +126 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +2 -0
- data/lib/karafka/pro/processing/strategies/default.rb +5 -1
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +21 -5
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -0
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +2 -0
- data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +52 -0
- data/lib/karafka/pro/routing/features/direct_assignments/config.rb +27 -0
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +53 -0
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +108 -0
- data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +77 -0
- data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +69 -0
- data/lib/karafka/pro/routing/features/direct_assignments.rb +25 -0
- data/lib/karafka/pro/routing/features/patterns/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +76 -0
- data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +16 -5
- data/lib/karafka/pro/routing/features/swarm/topic.rb +25 -2
- data/lib/karafka/pro/routing/features/swarm.rb +11 -0
- data/lib/karafka/pro/swarm/liveness_listener.rb +20 -0
- data/lib/karafka/processing/coordinator.rb +17 -8
- data/lib/karafka/processing/coordinators_buffer.rb +5 -2
- data/lib/karafka/processing/executor.rb +6 -2
- data/lib/karafka/processing/executors_buffer.rb +5 -2
- data/lib/karafka/processing/jobs_queue.rb +9 -4
- data/lib/karafka/processing/strategies/aj_dlq_mom.rb +1 -1
- data/lib/karafka/processing/strategies/default.rb +7 -1
- data/lib/karafka/processing/strategies/dlq.rb +17 -2
- data/lib/karafka/processing/workers_batch.rb +4 -1
- data/lib/karafka/routing/builder.rb +6 -2
- data/lib/karafka/routing/consumer_group.rb +2 -1
- data/lib/karafka/routing/features/dead_letter_queue/config.rb +5 -0
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +8 -0
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +10 -2
- data/lib/karafka/routing/features/deserializers/config.rb +18 -0
- data/lib/karafka/routing/features/deserializers/contracts/topic.rb +31 -0
- data/lib/karafka/routing/features/deserializers/topic.rb +51 -0
- data/lib/karafka/routing/features/deserializers.rb +11 -0
- data/lib/karafka/routing/proxy.rb +9 -14
- data/lib/karafka/routing/router.rb +11 -2
- data/lib/karafka/routing/subscription_group.rb +9 -1
- data/lib/karafka/routing/topic.rb +0 -1
- data/lib/karafka/runner.rb +1 -1
- data/lib/karafka/setup/config.rb +50 -9
- data/lib/karafka/status.rb +7 -8
- data/lib/karafka/swarm/supervisor.rb +16 -2
- data/lib/karafka/templates/karafka.rb.erb +28 -1
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +38 -12
- metadata.gz.sig +0 -0
- data/lib/karafka/routing/consumer_mapper.rb +0 -23
- data/lib/karafka/serialization/json/deserializer.rb +0 -19
- data/lib/karafka/time_trackers/partition_usage.rb +0 -56
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 39388f4dff605df6b699b8f8db42062fe8bdece1dcdc86548e91da28956f4eae
|
|
4
|
+
data.tar.gz: b58710b9e0d1264db8bddc7ff4540fcf697c2c4ca68c43de4fba6f79cbca4b8e
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 4e43f34d90881642a1a9ad3ffd74d8ff909c68f8eb54cafd8de2e2fb878c2cf3bfdd9506931d379441138afa5a6a29344a69aa744439182e5aef74e71d99dcf8
|
|
7
|
+
data.tar.gz: 984315866f3b801601970f4da01a7e31ca8a3dde35601fa8906335b1d1b9a002b2ba7b96c3e765a8c9c3081ff441d5937f9a9506b30ff86c1a8a0683ed8516d3
|
checksums.yaml.gz.sig
CHANGED
|
Binary file
|
data/.github/workflows/ci.yml
CHANGED
|
@@ -80,8 +80,6 @@ jobs:
|
|
|
80
80
|
- '3.1'
|
|
81
81
|
- '3.1.0'
|
|
82
82
|
- '3.0'
|
|
83
|
-
- '3.0.0'
|
|
84
|
-
- '2.7'
|
|
85
83
|
include:
|
|
86
84
|
- ruby: '3.3'
|
|
87
85
|
coverage: 'true'
|
|
@@ -90,9 +88,9 @@ jobs:
|
|
|
90
88
|
- name: Install package dependencies
|
|
91
89
|
run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"
|
|
92
90
|
|
|
93
|
-
- name: Start Kafka with docker
|
|
91
|
+
- name: Start Kafka with docker compose
|
|
94
92
|
run: |
|
|
95
|
-
docker
|
|
93
|
+
docker compose up -d || (sleep 5 && docker compose up -d)
|
|
96
94
|
|
|
97
95
|
- name: Set up Ruby
|
|
98
96
|
uses: ruby/setup-ruby@v1
|
|
@@ -122,7 +120,6 @@ jobs:
|
|
|
122
120
|
- '3.2'
|
|
123
121
|
- '3.1'
|
|
124
122
|
- '3.0'
|
|
125
|
-
- '2.7'
|
|
126
123
|
steps:
|
|
127
124
|
- uses: actions/checkout@v4
|
|
128
125
|
- name: Install package dependencies
|
|
@@ -131,9 +128,9 @@ jobs:
|
|
|
131
128
|
- name: Remove libzstd-dev to check no supported compressions
|
|
132
129
|
run: sudo apt-get -y remove libzstd-dev
|
|
133
130
|
|
|
134
|
-
- name: Start Kafka with docker
|
|
131
|
+
- name: Start Kafka with docker compose
|
|
135
132
|
run: |
|
|
136
|
-
docker
|
|
133
|
+
docker compose up -d || (sleep 5 && docker compose up -d)
|
|
137
134
|
|
|
138
135
|
- name: Set up Ruby
|
|
139
136
|
uses: ruby/setup-ruby@v1
|
|
@@ -147,26 +144,15 @@ jobs:
|
|
|
147
144
|
|
|
148
145
|
- name: Install latest Bundler
|
|
149
146
|
run: |
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
bundle config set version 2.4.22
|
|
153
|
-
gem update --system 3.4.22 --no-document
|
|
154
|
-
else
|
|
155
|
-
gem install bundler --no-document
|
|
156
|
-
gem update --system --no-document
|
|
157
|
-
fi
|
|
147
|
+
gem install bundler --no-document
|
|
148
|
+
gem update --system --no-document
|
|
158
149
|
|
|
159
150
|
bundle config set without 'tools benchmarks docs'
|
|
160
151
|
|
|
161
152
|
- name: Bundle install
|
|
162
153
|
run: |
|
|
163
154
|
bundle config set without development
|
|
164
|
-
|
|
165
|
-
if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
|
|
166
|
-
BUNDLER_VERSION=2.4.22 bundle install --jobs 4 --retry 3
|
|
167
|
-
else
|
|
168
|
-
bundle install --jobs 4 --retry 3
|
|
169
|
-
fi
|
|
155
|
+
bundle install --jobs 4 --retry 3
|
|
170
156
|
|
|
171
157
|
- name: Wait for Kafka
|
|
172
158
|
run: |
|
|
@@ -187,15 +173,14 @@ jobs:
|
|
|
187
173
|
- '3.2'
|
|
188
174
|
- '3.1'
|
|
189
175
|
- '3.0'
|
|
190
|
-
- '2.7'
|
|
191
176
|
steps:
|
|
192
177
|
- uses: actions/checkout@v4
|
|
193
178
|
- name: Install package dependencies
|
|
194
179
|
run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"
|
|
195
180
|
|
|
196
|
-
- name: Start Kafka with docker
|
|
181
|
+
- name: Start Kafka with docker compose
|
|
197
182
|
run: |
|
|
198
|
-
docker
|
|
183
|
+
docker compose up -d || (sleep 5 && docker compose up -d)
|
|
199
184
|
|
|
200
185
|
- name: Set up Ruby
|
|
201
186
|
uses: ruby/setup-ruby@v1
|
|
@@ -205,26 +190,15 @@ jobs:
|
|
|
205
190
|
|
|
206
191
|
- name: Install latest Bundler
|
|
207
192
|
run: |
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
bundle config set version 2.4.22
|
|
211
|
-
gem update --system 3.4.22 --no-document
|
|
212
|
-
else
|
|
213
|
-
gem install bundler --no-document
|
|
214
|
-
gem update --system --no-document
|
|
215
|
-
fi
|
|
193
|
+
gem install bundler --no-document
|
|
194
|
+
gem update --system --no-document
|
|
216
195
|
|
|
217
196
|
bundle config set without 'tools benchmarks docs'
|
|
218
197
|
|
|
219
198
|
- name: Bundle install
|
|
220
199
|
run: |
|
|
221
200
|
bundle config set without development
|
|
222
|
-
|
|
223
|
-
if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
|
|
224
|
-
BUNDLER_VERSION=2.4.22 bundle install --jobs 4 --retry 3
|
|
225
|
-
else
|
|
226
|
-
bundle install --jobs 4 --retry 3
|
|
227
|
-
fi
|
|
201
|
+
bundle install --jobs 4 --retry 3
|
|
228
202
|
|
|
229
203
|
- name: Wait for Kafka
|
|
230
204
|
run: |
|
data/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,64 @@
|
|
|
1
1
|
# Karafka framework changelog
|
|
2
2
|
|
|
3
|
+
## 2.4.0 (Unreleased)
|
|
4
|
+
|
|
5
|
+
This release contains **BREAKING** changes. Make sure to read and apply upgrade notes.
|
|
6
|
+
|
|
7
|
+
- **[Breaking]** Drop Ruby `2.7` support.
|
|
8
|
+
- **[Breaking]** Drop the concept of consumer group mapping.
|
|
9
|
+
- **[Breaking]** `karafka topics migrate` will now perform declarative topics configuration alignment.
|
|
10
|
+
- **[Breaking]** Replace `deserializer` config with `#deserializers` in routing to support key and lazy header deserializers.
|
|
11
|
+
- **[Breaking]** Rename `Karafka::Serializers::JSON::Deserializer` to `Karafka::Deserializers::Payload` to reflect its role.
|
|
12
|
+
- **[Feature]** Support custom OAuth providers (with a lot of help from bruce-szalwinski-he and hotelengine.com).
|
|
13
|
+
- **[Feature]** Provide `karafka topics alter` for declarative topics alignment.
|
|
14
|
+
- **[Feature]** Introduce ability to use direct assignments (Pro).
|
|
15
|
+
- **[Feature]** Provide consumer piping API (Pro).
|
|
16
|
+
- **[Feature]** Introduce `karafka topics plan` to describe changes that will be applied when migrating.
|
|
17
|
+
- **[Feature]** Introduce ability to use custom message key deserializers.
|
|
18
|
+
- **[Feature]** Introduce ability to use custom message headers deserializers.
|
|
19
|
+
- **[Feature]** Provide `Karafka::Admin::Configs` API for cluster and topics configuration management.
|
|
20
|
+
- [Enhancement] Assign names to internal threads for better debuggability when on `TTIN`.
|
|
21
|
+
- [Enhancement] Provide `log_polling` setting to the `Karafka::Instrumentation::LoggerListener` to silence polling in any non-debug mode.
|
|
22
|
+
- [Enhancement] Provide `metadata#message` to be able to retrieve message from metadata.
|
|
23
|
+
- [Enhancement] Include number of attempts prior to DLQ message being dispatched including the dispatch one (Pro).
|
|
24
|
+
- [Enhancement] Provide ability to decide how to dispatch from DLQ (sync / async).
|
|
25
|
+
- [Enhancement] Provide ability to decide how to mark as consumed from DLQ (sync / async).
|
|
26
|
+
- [Enhancement] Allow for usage of a custom Appsignal namespace when logging.
|
|
27
|
+
- [Enhancement] Do not run periodic jobs when LRJ job is running despite polling (LRJ can still start when Periodic runs).
|
|
28
|
+
- [Enhancement] Improve accuracy of periodic jobs and make sure they do not run too early after saturated work.
|
|
29
|
+
- [Enhancement] Introduce ability to async lock other subscription groups polling.
|
|
30
|
+
- [Enhancement] Improve shutdown when using long polling setup (high `max_wait_time`).
|
|
31
|
+
- [Enhancement] Provide `Karafka::Admin#read_lags_with_offsets` for ability to query lags and offsets of a given CG.
|
|
32
|
+
- [Enhancement] Allow direct assignments granular distribution in the Swarm (Pro).
|
|
33
|
+
- [Enhancement] Add a buffer to the supervisor supervision on shutdown to prevent a potential race condition when signal pass lags.
|
|
34
|
+
- [Enhancement] Provide ability to automatically generate and validate fingerprints of encrypted payload.
|
|
35
|
+
- [Enhancement] Support `enable.partition.eof` fast yielding.
|
|
36
|
+
- [Enhancement] Provide `#mark_as_consumed` and `#mark_as_consumed!` to the iterator.
|
|
37
|
+
- [Enhancement] Introduce graceful `#stop` to the iterator instead of recommending of usage of `break`.
|
|
38
|
+
- [Enhancement] Do not run jobs schedulers and other interval based operations on each job queue unlock.
|
|
39
|
+
- [Enhancement] Publish listeners status lifecycle events.
|
|
40
|
+
- [Enhancement] Use proxy wrapper for Admin metadata requests.
|
|
41
|
+
- [Enhancement] Use limited scope topic info data when operating on direct topics instead of full cluster queries.
|
|
42
|
+
- [Enhancement] No longer raise `Karafka::UnsupportedCaseError` for not recognized error types to support dynamic errors reporting.
|
|
43
|
+
- [Change] Do not create new proxy object to Rdkafka with certain low-level operations and re-use existing.
|
|
44
|
+
- [Change] Update `karafka.erb` template with a placeholder for waterdrop and karafka error instrumentation.
|
|
45
|
+
- [Fix] Pro Swarm liveness listener can report incorrect failure when dynamic multiplexing scales down.
|
|
46
|
+
- [Fix] K8s liveness listener can report incorrect failure when dynamic multiplexing scales down.
|
|
47
|
+
- [Fix] Fix a case where connection conductor would not be released during manager state changes.
|
|
48
|
+
- [Fix] Make sure, that all `Admin` operations go through stabilization proxy.
|
|
49
|
+
- [Fix] Fix an issue where coordinator running jobs would not count periodic jobs and revocations.
|
|
50
|
+
- [Fix] Fix a case where critically crashed supervisor would raise incorrect error.
|
|
51
|
+
- [Fix] Re-raise critical supervisor errors before shutdown.
|
|
52
|
+
- [Fix] Fix a case when right-open (infinite) swarm matching would not pass validations.
|
|
53
|
+
- [Fix] Make `#enqueue_all` output compatible with `ActiveJob.perform_all_later` (oozzal)
|
|
54
|
+
- [Fix] Seek consumer group on a topic level is updating only recent partition.
|
|
55
|
+
|
|
56
|
+
### Upgrade Notes
|
|
57
|
+
|
|
58
|
+
**PLEASE MAKE SURE TO READ AND APPLY THEM!**
|
|
59
|
+
|
|
60
|
+
Available [here](https://karafka.io/docs/Upgrades-2.4/).
|
|
61
|
+
|
|
3
62
|
## 2.3.3 (2024-02-26)
|
|
4
63
|
- [Enhancement] Routing based topics allocation for swarm (Pro)
|
|
5
64
|
- [Enhancement] Publish the `-1` shutdown reason status for a non-responding node in swarm.
|
data/Gemfile
CHANGED
|
@@ -6,11 +6,14 @@ plugin 'diffend'
|
|
|
6
6
|
|
|
7
7
|
gemspec
|
|
8
8
|
|
|
9
|
-
# Karafka gem does not require activejob nor karafka-web
|
|
9
|
+
# Karafka gem does not require activejob nor karafka-web to work
|
|
10
10
|
# They are added here because they are part of the integration suite
|
|
11
|
+
# Since some of those are only needed for some specs, they should never be required automatically
|
|
11
12
|
group :integrations do
|
|
12
|
-
gem 'activejob'
|
|
13
|
-
gem 'karafka-
|
|
13
|
+
gem 'activejob', require: false
|
|
14
|
+
gem 'karafka-testing', '>= 2.4.0.rc1', require: false
|
|
15
|
+
gem 'karafka-web', '>= 0.9.0.rc1', require: false
|
|
16
|
+
gem 'rspec', require: false
|
|
14
17
|
end
|
|
15
18
|
|
|
16
19
|
group :test do
|
data/Gemfile.lock
CHANGED
|
@@ -1,18 +1,18 @@
|
|
|
1
1
|
PATH
|
|
2
2
|
remote: .
|
|
3
3
|
specs:
|
|
4
|
-
karafka (2.
|
|
5
|
-
karafka-core (>= 2.
|
|
6
|
-
waterdrop (>= 2.
|
|
4
|
+
karafka (2.4.0.beta2)
|
|
5
|
+
karafka-core (>= 2.4.0.rc1, < 2.5.0)
|
|
6
|
+
waterdrop (>= 2.7.0.rc1, < 3.0.0)
|
|
7
7
|
zeitwerk (~> 2.3)
|
|
8
8
|
|
|
9
9
|
GEM
|
|
10
10
|
remote: https://rubygems.org/
|
|
11
11
|
specs:
|
|
12
|
-
activejob (7.1.3)
|
|
13
|
-
activesupport (= 7.1.3)
|
|
12
|
+
activejob (7.1.3.2)
|
|
13
|
+
activesupport (= 7.1.3.2)
|
|
14
14
|
globalid (>= 0.3.6)
|
|
15
|
-
activesupport (7.1.3)
|
|
15
|
+
activesupport (7.1.3.2)
|
|
16
16
|
base64
|
|
17
17
|
bigdecimal
|
|
18
18
|
concurrent-ruby (~> 1.0, >= 1.0.2)
|
|
@@ -23,40 +23,42 @@ GEM
|
|
|
23
23
|
mutex_m
|
|
24
24
|
tzinfo (~> 2.0)
|
|
25
25
|
base64 (0.2.0)
|
|
26
|
-
bigdecimal (3.1.
|
|
26
|
+
bigdecimal (3.1.7)
|
|
27
27
|
byebug (11.1.3)
|
|
28
28
|
concurrent-ruby (1.2.3)
|
|
29
29
|
connection_pool (2.4.1)
|
|
30
30
|
diff-lcs (1.5.1)
|
|
31
31
|
docile (1.4.0)
|
|
32
|
-
drb (2.2.
|
|
33
|
-
ruby2_keywords
|
|
32
|
+
drb (2.2.1)
|
|
34
33
|
erubi (1.12.0)
|
|
35
|
-
factory_bot (6.4.
|
|
34
|
+
factory_bot (6.4.6)
|
|
36
35
|
activesupport (>= 5.0.0)
|
|
37
36
|
ffi (1.16.3)
|
|
38
37
|
globalid (1.2.1)
|
|
39
38
|
activesupport (>= 6.1)
|
|
40
|
-
i18n (1.14.
|
|
39
|
+
i18n (1.14.4)
|
|
41
40
|
concurrent-ruby (~> 1.0)
|
|
42
|
-
karafka-core (2.
|
|
43
|
-
karafka-rdkafka (>= 0.
|
|
44
|
-
karafka-rdkafka (0.
|
|
41
|
+
karafka-core (2.4.0.rc1)
|
|
42
|
+
karafka-rdkafka (>= 0.15.0.rc1, < 0.16.0)
|
|
43
|
+
karafka-rdkafka (0.15.0.rc1)
|
|
45
44
|
ffi (~> 1.15)
|
|
46
45
|
mini_portile2 (~> 2.6)
|
|
47
46
|
rake (> 12)
|
|
48
|
-
karafka-
|
|
47
|
+
karafka-testing (2.4.0.rc1)
|
|
48
|
+
karafka (>= 2.4.0.beta1, < 2.5.0)
|
|
49
|
+
waterdrop (>= 2.7.0.rc1)
|
|
50
|
+
karafka-web (0.9.0.rc1)
|
|
49
51
|
erubi (~> 1.4)
|
|
50
|
-
karafka (>= 2.
|
|
51
|
-
karafka-core (>= 2.
|
|
52
|
+
karafka (>= 2.4.0.beta1, < 2.5.0)
|
|
53
|
+
karafka-core (>= 2.4.0.rc1, < 2.5.0)
|
|
52
54
|
roda (~> 3.68, >= 3.69)
|
|
53
55
|
tilt (~> 2.0)
|
|
54
56
|
mini_portile2 (2.8.5)
|
|
55
|
-
minitest (5.
|
|
57
|
+
minitest (5.22.3)
|
|
56
58
|
mutex_m (0.2.0)
|
|
57
|
-
rack (3.0.
|
|
58
|
-
rake (13.1
|
|
59
|
-
roda (3.
|
|
59
|
+
rack (3.0.10)
|
|
60
|
+
rake (13.2.1)
|
|
61
|
+
roda (3.78.0)
|
|
60
62
|
rack
|
|
61
63
|
rspec (3.13.0)
|
|
62
64
|
rspec-core (~> 3.13.0)
|
|
@@ -70,8 +72,7 @@ GEM
|
|
|
70
72
|
rspec-mocks (3.13.0)
|
|
71
73
|
diff-lcs (>= 1.2.0, < 2.0)
|
|
72
74
|
rspec-support (~> 3.13.0)
|
|
73
|
-
rspec-support (3.13.
|
|
74
|
-
ruby2_keywords (0.0.5)
|
|
75
|
+
rspec-support (3.13.1)
|
|
75
76
|
simplecov (0.22.0)
|
|
76
77
|
docile (~> 1.1)
|
|
77
78
|
simplecov-html (~> 0.11)
|
|
@@ -81,8 +82,8 @@ GEM
|
|
|
81
82
|
tilt (2.3.0)
|
|
82
83
|
tzinfo (2.0.6)
|
|
83
84
|
concurrent-ruby (~> 1.0)
|
|
84
|
-
waterdrop (2.
|
|
85
|
-
karafka-core (>= 2.
|
|
85
|
+
waterdrop (2.7.0.rc1)
|
|
86
|
+
karafka-core (>= 2.4.0.rc1, < 3.0.0)
|
|
86
87
|
zeitwerk (~> 2.3)
|
|
87
88
|
zeitwerk (2.6.13)
|
|
88
89
|
|
|
@@ -95,9 +96,10 @@ DEPENDENCIES
|
|
|
95
96
|
byebug
|
|
96
97
|
factory_bot
|
|
97
98
|
karafka!
|
|
98
|
-
karafka-
|
|
99
|
+
karafka-testing (>= 2.4.0.rc1)
|
|
100
|
+
karafka-web (>= 0.9.0.rc1)
|
|
99
101
|
rspec
|
|
100
102
|
simplecov
|
|
101
103
|
|
|
102
104
|
BUNDLED WITH
|
|
103
|
-
2.5.
|
|
105
|
+
2.5.7
|
data/bin/integrations
CHANGED
|
@@ -28,7 +28,7 @@ ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../
|
|
|
28
28
|
CONCURRENCY = ENV.key?('CI') ? 5 : Etc.nprocessors * 3
|
|
29
29
|
|
|
30
30
|
# How may bytes do we want to keep from the stdout in the buffer for when we need to print it
|
|
31
|
-
MAX_BUFFER_OUTPUT =
|
|
31
|
+
MAX_BUFFER_OUTPUT = 307_200
|
|
32
32
|
|
|
33
33
|
# Abstraction around a single test scenario execution process
|
|
34
34
|
class Scenario
|
data/config/locales/errors.yml
CHANGED
|
@@ -7,12 +7,13 @@ en:
|
|
|
7
7
|
|
|
8
8
|
missing: needs to be present
|
|
9
9
|
client_id_format: 'needs to be a string with a Kafka accepted format'
|
|
10
|
+
group_id_format: 'needs to be a string with a Kafka accepted format'
|
|
10
11
|
concurrency_format: needs to be an integer bigger than 0
|
|
11
|
-
consumer_mapper_format: needs to be present
|
|
12
12
|
consumer_persistence_format: needs to be either true or false
|
|
13
13
|
pause_timeout_format: needs to be an integer bigger than 0
|
|
14
14
|
pause_max_timeout_format: needs to be an integer bigger than 0
|
|
15
15
|
pause_with_exponential_backoff_format: needs to be either true or false
|
|
16
|
+
strict_topics_namespacing_format: needs to be either true or false
|
|
16
17
|
shutdown_timeout_format: needs to be an integer bigger than 0
|
|
17
18
|
max_wait_time_format: needs to be an integer bigger than 0
|
|
18
19
|
max_wait_time_max_wait_time_vs_swarm_node_report_timeout: >
|
|
@@ -23,6 +24,8 @@ en:
|
|
|
23
24
|
max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
|
|
24
25
|
shutdown_timeout_vs_max_wait_time: shutdown_timeout must be more than max_wait_time
|
|
25
26
|
|
|
27
|
+
oauth.token_provider_listener_format: 'must be false or respond to #on_oauthbearer_token_refresh'
|
|
28
|
+
|
|
26
29
|
internal.processing.jobs_builder_format: cannot be nil
|
|
27
30
|
internal.processing.jobs_queue_class_format: cannot be nil
|
|
28
31
|
internal.processing.scheduler_class_format: cannot be nil
|
|
@@ -55,6 +58,11 @@ en:
|
|
|
55
58
|
internal.connection.proxy.committed.timeout_format: needs to be an integer bigger than 0
|
|
56
59
|
internal.connection.proxy.committed.max_attempts_format: needs to be an integer bigger than 0
|
|
57
60
|
internal.connection.proxy.committed.wait_time_format: needs to be an integer bigger than 0
|
|
61
|
+
internal.connection.proxy.commit.max_attempts_format: needs to be an integer bigger than 0
|
|
62
|
+
internal.connection.proxy.commit.wait_time_format: needs to be an integer bigger than 0
|
|
63
|
+
internal.connection.proxy.metadata.timeout_format: needs to be an integer bigger than 0
|
|
64
|
+
internal.connection.proxy.metadata.max_attempts_format: needs to be an integer bigger than 0
|
|
65
|
+
internal.connection.proxy.metadata.wait_time_format: needs to be an integer bigger than 0
|
|
58
66
|
|
|
59
67
|
internal.swarm.manager_format: cannot be nil
|
|
60
68
|
internal.swarm.orphaned_exit_code_format: needs to be an integer bigger or equal to 0
|
|
@@ -87,7 +95,7 @@ en:
|
|
|
87
95
|
max_messages_format: 'needs to be an integer bigger than 0'
|
|
88
96
|
max_wait_time_format: 'needs to be an integer bigger than 0'
|
|
89
97
|
name_format: 'needs to be a string with a Kafka accepted format'
|
|
90
|
-
|
|
98
|
+
deserializers_format: needs to be present
|
|
91
99
|
consumer_format: needs to be present
|
|
92
100
|
id_format: 'needs to be a string with a Kafka accepted format'
|
|
93
101
|
initial_offset_format: needs to be either earliest or latest
|
|
@@ -96,20 +104,31 @@ en:
|
|
|
96
104
|
manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
|
|
97
105
|
inline_insights.active_format: needs to be either true or false
|
|
98
106
|
consumer_active_job_missing: ActiveJob needs to be available
|
|
107
|
+
|
|
99
108
|
dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
|
|
100
109
|
dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
|
|
101
110
|
dead_letter_queue.active_format: needs to be either true or false
|
|
102
111
|
dead_letter_queue.independent_format: needs to be either true or false
|
|
103
112
|
dead_letter_queue.transactional_format: needs to be either true or false
|
|
113
|
+
dead_letter_queue.dispatch_method_format: 'needs to be either #produce_sync or #produce_async'
|
|
114
|
+
dead_letter_queue.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
|
|
115
|
+
|
|
104
116
|
active_format: needs to be either true or false
|
|
117
|
+
|
|
105
118
|
declaratives.partitions_format: needs to be more or equal to 1
|
|
106
119
|
declaratives.active_format: needs to be true
|
|
107
120
|
declaratives.replication_factor_format: needs to be more or equal to 1
|
|
108
121
|
declaratives.details_format: needs to be a hash with only symbol keys
|
|
122
|
+
|
|
109
123
|
inconsistent_namespacing: |
|
|
110
124
|
needs to be consistent namespacing style
|
|
111
125
|
disable this validation by setting config.strict_topics_namespacing to false
|
|
112
126
|
|
|
127
|
+
deserializers.active_format: 'needs to be true'
|
|
128
|
+
deserializers.payload_format: 'needs to respond to #call'
|
|
129
|
+
deserializers.key_format: 'needs to respond to #call'
|
|
130
|
+
deserializers.headers_format: 'needs to respond to #call'
|
|
131
|
+
|
|
113
132
|
consumer_group:
|
|
114
133
|
missing: needs to be present
|
|
115
134
|
topics_names_not_unique: all topic names within a single consumer group must be unique
|
|
@@ -59,13 +59,23 @@ en:
|
|
|
59
59
|
subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'
|
|
60
60
|
|
|
61
61
|
swarm.active_format: needs to be true
|
|
62
|
-
swarm.nodes_format: needs to be a range
|
|
62
|
+
swarm.nodes_format: needs to be a range, array of nodes ids or a hash with direct assignments
|
|
63
63
|
swarm_nodes_with_non_existent_nodes: includes unreachable nodes ids
|
|
64
64
|
|
|
65
|
+
direct_assignments.active_missing: needs to be present
|
|
66
|
+
direct_assignments.active_format: 'needs to be boolean'
|
|
67
|
+
direct_assignments.partitions_missing: 'needs to be present'
|
|
68
|
+
direct_assignments.partitions_format: 'needs to be true, list of partitions or a range of partitions (finite)'
|
|
69
|
+
direct_assignments_active_but_empty: 'cannot be empty and active at the same time'
|
|
70
|
+
direct_assignments_swarm_not_complete: 'cannot have partitions that are assigned but not allocated'
|
|
71
|
+
direct_assignments_swarm_overbooked: 'cannot allocate partitions in swarm that were not assigned'
|
|
72
|
+
direct_assignments_patterns_active: 'patterns cannot be used with direct assignments'
|
|
73
|
+
|
|
65
74
|
consumer_group:
|
|
66
75
|
patterns_format: must be an array with hashes
|
|
67
76
|
patterns_missing: needs to be present
|
|
68
77
|
patterns_regexps_not_unique: 'must be unique within consumer group'
|
|
78
|
+
direct_assignments_homogenous: 'single consumer group cannot mix regular and direct assignments'
|
|
69
79
|
|
|
70
80
|
pattern:
|
|
71
81
|
regexp_format: must be a regular expression
|
|
@@ -82,6 +92,11 @@ en:
|
|
|
82
92
|
encryption.version_format: must be a non-empty string
|
|
83
93
|
encryption.public_key_format: 'is not a valid public RSA key'
|
|
84
94
|
encryption.private_keys_invalid: 'contains an invalid private RSA key string'
|
|
95
|
+
encryption.fingerprinter_missing: 'needs to be false or respond to #hexdigest method'
|
|
96
|
+
encryption.fingerprinter_format: 'needs to be false or respond to #hexdigest method'
|
|
85
97
|
|
|
86
98
|
patterns.ttl_format: needs to be an integer bigger than 0
|
|
87
99
|
patterns.ttl_missing: needs to be present
|
|
100
|
+
|
|
101
|
+
routing:
|
|
102
|
+
swarm_nodes_not_used: 'At least one of the nodes has no assignments'
|
data/karafka.gemspec
CHANGED
|
@@ -21,10 +21,12 @@ Gem::Specification.new do |spec|
|
|
|
21
21
|
without having to focus on things that are not your business domain.
|
|
22
22
|
DESC
|
|
23
23
|
|
|
24
|
-
spec.add_dependency 'karafka-core', '>= 2.
|
|
25
|
-
spec.add_dependency 'waterdrop', '>= 2.
|
|
24
|
+
spec.add_dependency 'karafka-core', '>= 2.4.0.rc1', '< 2.5.0'
|
|
25
|
+
spec.add_dependency 'waterdrop', '>= 2.7.0.rc1', '< 3.0.0'
|
|
26
26
|
spec.add_dependency 'zeitwerk', '~> 2.3'
|
|
27
27
|
|
|
28
|
+
spec.required_ruby_version = '>= 3.0.0'
|
|
29
|
+
|
|
28
30
|
if $PROGRAM_NAME.end_with?('gem')
|
|
29
31
|
spec.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
|
|
30
32
|
end
|
|
@@ -16,8 +16,10 @@ module ActiveJob
|
|
|
16
16
|
|
|
17
17
|
# Enqueues multiple jobs in one go
|
|
18
18
|
# @param jobs [Array<Object>] jobs that we want to enqueue
|
|
19
|
+
# @return [Integer] number of jobs enqueued (required by Rails)
|
|
19
20
|
def enqueue_all(jobs)
|
|
20
21
|
::Karafka::App.config.internal.active_job.dispatcher.dispatch_many(jobs)
|
|
22
|
+
jobs.size
|
|
21
23
|
end
|
|
22
24
|
|
|
23
25
|
# Raises info, that Karafka backend does not support scheduling jobs
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Karafka
  module Admin
    module Configs
      # Represents a single config entry that is related to a resource
      class Config
        attr_reader :name, :value, :synonyms

        class << self
          # Creates a single config entry from the Rdkafka config result entry
          #
          # @param rd_kafka_config [Rdkafka::Admin::ConfigBindingResult]
          # @return [Config]
          def from_rd_kafka(rd_kafka_config)
            new(
              name: rd_kafka_config.name,
              value: rd_kafka_config.value,
              read_only: rd_kafka_config.read_only,
              default: rd_kafka_config.default,
              sensitive: rd_kafka_config.sensitive,
              synonym: rd_kafka_config.synonym,
              # Synonyms are config entries themselves, hence the recursion
              synonyms: rd_kafka_config.synonyms.map do |rd_kafka_synonym|
                from_rd_kafka(rd_kafka_synonym)
              end
            )
          end
        end

        # Creates new config instance either for reading or as part of altering operation
        #
        # @param name [String] config name
        # @param value [String] config value
        # @param default [Integer] 1 if default
        # @param read_only [Integer] 1 if read only
        # @param sensitive [Integer] 1 if sensitive
        # @param synonym [Integer] 1 if synonym
        # @param synonyms [Array] given config synonyms (if any)
        #
        # @note For alter operations only `name` and `value` are needed. The remaining
        #   attributes default to `-1` ("unknown"), so all the predicate methods below
        #   return `false` for locally-built configs.
        def initialize(
          name:,
          value:,
          default: -1,
          read_only: -1,
          sensitive: -1,
          synonym: -1,
          synonyms: []
        )
          @name = name
          @value = value
          @default = default
          @read_only = read_only
          @sensitive = sensitive
          @synonym = synonym
          # Fix: the original assigned `@synonyms = []` here and then immediately
          # overwrote it with the keyword argument — a dead store. One assignment is enough.
          @synonyms = synonyms
        end

        # @return [Boolean] Is the config property set to its default value on the broker
        def default? = @default.positive?

        # @return [Boolean] Is the config property read-only on the broker
        def read_only? = @read_only.positive?

        # @return [Boolean] if the config property contains sensitive information (such as
        #   security configuration)
        def sensitive? = @sensitive.positive?

        # @return [Boolean] is this entry a synonym
        def synonym? = @synonym.positive?

        # @return [Hash] hash that we can use to operate with rdkafka
        def to_native_hash = {
          name: name,
          value: value
        }.freeze
      end
    end
  end
end
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Karafka
  module Admin
    module Configs
      # Represents a single resource in the context of configuration management
      class Resource
        # Types of resources that have workable configs.
        RESOURCE_TYPES_MAP = {
          # use when you want to assign acl to a given topic
          topic: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC,
          # use when you want to assign acl to a given broker
          broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER
        }.freeze

        # Map for operations we may perform on the resource configs
        OPERATIONS_TYPES_MAP = {
          set: Rdkafka::Bindings::RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET,
          delete: Rdkafka::Bindings::RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE,
          append: Rdkafka::Bindings::RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND,
          subtract: Rdkafka::Bindings::RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT
        }.freeze

        private_constant :RESOURCE_TYPES_MAP, :OPERATIONS_TYPES_MAP

        attr_reader :type, :name, :configs

        # @param type [Symbol, Integer] type of resource as a symbol for mapping or integer
        # @param name [String] name of the resource. It's the broker id or topic name
        # @return [Resource]
        def initialize(type:, name:)
          @type = map_type(type)
          @name = name.to_s
          @configs = []
          # Pending alter operations grouped by the rdkafka operation type constant
          @operations = Hash.new { |h, k| h[k] = [] }

          # Freezing is shallow: the instance variables cannot be reassigned, but the
          # `@operations` hash itself stays mutable so the operation methods below work
          freeze
        end

        OPERATIONS_TYPES_MAP.each do |op_name, op_value|
          # Adds an outgoing operation to a given resource of a given type
          # Useful since we alter in batches and not one at a time
          class_eval <<~RUBY, __FILE__, __LINE__ + 1
            # @param name [String] name of the config to alter
            # @param value [String] value of the config
            def #{op_name}(name, value #{op_name == :delete ? ' = nil' : ''})
              @operations[#{op_value}] << Config.new(name: name, value: value.to_s)
            end
          RUBY
        end

        # @return [Hash] resource converted to a hash that rdkafka can work with
        # @note Configs include the operation type and are expected to be used only for the
        #   incremental alter API.
        def to_native_hash
          configs_with_operations = @operations.flat_map do |op_type, configs|
            configs.map { |config| config.to_native_hash.merge(op_type: op_type) }
          end

          {
            resource_type: RESOURCE_TYPES_MAP.fetch(type),
            resource_name: name,
            configs: configs_with_operations
          }.freeze
        end

        private

        # Recognizes whether the type is provided and remaps it to a symbol representation if
        # needed
        #
        # @param type [Symbol, Integer] rdkafka integer constant or its symbol alias
        # @return [Symbol]
        # @raise [KeyError] when the type is neither a known constant nor a known symbol
        def map_type(type)
          inverted = RESOURCE_TYPES_MAP.invert

          # Integer rdkafka constant given - remap it to our symbol representation
          return inverted[type] if inverted.key?(type)

          # Fix: the original ended with `RESOURCE_TYPES_MAP.fetch(type) ? type : nil`,
          # whose `nil` branch was unreachable (`fetch` raises KeyError on unknown keys
          # and the mapped librdkafka constants are never falsy). We keep the validating
          # `fetch` and return the symbol explicitly.
          RESOURCE_TYPES_MAP.fetch(type)

          type
        end
      end
    end
  end
end