karafka 1.2.0 → 1.2.13

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 0bb0a1f72768ebf4bf720ebda57ebc26a0178275adabffd494b24e1612e9b38a
- data.tar.gz: f586038a0498227e8a287cc173a23e77bb99b6cb8d453c39be55a220e2a0d361
+ metadata.gz: d14a343dffbcc91c326964cc833bb82e9daedfdc4913eb6478c69e6caa5a995c
+ data.tar.gz: 14c3b5a9275dae6a5fb70790b4caf579cd2e1d9a6ac5d56307d950090012ec3d
  SHA512:
- metadata.gz: e2b862da6372bc91f76bc01c20eb21ccde9d61d67749ccce97eadd2f739484dbd68a149d08d6e275d94d95b6ce9610d86c2d01e85bb1841a80e048ed832810ac
- data.tar.gz: 06fb89700e59f810ec984b54f0424cfdeb98c79e17b8629010f011e68edff5fa1e345576a7b28ed07aff440739ed11824016da6083601a92b8f83fcca68e576e
+ metadata.gz: 890c1b249d3457984c2c392dc1bed2413f36704d1268247bd67b2fa70cd3489370786a71e6be6804aa9dc1e02136bc92e97a338c917577929f2a8e0aabaf07bc
+ data.tar.gz: cf9a0043a4b82797bf0f6d36a8ba18c97702e22dd9fea25d5c1df13ca3b30b4255244d828e6cd3fba68807d2717209ae626d2a99a25f497a7e15b04d80168d6f
data/.coditsu.yml ADDED
@@ -0,0 +1,3 @@
+ api_key: <%= ENV['CODITSU_API_KEY'] %>
+ api_secret: <%= ENV['CODITSU_API_SECRET'] %>
+ repository_id: <%= ENV['CODITSU_REPOSITORY_ID'] %>
data/.gitignore CHANGED
File without changes
data/.ruby-version CHANGED
@@ -1 +1 @@
- 2.5.0
+ 2.6.1
data/.travis.yml CHANGED
@@ -1,18 +1,49 @@
- language: ruby
+ services:
+   - docker
+
+ dist: trusty
  sudo: false
- rvm:
-   - 2.3.0
-   - 2.3.1
-   - 2.3.2
-   - 2.3.3
-   - 2.3.4
-   - 2.4.0
-   - 2.4.1
-   - 2.4.2
-   - 2.5.0
-   - jruby-head
- script: bundle exec rspec spec/
- env:
-   global:
-     - JRUBY_OPTS='--debug'
- install: bundle install --jobs=3 --retry=3
+ cache: bundler
+
+ git:
+   depth: false
+
+ test: &test
+   stage: Test
+   language: ruby
+   before_install:
+     - gem install bundler
+     - gem update --system
+   script: bundle exec rspec
+
+ jobs:
+   include:
+     - <<: *test
+       rvm: 2.6.1
+     - <<: *test
+       rvm: 2.5.3
+     - <<: *test
+       rvm: 2.4.5
+     - <<: *test
+       rvm: 2.3.8
+
+     - stage: coditsu
+       language: ruby
+       rvm: 2.6.1
+       before_install:
+         - gem update --system
+         - gem install bundler
+       before_script:
+         - docker create -v /sources --name sources alpine:3.4 /bin/true
+         - docker cp ./ sources:/sources
+       script: >
+         docker run
+         -e CODITSU_API_KEY
+         -e CODITSU_API_SECRET
+         -e CODITSU_REPOSITORY_ID
+         --volumes-from sources
+         coditsu/build-runner:latest
+
+ stages:
+   - coditsu
+   - test
data/CHANGELOG.md CHANGED
@@ -1,5 +1,54 @@
  # Karafka framework changelog

+ ## 1.2.13
+ - Add support for the `sasl_over_ssl` parameter in ruby-kafka
+
+ ## 1.2.12
+ - #29 - Consumer class names must include the word "Consumer" in order to work (Sidekiq backend)
+
+ ## 1.2.11
+ - [#470](https://github.com/karafka/karafka/issues/470) Karafka not working with dry-configurable 0.8
+
+ ## 1.2.10
+ - [#453](https://github.com/karafka/karafka/pull/453) require `Forwardable` module
+
+ ## 1.2.9
+ - Critical exceptions now cause the consumer to stop instead of retrying without a break
+ - #412 - Fix dry-inflector dependency lock in gemspec
+ - #414 - Backport the delayed retry upon failure to 1.2
+ - #437 - Raw message is no longer added to params after ParserError is raised
+
+ ## 1.2.8
+ - #408 - Responder topic lookup bug on Heroku
+
+ ## 1.2.7
+ - Unlock ruby-kafka version with a warning
+
+ ## 1.2.6
+ - Lock WaterDrop to 1.2.3
+ - Lock ruby-kafka to 0.6.x (support for 0.7 will be added in Karafka 1.3)
+
+ ## 1.2.5
+ - #354 - Expose consumer heartbeat
+ - #373 - Async producer not working properly with responders
+
+ ## 1.2.4
+ - #332 - Fetcher for max queue size
+
+ ## 1.2.3
+ - #313 - Support PLAINTEXT and SSL for scheme
+ - #320 - Pausing indefinitely with nil pause timeout doesn't work
+ - #318 - Partition pausing doesn't work with custom topic mappers
+ - Rename ConfigAdapter to ApiAdapter to better reflect what it does
+ - #317 - Manual offset committing doesn't work with custom topic mappers
+
+ ## 1.2.2
+ - #312 - Broken for ActiveSupport 5.2.0
+
+ ## 1.2.1
+ - #304 - Unification of error instrumentation event details
+ - #306 - Using file logger from within a trap context upon shutdown is impossible
+
  ## 1.2.0
  - Spec improvements
  - #260 - Specs missing randomization
data/Gemfile CHANGED
@@ -5,6 +5,10 @@ source 'https://rubygems.org'
  gemspec

  group :development, :test do
+   gem 'benchmark-ips'
+ end
+
+ group :test do
    gem 'rspec'
    gem 'simplecov'
    gem 'timecop'
data/Gemfile.lock CHANGED
@@ -1,123 +1,126 @@
  PATH
    remote: .
    specs:
-     karafka (1.2.0)
+     karafka (1.2.13)
        activesupport (>= 4.0)
-       dry-configurable (~> 0.7)
-       dry-inflector (~> 0.1.1)
-       dry-monitor (~> 0.1)
+       dry-configurable (~> 0.8)
+       dry-inflector (~> 0.1)
+       dry-monitor (~> 0.3)
        dry-validation (~> 0.11)
        envlogic (~> 1.0)
        multi_json (>= 1.12)
        rake (>= 11.3)
        require_all (>= 1.4)
-       ruby-kafka (>= 0.5.3)
-       thor (~> 0.19)
-       waterdrop (~> 1.2)
+       ruby-kafka (>= 0.6)
+       thor (~> 0.20)
+       waterdrop (~> 1.2.4)

  GEM
    remote: https://rubygems.org/
    specs:
-     activesupport (5.1.5)
+     activesupport (5.2.3)
        concurrent-ruby (~> 1.0, >= 1.0.2)
-       i18n (~> 0.7)
+       i18n (>= 0.7, < 2)
        minitest (~> 5.1)
        tzinfo (~> 1.1)
-     concurrent-ruby (1.0.5)
-     delivery_boy (0.2.4)
-       king_konf (~> 0.1.8)
-       ruby-kafka (~> 0.5.1)
+     benchmark-ips (2.7.2)
+     concurrent-ruby (1.1.5)
+     delivery_boy (0.2.7)
+       king_konf (~> 0.2)
+       ruby-kafka (~> 0.5)
      diff-lcs (1.3)
-     docile (1.3.0)
-     dry-configurable (0.7.0)
+     digest-crc (0.4.1)
+     docile (1.3.1)
+     dry-configurable (0.8.2)
        concurrent-ruby (~> 1.0)
-     dry-container (0.6.0)
+       dry-core (~> 0.4, >= 0.4.7)
+     dry-container (0.7.0)
        concurrent-ruby (~> 1.0)
        dry-configurable (~> 0.1, >= 0.1.3)
-     dry-core (0.4.5)
+     dry-core (0.4.7)
        concurrent-ruby (~> 1.0)
-     dry-equalizer (0.2.0)
-     dry-events (0.1.0)
+     dry-equalizer (0.2.2)
+     dry-events (0.1.1)
        concurrent-ruby (~> 1.0)
        dry-core (~> 0.4)
        dry-equalizer (~> 0.2)
-     dry-inflector (0.1.1)
-     dry-logic (0.4.2)
-       dry-container (~> 0.2, >= 0.2.6)
+     dry-inflector (0.1.2)
+     dry-logic (0.6.1)
+       concurrent-ruby (~> 1.0)
        dry-core (~> 0.2)
        dry-equalizer (~> 0.2)
-     dry-monitor (0.1.2)
+     dry-monitor (0.3.0)
        dry-configurable (~> 0.5)
+       dry-core (~> 0.4)
        dry-equalizer (~> 0.2)
        dry-events (~> 0.1)
-       rouge (~> 2.0, >= 2.2.1)
-     dry-types (0.12.2)
+     dry-types (0.14.1)
        concurrent-ruby (~> 1.0)
-       dry-configurable (~> 0.1)
        dry-container (~> 0.3)
-       dry-core (~> 0.2, >= 0.2.1)
+       dry-core (~> 0.4, >= 0.4.4)
        dry-equalizer (~> 0.2)
-       dry-logic (~> 0.4, >= 0.4.2)
-       inflecto (~> 0.0.0, >= 0.0.2)
-     dry-validation (0.11.1)
+       dry-inflector (~> 0.1, >= 0.1.2)
+       dry-logic (~> 0.5, >= 0.5)
+     dry-validation (0.13.2)
        concurrent-ruby (~> 1.0)
        dry-configurable (~> 0.1, >= 0.1.3)
        dry-core (~> 0.2, >= 0.2.1)
        dry-equalizer (~> 0.2)
-       dry-logic (~> 0.4, >= 0.4.0)
-       dry-types (~> 0.12.0)
+       dry-logic (~> 0.5, >= 0.5.0)
+       dry-types (~> 0.14.0)
      envlogic (1.1.0)
        dry-inflector (~> 0.1)
-     i18n (0.9.5)
+     i18n (1.6.0)
        concurrent-ruby (~> 1.0)
-     inflecto (0.0.2)
      json (2.1.0)
-     king_konf (0.1.10)
+     king_konf (0.3.7)
      minitest (5.11.3)
      multi_json (1.13.1)
      null-logger (0.1.5)
-     rake (12.3.1)
+     rake (12.3.2)
      require_all (2.0.0)
-     rouge (2.2.1)
-     rspec (3.7.0)
-       rspec-core (~> 3.7.0)
-       rspec-expectations (~> 3.7.0)
-       rspec-mocks (~> 3.7.0)
-     rspec-core (3.7.1)
-       rspec-support (~> 3.7.0)
-     rspec-expectations (3.7.0)
+     rspec (3.8.0)
+       rspec-core (~> 3.8.0)
+       rspec-expectations (~> 3.8.0)
+       rspec-mocks (~> 3.8.0)
+     rspec-core (3.8.0)
+       rspec-support (~> 3.8.0)
+     rspec-expectations (3.8.2)
        diff-lcs (>= 1.2.0, < 2.0)
-       rspec-support (~> 3.7.0)
-     rspec-mocks (3.7.0)
+       rspec-support (~> 3.8.0)
+     rspec-mocks (3.8.0)
        diff-lcs (>= 1.2.0, < 2.0)
-       rspec-support (~> 3.7.0)
-     rspec-support (3.7.1)
-     ruby-kafka (0.5.4)
+       rspec-support (~> 3.8.0)
+     rspec-support (3.8.0)
+     ruby-kafka (0.7.7)
+       digest-crc
      simplecov (0.16.1)
        docile (~> 1.1)
        json (>= 1.8, < 3)
        simplecov-html (~> 0.10.0)
      simplecov-html (0.10.2)
-     thor (0.20.0)
+     thor (0.20.3)
      thread_safe (0.3.6)
      timecop (0.9.1)
      tzinfo (1.2.5)
        thread_safe (~> 0.1)
-     waterdrop (1.2.0)
+     waterdrop (1.2.5)
        delivery_boy (~> 0.2)
-       dry-configurable (~> 0.7)
-       dry-monitor (~> 0.1)
+       dry-configurable (~> 0.8)
+       dry-monitor (~> 0.3)
        dry-validation (~> 0.11)
-       null-logger
+       null-logger (~> 0.1)
+       ruby-kafka (>= 0.6)

  PLATFORMS
    ruby

  DEPENDENCIES
+   benchmark-ips
    karafka!
    rspec
    simplecov
    timecop

  BUNDLED WITH
-    1.16.1
+    2.0.1
data/README.md CHANGED
@@ -8,15 +8,13 @@ Karafka allows you to capture everything that happens in your systems in large s
  Karafka not only handles incoming messages but also provides tools for building complex data-flow applications that receive and send messages.

- **Warning**: Wiki and all the docs refer to the 1.2.0.beta4. Sorry for the inconvenience. We will release the stable 1.2.0 version soon.
-
  ## How does it work

- Karafka provides a higher-level abstraction that allows you to focus on your business logic development, instead of focusing on implementing lower level abstraction layers. It provides developers with a set of tools that are dedicated for building multi-topic applications similarly to how Rails applications are being built.
+ Karafka provides a higher-level abstraction that allows you to focus on your business logic development, instead of focusing on implementing lower level abstraction layers. It provides developers with a set of tools that are dedicated for building multi-topic applications similar to how Rails applications are being built.

  ### Some things you might wonder about:

- - You can integrate Karafka with **any** Ruby based application.
+ - You can integrate Karafka with **any** Ruby-based application.
  - Karafka does **not** require Sidekiq or any other third party software (apart from Kafka itself).
  - Karafka works with Ruby on Rails but it is a **standalone** framework that can work without it.
  - Karafka has a **minimal** set of dependencies, so adding it won't be a huge burden for your already existing applications.
@@ -27,15 +25,30 @@ Karafka based applications can be easily deployed to any type of infrastructure,
  * Heroku
  * Capistrano
  * Docker
+ * Terraform
+
+ ## Kafka 0.10 or prior
+
+ If you're using Kafka 0.10, please lock the `ruby-kafka` gem in your Gemfile to version `0.6.8`:
+
+ ```ruby
+ gem 'karafka'
+ gem 'ruby-kafka', '~> 0.6.8'
+ ```

  ## Support

- Karafka has a [Wiki pages](https://github.com/karafka/karafka/wiki) for almost everything and a pretty decent [FAQ](https://github.com/karafka/karafka/wiki/FAQ). It covers the whole installation, setup and deployment along with other useful details on how to run Karafka.
+ Karafka has [Wiki pages](https://github.com/karafka/karafka/wiki) for almost everything and a pretty decent [FAQ](https://github.com/karafka/karafka/wiki/FAQ). It covers the whole installation, setup, and deployment along with other useful details on how to run Karafka.

  If you have any questions about using Karafka, feel free to join our [Gitter](https://gitter.im/karafka/karafka) chat channel.

  ## Getting started

+ If you're completely new to the subject, you can start with our "Kafka on Rails" article series, which will get you up and running with the terminology and basic ideas behind using Kafka:
+
+ - [Kafka on Rails: Using Kafka with Ruby on Rails – Part 1 – Kafka basics and its advantages](https://mensfeld.pl/2017/11/kafka-on-rails-using-kafka-with-ruby-on-rails-part-1-kafka-basics-and-its-advantages/)
+ - [Kafka on Rails: Using Kafka with Ruby on Rails – Part 2 – Getting started with Ruby and Kafka](https://mensfeld.pl/2018/01/kafka-on-rails-using-kafka-with-ruby-on-rails-part-2-getting-started-with-ruby-and-kafka/)
+
  If you want to get started with Kafka and Karafka as fast as possible, then the best idea is to just clone our example repository:

  ```bash
@@ -69,7 +82,7 @@ Karafka framework and Karafka team are __not__ related to Kafka streaming servic

  First, thank you for considering contributing to Karafka! It's people like you that make the open source community such a great community!

- Each pull request must pass all the rspec specs and meet our quality requirements.
+ Each pull request must pass all the RSpec specs and meet our quality requirements.

  To check if everything is as it should be, we use [Coditsu](https://coditsu.io) that combines multiple linters and code analyzers for both code and documentation. Once you're done with your changes, submit a pull request.
data/karafka.gemspec CHANGED
@@ -17,17 +17,22 @@ Gem::Specification.new do |spec|
  spec.license = 'MIT'

  spec.add_dependency 'activesupport', '>= 4.0'
- spec.add_dependency 'dry-configurable', '~> 0.7'
- spec.add_dependency 'dry-inflector', '~> 0.1.1'
- spec.add_dependency 'dry-monitor', '~> 0.1'
+ spec.add_dependency 'dry-configurable', '~> 0.8'
+ spec.add_dependency 'dry-inflector', '~> 0.1'
+ spec.add_dependency 'dry-monitor', '~> 0.3'
  spec.add_dependency 'dry-validation', '~> 0.11'
  spec.add_dependency 'envlogic', '~> 1.0'
  spec.add_dependency 'multi_json', '>= 1.12'
  spec.add_dependency 'rake', '>= 11.3'
  spec.add_dependency 'require_all', '>= 1.4'
- spec.add_dependency 'ruby-kafka', '>= 0.5.3'
- spec.add_dependency 'thor', '~> 0.19'
- spec.add_dependency 'waterdrop', '~> 1.2'
+ spec.add_dependency 'ruby-kafka', '>= 0.6'
+ spec.add_dependency 'thor', '~> 0.20'
+ spec.add_dependency 'waterdrop', '~> 1.2.4'
+
+ spec.post_install_message = <<~MSG
+   \e[93mWarning:\e[0m If you're using Kafka 0.10, please lock ruby-kafka in your Gemfile to version '0.6.8':
+   gem 'ruby-kafka', '~> 0.6.8'
+ MSG

  spec.required_ruby_version = '>= 2.3.0'
data/lib/karafka.rb CHANGED
@@ -6,6 +6,7 @@
  kafka
  envlogic
  thor
+ forwardable
  fileutils
  multi_json
  require_all
data/lib/karafka/attributes_map.rb CHANGED
@@ -14,15 +14,15 @@ module Karafka
    # @note All other settings will be passed to the Kafka.new method invocation.
    #   All elements in this hash are just edge cases
    # @return [Hash] hash with proper sections on what to proxy where in Ruby-Kafka
-   def config_adapter
+   def api_adapter
      {
        consumer: %i[
          session_timeout offset_commit_interval offset_commit_threshold
-         offset_retention_time heartbeat_interval
+         offset_retention_time heartbeat_interval fetcher_max_queue_size
        ],
-       subscription: %i[start_from_beginning max_bytes_per_partition],
-       consuming: %i[min_bytes max_bytes max_wait_time],
-       pausing: %i[pause_timeout],
+       subscribe: %i[start_from_beginning max_bytes_per_partition],
+       consumption: %i[min_bytes max_bytes max_wait_time],
+       pause: %i[pause_timeout],
        # All the options that are under the kafka config namespace but are not used
        # directly with the kafka api; from the Karafka user perspective, they are
        # still related to kafka. They should not be proxied anywhere
@@ -32,7 +32,7 @@ module Karafka

    # @return [Array<Symbol>] properties that can be set on a per topic level
    def topic
-     (config_adapter[:subscription] + %i[
+     (api_adapter[:subscribe] + %i[
        backend
        name
        parser
@@ -48,17 +48,19 @@ module Karafka
    # Thanks to this solution, if any new setting is available for ruby-kafka, we just need
    # to add it to our configuration class and it will be handled automatically.
    def consumer_group
-     # @note We don't ignore the config_adapter[:ignored] values as they should be ignored
+     # @note We don't ignore the api_adapter[:ignored] values as they should be ignored
      #   only when proxying details to ruby-kafka. We use ignored fields internally in karafka
-     ignored_settings = config_adapter[:subscription]
-     defined_settings = config_adapter.values.flatten
+     ignored_settings = api_adapter[:subscribe]
+     defined_settings = api_adapter.values.flatten
      karafka_settings = %i[batch_fetching]
-     # This is a dirty and bad hack of dry-configurable to get keys before setting values
+     # This is a dirty and bad hack of dry-configurable to get keys before setting values
      dynamically_proxied = Karafka::Setup::Config
                              ._settings
+                             .settings
                              .find { |s| s.name == :kafka }
                              .value
-                             .instance_variable_get('@klass').settings
+                             .names
+                             .to_a

      (defined_settings + dynamically_proxied).uniq + karafka_settings - ignored_settings
    end
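The renamed sections mirror the ruby-kafka calls they feed. As a hedged orientation sketch (keys and symbols are taken from the hash above; console output abridged):

```ruby
# Settings grouped under :consumption are routed to ruby-kafka's
# consume_each_message / consume_each_batch calls...
Karafka::AttributesMap.api_adapter[:consumption]
#=> [:min_bytes, :max_bytes, :max_wait_time]

# ...while :subscribe feeds consumer#subscribe, :pause feeds consumer#pause,
# and anything not listed in the map falls through to Kafka.new.
Karafka::AttributesMap.api_adapter[:pause]
#=> [:pause_timeout]
```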
data/lib/karafka/base_responder.rb CHANGED
@@ -163,7 +163,11 @@ module Karafka
    def deliver!
      messages_buffer.each_value do |data_elements|
        data_elements.each do |data, options|
-         producer(options).call(data, options)
+         # We map this topic name, so it will match the namespaced/etc topic in Kafka
+         # @note By default this will not change the topic (if the default mapper is used)
+         mapped_topic = Karafka::App.config.topic_mapper.outgoing(options[:topic])
+         external_options = options.merge(topic: mapped_topic)
+         producer(options).call(data, external_options)
        end
      end
    end
@@ -189,16 +193,16 @@ module Karafka
      messages_buffer[topic] ||= []
      messages_buffer[topic] << [
        @parser_class.generate(data),
-       # We map this topic name, so it will match the namespaced/etc topic in Kafka
-       # @note By default this will not change the topic (if the default mapper is used)
-       options.merge(topic: Karafka::App.config.topic_mapper.outgoing(topic))
+       options.merge(topic: topic)
      ]
    end

    # @param options [Hash] options for waterdrop
    # @return [Class] WaterDrop producer (sync or async based on the settings)
    def producer(options)
-     options[:async] ? WaterDrop::AsyncProducer : WaterDrop::SyncProducer
+     self.class.topics[
+       options[:topic]
+     ].async? ? WaterDrop::AsyncProducer : WaterDrop::SyncProducer
    end
  end
end
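Since topic mapping now happens inside `deliver!`, buffered messages keep their internal topic name until the moment they are handed to WaterDrop. A hedged usage sketch (class and topic names are illustrative, not part of the gem):

```ruby
# With a custom topic mapper configured, :pings stays :pings in the buffer
# and is translated to the external (e.g. prefixed) name only at delivery.
class PingResponder < Karafka::BaseResponder
  topic :pings

  def respond(data)
    respond_to :pings, data
  end
end

PingResponder.call(event: 'ping', at: Time.now.to_s)
```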
data/lib/karafka/connection/api_adapter.rb CHANGED (renamed from config_adapter.rb)
@@ -3,22 +3,22 @@
module Karafka
  # Namespace for all the things related to Kafka connection
  module Connection
-   # Mapper used to convert our internal settings into ruby-kafka settings
+   # Mapper used to convert our internal settings into ruby-kafka settings based on their
+   # API requirements.
    # Since ruby-kafka has more and more options and there are a few "levels" on which
    # we have to apply them (despite the fact that in Karafka you configure all of it
    # in one place), we have to remap it into what the ruby-kafka driver requires
    # @note The good thing about the Kafka.new method is that it ignores all options that
    #   do nothing. So we don't have to worry about injecting our internal settings
    #   into the client and breaking stuff
-   module ConfigAdapter
+   module ApiAdapter
      class << self
        # Builds all the configuration settings for the Kafka.new method
-       # @param _consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
        # @return [Array<Hash>] Array with all the client arguments including a hash with all
        #   the settings required by the Kafka.new method
        # @note We return an array, so we can inject any arguments we want, in case of changes in the
        #   raw driver
-       def client(_consumer_group)
+       def client
          # This one is a default that takes all the settings except special
          # cases defined in the map
          settings = {
@@ -29,9 +29,9 @@ module Karafka
          kafka_configs.each do |setting_name, setting_value|
            # All options for the config adapter should be ignored as we're just interested
            # in what is left, as we want to pass all the options that are "typical"
-           # and not listed in the config_adapter special cases mapping. All the values
-           # from the config_adapter mapping go somewhere else, not to the client directly
-           next if AttributesMap.config_adapter.values.flatten.include?(setting_name)
+           # and not listed in the api_adapter special cases mapping. All the values
+           # from the api_adapter mapping go somewhere else, not to the client directly
+           next if AttributesMap.api_adapter.values.flatten.include?(setting_name)

            settings[setting_name] = setting_value
          end
@@ -58,26 +58,54 @@ module Karafka
        # @return [Array<Hash>] Array with all the arguments required by the consuming method
        #   including a hash with all the settings required by
        #   Kafka::Consumer#consume_each_message and Kafka::Consumer#consume_each_batch
-       def consuming(consumer_group)
-         settings = {
-           automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
-         }
-         [sanitize(fetch_for(:consuming, consumer_group, settings))]
+       def consumption(consumer_group)
+         [
+           sanitize(
+             fetch_for(
+               :consumption,
+               consumer_group,
+               automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
+             )
+           )
+         ]
        end

        # Builds all the configuration settings for the kafka consumer#subscribe method
        # @param topic [Karafka::Routing::Topic] topic that holds details for a given subscription
        # @return [Hash] hash with all the settings required by kafka consumer#subscribe
-       def subscription(topic)
-         settings = fetch_for(:subscription, topic)
+       def subscribe(topic)
+         settings = fetch_for(:subscribe, topic)
          [Karafka::App.config.topic_mapper.outgoing(topic.name), sanitize(settings)]
        end

        # Builds all the configuration settings required by the kafka consumer#pause method
+       # @param topic [String] topic that we want to pause
+       # @param partition [Integer] number of the partition that we want to pause
        # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
-       # @return [Hash] hash with all the settings required to pause a kafka consumer
-       def pausing(consumer_group)
-         { timeout: consumer_group.pause_timeout }
+       # @return [Array] array with all the details required to pause a kafka consumer
+       def pause(topic, partition, consumer_group)
+         [
+           Karafka::App.config.topic_mapper.outgoing(topic),
+           partition,
+           { timeout: consumer_group.pause_timeout }
+         ]
+       end
+
+       # Remaps topic details taking the topic mapper feature into consideration.
+       # @param params [Karafka::Params::Params] params instance
+       # @return [Array] array with all the details needed by ruby-kafka to mark a message
+       #   as processed
+       # @note When the default empty topic mapper is used, there is no need for any conversion
+       #   as the internal and external formats are exactly the same
+       def mark_message_as_processed(params)
+         # The majority of non-Heroku users don't use custom topic mappers. No need to change
+         # anything when it is a default mapper that does not change anything
+         return [params] if Karafka::App.config.topic_mapper == Karafka::Routing::TopicMapper

+         # @note We don't use tap as it is around 13% slower than the non-dup version
+         dupped = params.dup
+         dupped['topic'] = Karafka::App.config.topic_mapper.outgoing(params.topic)
+         [dupped]
        end

        private
@@ -90,10 +118,10 @@ module Karafka
        def fetch_for(namespace_key, route_layer, preexisting_settings = {})
          kafka_configs.each_key do |setting_name|
            # Ignore settings that are not related to our namespace
-           next unless AttributesMap.config_adapter[namespace_key].include?(setting_name)
+           next unless AttributesMap.api_adapter[namespace_key].include?(setting_name)
            # Ignore settings that are already initialized
            # In case they are in preexisting settings fetched differently
-           next if preexisting_settings.keys.include?(setting_name)
+           next if preexisting_settings.key?(setting_name)
            # Fetch all the settings from a given layer object. Objects can handle the fallback
            # to the kafka settings, so
            preexisting_settings[setting_name] = route_layer.send(setting_name)
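Several fixes in this range (#317, #318, #408) deal with custom topic mappers, which is why `outgoing` mapping now runs in `pause`, `subscribe`, `mark_message_as_processed`, and responder delivery. A hedged sketch of the kind of mapper these code paths have to accommodate (the prefix handling is illustrative, not part of the gem):

```ruby
# A hypothetical Heroku-Kafka-style mapper: topics carry a mandatory prefix
# on the broker, while routing and consumer code uses the bare name.
class PrefixedTopicMapper
  def initialize(prefix)
    @prefix = prefix
  end

  # Kafka topic name -> internal name used in routes and consumers
  def incoming(topic)
    topic.to_s.sub("#{@prefix}.", '')
  end

  # Internal name -> real Kafka topic name (used by pause/subscribe/commit/respond)
  def outgoing(topic)
    "#{@prefix}.#{topic}"
  end
end

Karafka::App.setup do |config|
  config.topic_mapper = PrefixedTopicMapper.new(ENV['KAFKA_PREFIX'])
end
```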
data/lib/karafka/connection/builder.rb ADDED
@@ -0,0 +1,16 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Connection
+     # Builder used to construct a Kafka client
+     module Builder
+       class << self
+         # Builds a Kafka::Client instance that we use to work with the Kafka cluster
+         # @return [::Kafka::Client] returns a Kafka client
+         def call
+           Kafka.new(*ApiAdapter.client)
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/connection/client.rb CHANGED
@@ -23,7 +23,7 @@ module Karafka
    # @yieldparam [Array<Kafka::FetchedMessage>] kafka fetched messages
    # @note This will yield with raw messages - no preprocessing or reformatting.
    def fetch_loop
-     settings = ConfigAdapter.consuming(consumer_group)
+     settings = ApiAdapter.consumption(consumer_group)

      if consumer_group.batch_fetching
        kafka_consumer.each_batch(*settings) { |batch| yield(batch.messages) }
@@ -41,16 +41,6 @@ module Karafka
      )
      pause(error.topic, error.partition)
      retry
-   # This is on purpose - see the notes for this method
-   # rubocop:disable RescueException
-   rescue Exception => error
-     # rubocop:enable RescueException
-     Karafka.monitor.instrument(
-       'connection.client.fetch_loop.error',
-       caller: self,
-       error: error
-     )
-     retry
    end

    # Gracefully stops topic consumption
@@ -66,10 +56,7 @@ module Karafka
    # @param topic [String] topic that we want to pause
    # @param partition [Integer] number of the partition that we want to pause
    def pause(topic, partition)
-     settings = ConfigAdapter.pausing(consumer_group)
-     timeout = settings[:timeout]
-     raise(Errors::InvalidPauseTimeout, timeout) unless timeout.positive?
-     kafka_consumer.pause(topic, partition, settings)
+     kafka_consumer.pause(*ApiAdapter.pause(topic, partition, consumer_group))
    end

    # Marks a given message as consumed and commits the offsets
@@ -77,12 +64,20 @@ module Karafka
    #   so that the offset commit happens asap in case of a crash
    # @param [Karafka::Params::Params] params message that we want to mark as processed
    def mark_as_consumed(params)
-     kafka_consumer.mark_message_as_processed(params)
+     kafka_consumer.mark_message_as_processed(
+       *ApiAdapter.mark_message_as_processed(params)
+     )
      # Trigger an immediate, blocking offset commit in order to minimize the risk of crashing
      # before the automatic triggers have kicked in.
      kafka_consumer.commit_offsets
    end

+   # Triggers a non-optional blocking heartbeat that notifies Kafka that this
+   # consumer/client is still up and running
+   def trigger_heartbeat
+     kafka_consumer.trigger_heartbeat!
+   end
+
    private

    attr_reader :consumer_group
@@ -90,11 +85,13 @@ module Karafka
    # @return [Kafka::Consumer] returns a ready to consume Kafka consumer
    #   that is set up to consume from topics of a given consumer group
    def kafka_consumer
-     @kafka_consumer ||= kafka.consumer(
-       *ConfigAdapter.consumer(consumer_group)
+     # @note We don't cache the connection internally because we cache kafka_consumer, which
+     #   uses the kafka client object instance
+     @kafka_consumer ||= Builder.call.consumer(
+       *ApiAdapter.consumer(consumer_group)
      ).tap do |consumer|
        consumer_group.topics.each do |topic|
-         consumer.subscribe(*ConfigAdapter.subscription(topic))
+         consumer.subscribe(*ApiAdapter.subscribe(topic))
        end
      end
    rescue Kafka::ConnectionError
@@ -105,13 +102,6 @@ module Karafka
      # down the road
      raise
    end
-
-   # @return [Kafka] returns a Kafka
-   # @note We don't cache it internally because we cache kafka_consumer, which uses the kafka
-   #   object instance
-   def kafka
-     Kafka.new(*ConfigAdapter.client(consumer_group))
-   end
  end
end
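The new `trigger_heartbeat` (changelog #354) lets long-running consumers keep their group membership alive between messages. A hedged usage sketch, assuming the 1.2.5+ consumer API delegates a heartbeat method to this client (class and method placement are illustrative):

```ruby
# Hypothetical consumer that processes slow batches and heartbeats between
# messages so the broker does not evict it from the consumer group.
class SlowJobsConsumer < Karafka::BaseConsumer
  def consume
    params_batch.each do |params|
      handle(params)     # potentially slower than the session timeout
      trigger_heartbeat  # assumed to proxy to client#trigger_heartbeat
    end
  end
end
```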
data/lib/karafka/connection/listener.rb CHANGED
@@ -46,8 +46,8 @@ module Karafka
    rescue Exception => e
      Karafka.monitor.instrument('connection.listener.fetch_loop.error', caller: self, error: e)
      # rubocop:enable RescueException
-     @client&.stop
-     retry if @client
+     @client.stop
+     sleep(@consumer_group.reconnect_timeout) && retry
    end

    # @return [Karafka::Connection::Client] wrapped kafka consuming client for a given topic
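Design note on the listener change: `Kernel#sleep` returns the (truthy) number of seconds slept, so `sleep(@consumer_group.reconnect_timeout) && retry` reads as "back off, then retry". Together with dropping the blanket `rescue Exception`/`retry` from the client above, this appears to be the delayed retry upon failure backported in 1.2.9 (#414): a dead cluster no longer triggers a hot reconnect loop.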
data/lib/karafka/consumers/callbacks.rb CHANGED
@@ -23,7 +23,7 @@ module Karafka
    # @param method_name [Symbol, String] method name or nil if we plan to provide a block
    # @yield A block with code that should be executed before scheduling
    define_method type do |method_name = nil, &block|
-     set_callback type, :before, method_name ? method_name : block
+     set_callback type, :before, method_name || block
    end
  end
end
data/lib/karafka/errors.rb CHANGED
@@ -41,10 +41,10 @@ module Karafka
    # This should never happen and if it does, please contact us
    MissingClient = Class.new(BaseError)

-   # Raised when we attempt to pause a partition but the pause timeout is equal to 0
-   InvalidPauseTimeout = Class.new(BaseError)
-
    # Raised when we want to hook up to an event that is not registered and supported
    UnregisteredMonitorEvent = Class.new(BaseError)
+
+   # Raised when we've waited long enough for an unresponsive process to shut down
+   ForcefulShutdown = Class.new(BaseError)
  end
end
data/lib/karafka/helpers/class_matcher.rb CHANGED
@@ -42,6 +42,12 @@ module Karafka
    #   matcher.name #=> Super2Responder
    def name
      inflected = @klass.to_s.split('::').last.to_s
+     # We inject the @from into the name in case it is missing; otherwise we would just
+     # sanitize the name without adding the "to" postfix. That could happen when, for
+     # example, we build a responder for a consumer that does not have the "Consumer"
+     # postfix - the matcher would do nothing and return the same name, and the matching
+     # classes shouldn't be matched to themselves.
+     inflected << @from unless inflected.include?(@from)
      inflected.gsub!(@from, @to)
      inflected.gsub!(CONSTANT_REGEXP, '')
      inflected
data/lib/karafka/params/dsl.rb CHANGED
@@ -93,7 +93,9 @@ module Karafka
      return self if self['parsed']
      self['parsed'] = true

-     merge!(parse(delete('value')))
+     parsed_data = parse(self['value'])
+     delete('value')
+     merge!(parsed_data)
    end

    # Includes and extends the base params klass with everything that is needed by Karafka to
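A hedged illustration of the reordering above (#437); stdlib JSON stands in for whatever parser the route actually configures. Parsing now happens before the raw `'value'` key is dropped, so a parser failure leaves the payload in place instead of having already consumed it via `delete('value')`:

```ruby
require 'json'

params = { 'value' => 'not json' }
begin
  parsed_data = JSON.parse(params['value']) # raises JSON::ParserError here
  params.delete('value')                    # only reached on success
  params.merge!(parsed_data)
rescue JSON::ParserError
  params['value'] #=> "not json" (still available for error handling)
end
```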
@@ -10,10 +10,8 @@ module Karafka
    # Unfortunately it does not provide an on-call proc evaluation, so
    # this feature had to be added here on demand.
    # @param args Any arguments that DryConfigurable::Config accepts
-   def initialize(*args)
-     super
-
-     @config.each_key(&method(:rebuild))
+   def define!(*args)
+     super.tap { @config.each_key(&method(:rebuild)) }
    end

    private
data/lib/karafka/process.rb CHANGED
@@ -34,10 +34,8 @@ module Karafka

    # Method catches all HANDLED_SIGNALS and performs appropriate callbacks (if defined)
    # @note If there are no callbacks, this method will just ignore a given signal that was sent
-   # @yield [Block] block of code that we want to execute and supervise
    def supervise
      HANDLED_SIGNALS.each { |signal| trap_signal(signal) }
-     yield
    end

    private
data/lib/karafka/responders/builder.rb CHANGED
@@ -3,7 +3,7 @@
module Karafka
  # Responders namespace encapsulates all the internal responder implementation parts
  module Responders
-   # Responders builder is used to finding (based on the consumer class name) a responder
+   # Responders builder is used for finding (based on the consumer class name) a responder
    # that matches the consumer. We use it when the user does not provide a responder inside
    # routing, but still names the responder with the same convention (and namespaces) as the
    # consumer
    #
data/lib/karafka/schemas/consumer_group.rb CHANGED
@@ -7,7 +7,7 @@ module Karafka
    # Valid URI schemes of a Kafka broker URL
    # The ||= is due to the behavior of require_all that resolves dependencies
    # but sometimes loads things twice
-   URI_SCHEMES ||= %w[kafka kafka+ssl].freeze
+   URI_SCHEMES ||= %w[kafka kafka+ssl plaintext ssl].freeze

    # Available sasl scram mechanisms of authentication (plus nil)
    SASL_SCRAM_MECHANISMS ||= %w[sha256 sha512].freeze
@@ -31,11 +31,12 @@ module Karafka
    required(:id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
    required(:seed_brokers).filled { each(:broker_schema?) }
    required(:session_timeout).filled { int? | float? }
-   required(:pause_timeout).filled { (int? | float?) & gteq?(0) }
+   required(:pause_timeout) { none? | ((int? | float?) & gteq?(0)) }
    required(:offset_commit_interval) { int? | float? }
    required(:offset_commit_threshold).filled(:int?)
    required(:offset_retention_time) { none?.not > int? }
    required(:heartbeat_interval).filled { (int? | float?) & gteq?(0) }
+   required(:fetcher_max_queue_size).filled(:int?, gt?: 0)
    required(:connect_timeout).filled { (int? | float?) & gt?(0) }
    required(:socket_timeout).filled { (int? | float?) & gt?(0) }
    required(:min_bytes).filled(:int?, gt?: 0)
@@ -68,6 +69,7 @@
    end

    optional(:ssl_ca_certs_from_system).maybe(:bool?)
+   optional(:sasl_over_ssl).maybe(:bool?)

    # It's not with other encryptions as it has some more rules
    optional(:sasl_scram_mechanism)
data/lib/karafka/server.rb CHANGED
@@ -19,9 +19,9 @@ module Karafka

    # Method which runs the app
    def run
-     bind_on_sigint
-     bind_on_sigquit
-     bind_on_sigterm
+     process.on_sigint { stop_supervised }
+     process.on_sigquit { stop_supervised }
+     process.on_sigterm { stop_supervised }
      start_supervised
    end

@@ -39,35 +39,24 @@ module Karafka
      Karafka::Process.instance
    end

-   # What should happen when we decide to quit with sigint
-   def bind_on_sigint
-     process.on_sigint { stop_supervised }
-   end
-
-   # What should happen when we decide to quit with sigquit
-   def bind_on_sigquit
-     process.on_sigquit { stop_supervised }
-   end
-
-   # What should happen when we decide to quit with sigterm
-   def bind_on_sigterm
-     process.on_sigterm { stop_supervised }
-   end
-
    # Starts Karafka with a supervision
    # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
    #   finish the loop (and it won't happen until we explicitly want to stop)
    def start_supervised
-     process.supervise do
-       Karafka::App.run!
-       Karafka::Fetcher.call
-     end
+     process.supervise
+     Karafka::App.run!
+     Karafka::Fetcher.call
    end

    # Stops Karafka with a supervision (as long as there is a shutdown timeout)
    # If consumers won't stop in a given timeframe, it will force them to exit
    def stop_supervised
-     Karafka.monitor.instrument('server.stop', {})
+     # Because this is called in the trap context, there is a chance that instrumentation
+     # listeners contain things that aren't allowed from within a trap context.
+     # To bypass that (instead of telling users not to do things they need to)
+     # we spin up a thread to instrument server.stop and server.stop.error and wait until
+     # they're finished
+     Thread.new { Karafka.monitor.instrument('server.stop', {}) }.join

      Karafka::App.stop!
      # If there is no shutdown timeout, we don't exit and wait until all the consumers
@@ -82,7 +71,9 @@ module Karafka
        sleep SUPERVISION_SLEEP
      end

-     Karafka.monitor.instrument('server.stop.error', {})
+     raise Errors::ForcefulShutdown
+   rescue Errors::ForcefulShutdown => error
+     Thread.new { Karafka.monitor.instrument('server.stop.error', error: error) }.join
      # We're done waiting, let's kill them!
      consumer_threads.each(&:terminate)

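The Thread-plus-join pattern above (changelog #306) exists because Ruby restricts what may run inside a signal trap: anything that takes a mutex, such as `Logger`, fails there. A minimal standalone sketch of the failure and the workaround:

```ruby
require 'logger'

LOGGER = Logger.new($stdout)

# Naive version: Logger synchronizes on a mutex internally, which Ruby forbids
# inside a signal handler, so this prints "log writing failed. can't be called
# from trap context" instead of the message.
trap('INT') { LOGGER.info('shutting down') }

# Workaround used above: run the logging in a short-lived thread and join it,
# keeping shutdown synchronous while escaping the trap-context restriction.
trap('INT') { Thread.new { LOGGER.info('shutting down') }.join }
```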
data/lib/karafka/setup/config.rb CHANGED
@@ -73,7 +73,7 @@ module Karafka
    # Time that a given partition will be paused from fetching messages, when message
    # consumption fails. It allows us to process other partitions, while the error is being
    # resolved and also "slows" things down, so it prevents from "eating" up all messages and
-   # consuming them with failed code
+   # consuming them with failed code. Use `nil` if you want to pause forever and never retry.
    setting :pause_timeout, 10
    # option offset_commit_interval [Integer] the interval between offset commits,
    #   in seconds.
@@ -85,6 +85,13 @@ module Karafka
    # option heartbeat_interval [Integer] the interval between heartbeats; must be less
    #   than the session window.
    setting :heartbeat_interval, 10
+   # option offset_retention_time [Integer] The length of the retention window, known as
+   #   offset retention time
+   setting :offset_retention_time, nil
+   # option fetcher_max_queue_size [Integer] max number of items in the fetch queue that
+   #   are stored for further processing. Note that each item in the queue represents a
+   #   response from a single broker
+   setting :fetcher_max_queue_size, 100
    # option max_bytes_per_partition [Integer] the maximum amount of data fetched
    #   from a single partition at a time.
    setting :max_bytes_per_partition, 1_048_576
@@ -111,9 +118,6 @@ module Karafka
    # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
    #   a Kafka cluster that went down (in seconds)
    setting :reconnect_timeout, 5
-   # option offset_retention_time [Integer] The length of the retention window, known as
-   #   offset retention time
-   setting :offset_retention_time, nil
    # option connect_timeout [Integer] Sets the number of seconds to wait while connecting to
    #   a broker for the first time. When ruby-kafka initializes, it needs to connect to at
    #   least one host.
@@ -152,6 +156,8 @@ module Karafka
    setting :sasl_scram_password, nil
    # option sasl_scram_mechanism [String, nil] Scram mechanism, either 'sha256' or 'sha512'
    setting :sasl_scram_mechanism, nil
+   # option sasl_over_ssl [Boolean] whether to enforce SSL with SASL
+   setting :sasl_over_ssl, true
  end

  class << self
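Taken together with the schema changes earlier, a hedged setup sketch exercising the new and relaxed options (assuming, as the settings file above suggests, that they live under the `kafka` namespace; values are illustrative):

```ruby
Karafka::App.setup do |config|
  # nil is now a valid value: pause a failing partition forever, never retry
  config.kafka.pause_timeout = nil
  # cap the number of buffered broker responses kept for processing (#332)
  config.kafka.fetcher_max_queue_size = 50
  # allow SASL authentication without enforcing SSL (new in 1.2.13)
  config.kafka.sasl_over_ssl = false
end
```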
data/lib/karafka/setup/configurators/water_drop.rb CHANGED
@@ -13,7 +13,7 @@
  ::WaterDrop.setup do |water_config|
    water_config.deliver = true

-   config.to_h.except(:kafka).each do |k, v|
+   config.to_h.reject { |k, _v| k == :kafka }.each do |k, v|
      key_assignment = :"#{k}="
      next unless water_config.respond_to?(key_assignment)
      water_config.public_send(key_assignment, v)
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
# Main module namespace
module Karafka
  # Current Karafka version
- VERSION = '1.2.0'
+ VERSION = '1.2.13'
end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
-   version: 1.2.0
+   version: 1.2.13
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-03-23 00:00:00.000000000 Z
+ date: 2019-05-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: activesupport
@@ -32,42 +32,42 @@ dependencies:
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '0.7'
+         version: '0.8'
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '0.7'
+         version: '0.8'
  - !ruby/object:Gem::Dependency
    name: dry-inflector
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 0.1.1
+         version: '0.1'
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 0.1.1
+         version: '0.1'
  - !ruby/object:Gem::Dependency
    name: dry-monitor
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '0.1'
+         version: '0.3'
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '0.1'
+         version: '0.3'
  - !ruby/object:Gem::Dependency
    name: dry-validation
    requirement: !ruby/object:Gem::Requirement
@@ -144,42 +144,42 @@ dependencies:
      requirements:
      - - ">="
        - !ruby/object:Gem::Version
-         version: 0.5.3
+         version: '0.6'
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - ">="
        - !ruby/object:Gem::Version
-         version: 0.5.3
+         version: '0.6'
  - !ruby/object:Gem::Dependency
    name: thor
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '0.19'
+         version: '0.20'
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '0.19'
+         version: '0.20'
  - !ruby/object:Gem::Dependency
    name: waterdrop
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '1.2'
+         version: 1.2.4
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: '1.2'
+         version: 1.2.4
  description: Framework used to simplify Apache Kafka based Ruby applications development
  email:
  - maciej@coditsu.io
@@ -190,6 +190,7 @@ executables:
  extensions: []
  extra_rdoc_files: []
  files:
+ - ".coditsu.yml"
  - ".console_irbrc"
  - ".gitignore"
  - ".rspec"
@@ -222,8 +223,9 @@ files:
  - lib/karafka/cli/info.rb
  - lib/karafka/cli/install.rb
  - lib/karafka/cli/server.rb
+ - lib/karafka/connection/api_adapter.rb
+ - lib/karafka/connection/builder.rb
  - lib/karafka/connection/client.rb
- - lib/karafka/connection/config_adapter.rb
  - lib/karafka/connection/delegator.rb
  - lib/karafka/connection/listener.rb
  - lib/karafka/consumers/callbacks.rb
@@ -278,7 +280,8 @@ homepage: https://github.com/karafka/karafka
  licenses:
  - MIT
  metadata: {}
- post_install_message:
+ post_install_message: "\e[93mWarning:\e[0m If you're using Kafka 0.10, please lock
+   ruby-kafka in your Gemfile to version '0.6.8':\ngem 'ruby-kafka', '~> 0.6.8'\n"
  rdoc_options: []
  require_paths:
  - lib
@@ -293,8 +296,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.6
+ rubygems_version: 3.0.1
  signing_key:
  specification_version: 4
  summary: Ruby based framework for working with Apache Kafka