karafka 1.2.0 → 1.2.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0bb0a1f72768ebf4bf720ebda57ebc26a0178275adabffd494b24e1612e9b38a
-  data.tar.gz: f586038a0498227e8a287cc173a23e77bb99b6cb8d453c39be55a220e2a0d361
+  metadata.gz: 3f8c560eb30d1e8365d228786192ff6f1160e72cad7bb601a8a2fa88c4a1bdba
+  data.tar.gz: d56768f5127ba10bbd39b318b9b8efe696f65a93e29c2706de279e9bd3f9278f
 SHA512:
-  metadata.gz: e2b862da6372bc91f76bc01c20eb21ccde9d61d67749ccce97eadd2f739484dbd68a149d08d6e275d94d95b6ce9610d86c2d01e85bb1841a80e048ed832810ac
-  data.tar.gz: 06fb89700e59f810ec984b54f0424cfdeb98c79e17b8629010f011e68edff5fa1e345576a7b28ed07aff440739ed11824016da6083601a92b8f83fcca68e576e
+  metadata.gz: fed993507c0c79e90a8b11797db8caefb2565503f9076b01781892b0d97e941525d52a0f1bf273eee3d99b44b563e227b31e61cb82d790a02c19fd21915bd3fa
+  data.tar.gz: e64fdd7e015421048f93f693948d32e5cab85e7c0d8daab50e1810f3128e35544bc570468536800f0667ae1427b726582f2298c3b39b5c34c47d4a4b6ceb932a
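To check a downloaded gem against these values: a `.gem` file is a tar archive containing `metadata.gz` and `data.tar.gz`, so the sums can be recomputed locally. A minimal Ruby sketch (assumes `gem fetch karafka --version 1.2.7` was run in the current directory):

```ruby
require 'digest'
require 'rubygems/package'

# Walk the tar entries inside the .gem and digest the two files whose
# SHA256 values are listed in checksums.yaml above.
File.open('karafka-1.2.7.gem', 'rb') do |io|
  Gem::Package::TarReader.new(io).each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
    puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
  end
end
```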
data/.gitignore CHANGED
File without changes
data/.ruby-version CHANGED
@@ -1 +1 @@
-2.5.0
+2.5.1
data/.travis.yml CHANGED
@@ -1,18 +1,21 @@
-language: ruby
+dist: trusty
 sudo: false
+language: ruby
+cache: bundler
+
 rvm:
-  - 2.3.0
-  - 2.3.1
-  - 2.3.2
-  - 2.3.3
-  - 2.3.4
-  - 2.4.0
-  - 2.4.1
-  - 2.4.2
-  - 2.5.0
-  - jruby-head
-script: bundle exec rspec spec/
+  - 2.3.7
+  - 2.4.4
+  - 2.5.1
+  - ruby-head
+  - jruby-9.2.0.0
+
 env:
   global:
     - JRUBY_OPTS='--debug'
-install: bundle install --jobs=3 --retry=3
+
+before_install:
+  - gem install bundler
+  - gem update --system
+
+script: bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,5 +1,34 @@
 # Karafka framework changelog
 
+## 1.2.7
+
+- Unlock Ruby-kafka version with a warning
+
+## 1.2.6
+- Lock WaterDrop to 1.2.3
+- Lock Ruby-Kafka to 0.6.x (support for 0.7 will be added in Karafka 1.3)
+
+## 1.2.5
+- #354 - Expose consumer heartbeat
+- #373 - Async producer not working properly with responders
+
+## 1.2.4
+- #332 - Fetcher for max queue size
+
+## 1.2.3
+- #313 - Support PLAINTEXT and SSL for scheme
+- #320 - Pausing indefinitely with nil pause timeout doesn't work
+- #318 - Partition pausing doesn't work with custom topic mappers
+- Rename ConfigAdapter to ApiAdapter to better reflect what it does
+- #317 - Manual offset committing doesn't work with custom topic mappers
+
+## 1.2.2
+- #312 - Broken for ActiveSupport 5.2.0
+
+## 1.2.1
+- #304 - Unification of error instrumentation event details
+- #306 - Using file logger from within a trap context upon shutdown is impossible
+
 ## 1.2.0
 - Spec improvements
 - #260 - Specs missing randomization
data/Gemfile CHANGED
@@ -5,6 +5,10 @@ source 'https://rubygems.org'
 gemspec
 
 group :development, :test do
+  gem 'benchmark-ips'
+end
+
+group :test do
   gem 'rspec'
   gem 'simplecov'
   gem 'timecop'
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka (1.2.0)
+    karafka (1.2.7)
       activesupport (>= 4.0)
       dry-configurable (~> 0.7)
       dry-inflector (~> 0.1.1)
@@ -11,37 +11,40 @@ PATH
       multi_json (>= 1.12)
       rake (>= 11.3)
       require_all (>= 1.4)
-      ruby-kafka (>= 0.5.3)
+      ruby-kafka (>= 0.6)
       thor (~> 0.19)
-      waterdrop (~> 1.2)
+      waterdrop (~> 1.2.4)
 
 GEM
   remote: https://rubygems.org/
   specs:
-    activesupport (5.1.5)
+    activesupport (5.2.1)
       concurrent-ruby (~> 1.0, >= 1.0.2)
-      i18n (~> 0.7)
+      i18n (>= 0.7, < 2)
       minitest (~> 5.1)
       tzinfo (~> 1.1)
+    benchmark-ips (2.7.2)
     concurrent-ruby (1.0.5)
-    delivery_boy (0.2.4)
-      king_konf (~> 0.1.8)
-      ruby-kafka (~> 0.5.1)
+    concurrent-ruby (1.0.5-java)
+    delivery_boy (0.2.7)
+      king_konf (~> 0.2)
+      ruby-kafka (~> 0.5)
     diff-lcs (1.3)
-    docile (1.3.0)
+    digest-crc (0.4.1)
+    docile (1.3.1)
     dry-configurable (0.7.0)
       concurrent-ruby (~> 1.0)
     dry-container (0.6.0)
       concurrent-ruby (~> 1.0)
       dry-configurable (~> 0.1, >= 0.1.3)
-    dry-core (0.4.5)
+    dry-core (0.4.7)
       concurrent-ruby (~> 1.0)
-    dry-equalizer (0.2.0)
+    dry-equalizer (0.2.1)
     dry-events (0.1.0)
       concurrent-ruby (~> 1.0)
       dry-core (~> 0.4)
       dry-equalizer (~> 0.2)
-    dry-inflector (0.1.1)
+    dry-inflector (0.1.2)
     dry-logic (0.4.2)
       dry-container (~> 0.2, >= 0.2.6)
       dry-core (~> 0.2)
@@ -51,48 +54,48 @@ GEM
       dry-equalizer (~> 0.2)
       dry-events (~> 0.1)
       rouge (~> 2.0, >= 2.2.1)
-    dry-types (0.12.2)
+    dry-types (0.13.2)
       concurrent-ruby (~> 1.0)
-      dry-configurable (~> 0.1)
       dry-container (~> 0.3)
-      dry-core (~> 0.2, >= 0.2.1)
+      dry-core (~> 0.4, >= 0.4.4)
       dry-equalizer (~> 0.2)
+      dry-inflector (~> 0.1, >= 0.1.2)
       dry-logic (~> 0.4, >= 0.4.2)
-      inflecto (~> 0.0.0, >= 0.0.2)
-    dry-validation (0.11.1)
+    dry-validation (0.12.2)
       concurrent-ruby (~> 1.0)
       dry-configurable (~> 0.1, >= 0.1.3)
       dry-core (~> 0.2, >= 0.2.1)
       dry-equalizer (~> 0.2)
       dry-logic (~> 0.4, >= 0.4.0)
-      dry-types (~> 0.12.0)
+      dry-types (~> 0.13.1)
     envlogic (1.1.0)
       dry-inflector (~> 0.1)
-    i18n (0.9.5)
+    i18n (1.1.0)
       concurrent-ruby (~> 1.0)
-    inflecto (0.0.2)
     json (2.1.0)
-    king_konf (0.1.10)
+    json (2.1.0-java)
+    king_konf (0.3.6)
     minitest (5.11.3)
     multi_json (1.13.1)
     null-logger (0.1.5)
     rake (12.3.1)
     require_all (2.0.0)
     rouge (2.2.1)
-    rspec (3.7.0)
-      rspec-core (~> 3.7.0)
-      rspec-expectations (~> 3.7.0)
-      rspec-mocks (~> 3.7.0)
-    rspec-core (3.7.1)
-      rspec-support (~> 3.7.0)
-    rspec-expectations (3.7.0)
+    rspec (3.8.0)
+      rspec-core (~> 3.8.0)
+      rspec-expectations (~> 3.8.0)
+      rspec-mocks (~> 3.8.0)
+    rspec-core (3.8.0)
+      rspec-support (~> 3.8.0)
+    rspec-expectations (3.8.1)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.7.0)
-    rspec-mocks (3.7.0)
+      rspec-support (~> 3.8.0)
+    rspec-mocks (3.8.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.7.0)
-    rspec-support (3.7.1)
-    ruby-kafka (0.5.4)
+      rspec-support (~> 3.8.0)
+    rspec-support (3.8.0)
+    ruby-kafka (0.7.1)
+      digest-crc
     simplecov (0.16.1)
       docile (~> 1.1)
       json (>= 1.8, < 3)
@@ -100,24 +103,28 @@ GEM
     simplecov-html (0.10.2)
     thor (0.20.0)
     thread_safe (0.3.6)
+    thread_safe (0.3.6-java)
     timecop (0.9.1)
     tzinfo (1.2.5)
      thread_safe (~> 0.1)
-    waterdrop (1.2.0)
+    waterdrop (1.2.4)
       delivery_boy (~> 0.2)
       dry-configurable (~> 0.7)
       dry-monitor (~> 0.1)
       dry-validation (~> 0.11)
-      null-logger
+      null-logger (~> 0.1)
+      ruby-kafka (>= 0.6)
 
 PLATFORMS
+  java
   ruby
 
 DEPENDENCIES
+  benchmark-ips
   karafka!
   rspec
   simplecov
   timecop
 
 BUNDLED WITH
-   1.16.1
+   1.16.5
data/README.md CHANGED
@@ -8,15 +8,13 @@ Karafka allows you to capture everything that happens in your systems in large s
 
 Karafka not only handles incoming messages but also provides tools for building complex data-flow applications that receive and send messages.
 
-**Warning**: Wiki and all the docs refer to the 1.2.0.beta4. Sorry for the inconvenience. We will release the stable 1.2.0 version soon.
-
 ## How does it work
 
-Karafka provides a higher-level abstraction that allows you to focus on your business logic development, instead of focusing on implementing lower level abstraction layers. It provides developers with a set of tools that are dedicated for building multi-topic applications similarly to how Rails applications are being built.
+Karafka provides a higher-level abstraction that allows you to focus on your business logic development, instead of focusing on implementing lower level abstraction layers. It provides developers with a set of tools that are dedicated for building multi-topic applications similar to how Rails applications are being built.
 
 ### Some things you might wonder about:
 
-- You can integrate Karafka with **any** Ruby based application.
+- You can integrate Karafka with **any** Ruby-based application.
 - Karafka does **not** require Sidekiq or any other third party software (apart from Kafka itself).
 - Karafka works with Ruby on Rails but it is a **standalone** framework that can work without it.
 - Karafka has a **minimal** set of dependencies, so adding it won't be a huge burden for your already existing applications.
@@ -27,15 +25,21 @@ Karafka based applications can be easily deployed to any type of infrastructure,
 * Heroku
 * Capistrano
 * Docker
+* Terraform
 
 ## Support
 
-Karafka has a [Wiki pages](https://github.com/karafka/karafka/wiki) for almost everything and a pretty decent [FAQ](https://github.com/karafka/karafka/wiki/FAQ). It covers the whole installation, setup and deployment along with other useful details on how to run Karafka.
+Karafka has a [Wiki pages](https://github.com/karafka/karafka/wiki) for almost everything and a pretty decent [FAQ](https://github.com/karafka/karafka/wiki/FAQ). It covers the whole installation, setup, and deployment along with other useful details on how to run Karafka.
 
 If you have any questions about using Karafka, feel free to join our [Gitter](https://gitter.im/karafka/karafka) chat channel.
 
 ## Getting started
 
+If you're completely new to the subject, you can start with our "Kafka on Rails" articles series, that will get you up and running with the terminology and basic ideas behind using Kafka:
+
+- [Kafka on Rails: Using Kafka with Ruby on Rails – Part 1 – Kafka basics and its advantages](https://mensfeld.pl/2017/11/kafka-on-rails-using-kafka-with-ruby-on-rails-part-1-kafka-basics-and-its-advantages/)
+- [Kafka on Rails: Using Kafka with Ruby on Rails – Part 2 – Getting started with Ruby and Kafka](https://mensfeld.pl/2018/01/kafka-on-rails-using-kafka-with-ruby-on-rails-part-2-getting-started-with-ruby-and-kafka/)
+
 If you want to get started with Kafka and Karafka as fast as possible, then the best idea is to just clone our example repository:
 
 ```bash
@@ -69,7 +73,7 @@ Karafka framework and Karafka team are __not__ related to Kafka streaming servic
 
 First, thank you for considering contributing to Karafka! It's people like you that make the open source community such a great community!
 
-Each pull request must pass all the rspec specs and meet our quality requirements.
+Each pull request must pass all the RSpec specs and meet our quality requirements.
 
 To check if everything is as it should be, we use [Coditsu](https://coditsu.io) that combines multiple linters and code analyzers for both code and documentation. Once you're done with your changes, submit a pull request.
 
data/karafka.gemspec CHANGED
@@ -25,9 +25,14 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'multi_json', '>= 1.12'
   spec.add_dependency 'rake', '>= 11.3'
   spec.add_dependency 'require_all', '>= 1.4'
-  spec.add_dependency 'ruby-kafka', '>= 0.5.3'
+  spec.add_dependency 'ruby-kafka', '>= 0.6'
   spec.add_dependency 'thor', '~> 0.19'
-  spec.add_dependency 'waterdrop', '~> 1.2'
+  spec.add_dependency 'waterdrop', '~> 1.2.4'
+
+  spec.post_install_message = <<~MSG
+    \e[93mWarning:\e[0m If you're using Kafka 0.10, please lock ruby-kafka in your Gemfile to version '0.6.8':
+    gem 'ruby-kafka', '~> 0.6.8'
+  MSG
 
   spec.required_ruby_version = '>= 2.3.0'
 
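The new post-install warning exists because ruby-kafka 0.7 requires newer broker protocol support, so apps still on Kafka 0.10 must stay on the 0.6.x driver line. A sketch of the Gemfile pin it asks for (surrounding gems illustrative):

```ruby
# Gemfile of an application talking to a Kafka 0.10 cluster
source 'https://rubygems.org'

gem 'karafka', '~> 1.2.7'
# Keep ruby-kafka on 0.6.x, as the post-install warning above recommends:
gem 'ruby-kafka', '~> 0.6.8'
```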
data/lib/karafka/attributes_map.rb CHANGED
@@ -14,15 +14,15 @@ module Karafka
       # @note All other settings will be passed to Kafka.new method invocation.
       #   All elements in this hash are just edge cases
       # @return [Hash] hash with proper sections on what to proxy where in Ruby-Kafka
-      def config_adapter
+      def api_adapter
         {
           consumer: %i[
             session_timeout offset_commit_interval offset_commit_threshold
-            offset_retention_time heartbeat_interval
+            offset_retention_time heartbeat_interval fetcher_max_queue_size
           ],
-          subscription: %i[start_from_beginning max_bytes_per_partition],
-          consuming: %i[min_bytes max_bytes max_wait_time],
-          pausing: %i[pause_timeout],
+          subscribe: %i[start_from_beginning max_bytes_per_partition],
+          consumption: %i[min_bytes max_bytes max_wait_time],
+          pause: %i[pause_timeout],
           # All the options that are under kafka config namespace, but are not used
           # directly with kafka api, but from the Karafka user perspective, they are
           # still related to kafka. They should not be proxied anywhere
@@ -32,7 +32,7 @@ module Karafka
 
       # @return [Array<Symbol>] properties that can be set on a per topic level
       def topic
-        (config_adapter[:subscription] + %i[
+        (api_adapter[:subscribe] + %i[
           backend
           name
           parser
@@ -48,10 +48,10 @@ module Karafka
       # Thanks to this solution, if any new setting is available for ruby-kafka, we just need
       # to add it to our configuration class and it will be handled automatically.
       def consumer_group
-        # @note We don't ignore the config_adapter[:ignored] values as they should be ignored
+        # @note We don't ignore the api_adapter[:ignored] values as they should be ignored
         #   only when proxying details to ruby-kafka. We use ignored fields internally in karafka
-        ignored_settings = config_adapter[:subscription]
-        defined_settings = config_adapter.values.flatten
+        ignored_settings = api_adapter[:subscribe]
+        defined_settings = api_adapter.values.flatten
         karafka_settings = %i[batch_fetching]
         # This is a dirty and bad hack of dry-configurable to get keys before setting values
         dynamically_proxied = Karafka::Setup::Config
data/lib/karafka/base_responder.rb CHANGED
@@ -198,7 +198,9 @@ module Karafka
     # @param options [Hash] options for waterdrop
     # @return [Class] WaterDrop producer (sync or async based on the settings)
     def producer(options)
-      options[:async] ? WaterDrop::AsyncProducer : WaterDrop::SyncProducer
+      self.class.topics[
+        options[:topic]
+      ].async? ? WaterDrop::AsyncProducer : WaterDrop::SyncProducer
     end
   end
 end
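With this change the sync/async choice follows how a topic was registered in the responder instead of an ad-hoc option. A hypothetical responder showing the per-topic flag that `async?` reads (class and topic names are made up):

```ruby
class ExampleResponder < Karafka::BaseResponder
  topic :audit_log            # delivered via WaterDrop::SyncProducer
  topic :metrics, async: true # delivered via WaterDrop::AsyncProducer

  def respond(event)
    respond_to :audit_log, event
    respond_to :metrics, event
  end
end
```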
data/lib/karafka/connection/config_adapter.rb → data/lib/karafka/connection/api_adapter.rb RENAMED
@@ -3,22 +3,22 @@
 module Karafka
   # Namespace for all the things related to Kafka connection
   module Connection
-    # Mapper used to convert our internal settings into ruby-kafka settings
+    # Mapper used to convert our internal settings into ruby-kafka settings based on their
+    # API requirements.
     # Since ruby-kafka has more and more options and there are few "levels" on which
     # we have to apply them (despite the fact, that in Karafka you configure all of it
     # in one place), we have to remap it into what ruby-kafka driver requires
     # @note The good thing about Kafka.new method is that it ignores all options that
     #   do nothing. So we don't have to worry about injecting our internal settings
     #   into the client and breaking stuff
-    module ConfigAdapter
+    module ApiAdapter
       class << self
         # Builds all the configuration settings for Kafka.new method
-        # @param _consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
         # @return [Array<Hash>] Array with all the client arguments including hash with all
         #   the settings required by Kafka.new method
         # @note We return array, so we can inject any arguments we want, in case of changes in the
         #   raw driver
-        def client(_consumer_group)
+        def client
           # This one is a default that takes all the settings except special
           # cases defined in the map
           settings = {
@@ -29,9 +29,9 @@ module Karafka
           kafka_configs.each do |setting_name, setting_value|
             # All options for config adapter should be ignored as we're just interested
             # in what is left, as we want to pass all the options that are "typical"
-            # and not listed in the config_adapter special cases mapping. All the values
-            # from the config_adapter mapping go somewhere else, not to the client directly
-            next if AttributesMap.config_adapter.values.flatten.include?(setting_name)
+            # and not listed in the api_adapter special cases mapping. All the values
+            # from the api_adapter mapping go somewhere else, not to the client directly
+            next if AttributesMap.api_adapter.values.flatten.include?(setting_name)
 
             settings[setting_name] = setting_value
           end
@@ -58,26 +58,54 @@ module Karafka
         # @return [Array<Hash>] Array with all the arguments required by consuming method
         #   including hash with all the settings required by
         #   Kafka::Consumer#consume_each_message and Kafka::Consumer#consume_each_batch method
-        def consuming(consumer_group)
-          settings = {
-            automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
-          }
-          [sanitize(fetch_for(:consuming, consumer_group, settings))]
+        def consumption(consumer_group)
+          [
+            sanitize(
+              fetch_for(
+                :consumption,
+                consumer_group,
+                automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
+              )
+            )
+          ]
         end
 
         # Builds all the configuration settings for kafka consumer#subscribe method
         # @param topic [Karafka::Routing::Topic] topic that holds details for a given subscription
         # @return [Hash] hash with all the settings required by kafka consumer#subscribe method
-        def subscription(topic)
-          settings = fetch_for(:subscription, topic)
+        def subscribe(topic)
+          settings = fetch_for(:subscribe, topic)
           [Karafka::App.config.topic_mapper.outgoing(topic.name), sanitize(settings)]
         end
 
         # Builds all the configuration settings required by kafka consumer#pause method
+        # @param topic [String] topic that we want to pause
+        # @param partition [Integer] number partition that we want to pause
         # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
-        # @return [Hash] hash with all the settings required to pause kafka consumer
-        def pausing(consumer_group)
-          { timeout: consumer_group.pause_timeout }
+        # @return [Array] array with all the details required to pause kafka consumer
+        def pause(topic, partition, consumer_group)
+          [
+            Karafka::App.config.topic_mapper.outgoing(topic),
+            partition,
+            { timeout: consumer_group.pause_timeout }
+          ]
+        end
+
+        # Remaps topic details taking the topic mapper feature into consideration.
+        # @param params [Karafka::Params::Params] params instance
+        # @return [Array] array with all the details needed by ruby-kafka to mark message
+        #   as processed
+        # @note When default empty topic mapper is used, no need for any conversion as the
+        #   internal and external format are exactly the same
+        def mark_message_as_processed(params)
+          # Majority of non heroku users don't use custom topic mappers. No need to change
+          # anything when it is a default mapper that does not change anything
+          return [params] if Karafka::App.config.topic_mapper == Karafka::Routing::TopicMapper
+
+          # @note We don't use tap as it is around 13% slower than non-dup version
+          dupped = params.dup
+          dupped['topic'] = Karafka::App.config.topic_mapper.outgoing(params.topic)
+          [dupped]
         end
 
         private
@@ -90,10 +118,10 @@ module Karafka
         def fetch_for(namespace_key, route_layer, preexisting_settings = {})
           kafka_configs.each_key do |setting_name|
             # Ignore settings that are not related to our namespace
-            next unless AttributesMap.config_adapter[namespace_key].include?(setting_name)
+            next unless AttributesMap.api_adapter[namespace_key].include?(setting_name)
             # Ignore settings that are already initialized
             # In case they are in preexisting settings fetched differently
-            next if preexisting_settings.keys.include?(setting_name)
+            next if preexisting_settings.key?(setting_name)
             # Fetch all the settings from a given layer object. Objects can handle the fallback
             # to the kafka settings, so
             preexisting_settings[setting_name] = route_layer.send(setting_name)
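The `mark_message_as_processed` remap only does work when a non-default topic mapper is configured, e.g. the Heroku-style name prefixing the comments allude to. A sketch of such a mapper (module name and prefix are illustrative):

```ruby
module PrefixedTopicMapper
  PREFIX = 'myapp-prod.'

  class << self
    # Topic name as the Karafka app sees it (broker-side prefix stripped)
    def incoming(topic)
      topic.sub(PREFIX, '')
    end

    # Topic name as it exists on the Kafka cluster
    def outgoing(topic)
      "#{PREFIX}#{topic}"
    end
  end
end

# Karafka::App.setup { |config| config.topic_mapper = PrefixedTopicMapper }
```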
data/lib/karafka/connection/builder.rb ADDED
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Connection
+    # Builder used to construct Kafka client
+    module Builder
+      class << self
+        # Builds a Kafka::Client instance that we use to work with Kafka cluster
+        # @return [::Kafka::Client] returns a Kafka client
+        def call
+          Kafka.new(*ApiAdapter.client)
+        end
+      end
+    end
+  end
+end
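For the rest of the framework the builder is a one-liner; given a configured app it should behave roughly like:

```ruby
# Returns a ::Kafka::Client built from the settings that ApiAdapter.client extracts
kafka = Karafka::Connection::Builder.call
```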
data/lib/karafka/connection/client.rb CHANGED
@@ -23,7 +23,7 @@ module Karafka
       # @yieldparam [Array<Kafka::FetchedMessage>] kafka fetched messages
       # @note This will yield with raw messages - no preprocessing or reformatting.
       def fetch_loop
-        settings = ConfigAdapter.consuming(consumer_group)
+        settings = ApiAdapter.consumption(consumer_group)
 
         if consumer_group.batch_fetching
           kafka_consumer.each_batch(*settings) { |batch| yield(batch.messages) }
@@ -66,10 +66,7 @@ module Karafka
       # @param topic [String] topic that we want to pause
       # @param partition [Integer] number partition that we want to pause
       def pause(topic, partition)
-        settings = ConfigAdapter.pausing(consumer_group)
-        timeout = settings[:timeout]
-        raise(Errors::InvalidPauseTimeout, timeout) unless timeout.positive?
-        kafka_consumer.pause(topic, partition, settings)
+        kafka_consumer.pause(*ApiAdapter.pause(topic, partition, consumer_group))
       end
 
       # Marks a given message as consumed and commit the offsets
@@ -77,12 +74,20 @@ module Karafka
       #   that offset commit happen asap in case of a crash
       # @param [Karafka::Params::Params] params message that we want to mark as processed
       def mark_as_consumed(params)
-        kafka_consumer.mark_message_as_processed(params)
+        kafka_consumer.mark_message_as_processed(
+          *ApiAdapter.mark_message_as_processed(params)
+        )
         # Trigger an immediate, blocking offset commit in order to minimize the risk of crashing
         # before the automatic triggers have kicked in.
         kafka_consumer.commit_offsets
       end
 
+      # Triggers a non-optional blocking heartbeat that notifies Kafka about the fact, that this
+      # consumer / client is still up and running
+      def trigger_heartbeat
+        kafka_consumer.trigger_heartbeat!
+      end
+
       private
 
       attr_reader :consumer_group
@@ -90,11 +95,13 @@ module Karafka
       # @return [Kafka::Consumer] returns a ready to consume Kafka consumer
       #   that is set up to consume from topics of a given consumer group
       def kafka_consumer
-        @kafka_consumer ||= kafka.consumer(
-          *ConfigAdapter.consumer(consumer_group)
+        # @note We don't cache the connection internally because we cache kafka_consumer that uses
+        #   kafka client object instance
+        @kafka_consumer ||= Builder.call.consumer(
+          *ApiAdapter.consumer(consumer_group)
         ).tap do |consumer|
           consumer_group.topics.each do |topic|
-            consumer.subscribe(*ConfigAdapter.subscription(topic))
+            consumer.subscribe(*ApiAdapter.subscribe(topic))
           end
         end
       rescue Kafka::ConnectionError
@@ -105,13 +112,6 @@ module Karafka
         #   down the road
         raise
       end
-
-      # @return [Kafka] returns a Kafka
-      # @note We don't cache it internally because we cache kafka_consumer that uses kafka
-      #   object instance
-      def kafka
-        Kafka.new(*ConfigAdapter.client(consumer_group))
-      end
     end
   end
 end
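The exposed heartbeat targets consumers whose per-message work can outlast the session timeout. A hedged sketch of how a consumer might use it (whether `trigger_heartbeat` is reachable directly from the consumer like this depends on the app's wiring; the client method itself is what the diff adds):

```ruby
class SlowConsumer < Karafka::BaseConsumer
  def consume
    params_batch.each do |params|
      heavy_processing(params) # may take close to the session timeout
      trigger_heartbeat        # keep the group membership alive in between
    end
  end
end
```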
data/lib/karafka/consumers/callbacks.rb CHANGED
@@ -23,7 +23,7 @@ module Karafka
       # @param method_name [Symbol, String] method name or nil if we plan to provide a block
       # @yield A block with a code that should be executed before scheduling
       define_method type do |method_name = nil, &block|
-        set_callback type, :before, method_name ? method_name : block
+        set_callback type, :before, method_name || block
       end
     end
   end
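Both invocation styles that the simplified line supports look like this in a consumer (a sketch; it assumes `after_fetch` is among the callback types this module defines):

```ruby
class EventsConsumer < Karafka::BaseConsumer
  include Karafka::Consumers::Callbacks

  after_fetch :note_fetch          # method-name variant
  after_fetch { @fetched = true }  # block variant (used when method_name is nil)

  def consume; end

  private

  def note_fetch
    Karafka.logger.info('Batch fetched')
  end
end
```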
data/lib/karafka/errors.rb CHANGED
@@ -41,10 +41,10 @@ module Karafka
    #   This should never happen and if it does, please contact us
    MissingClient = Class.new(BaseError)
 
-    # Raised when we attemp to pause a partition but the pause timeout is equal to 0
-    InvalidPauseTimeout = Class.new(BaseError)
-
    # Raised when want to hook up to an event that is not registered and supported
    UnregisteredMonitorEvent = Class.new(BaseError)
+
+    # Raised when we've waited enough for shutting down an unresponding process
+    ForcefulShutdown = Class.new(BaseError)
  end
 end
data/lib/karafka/process.rb CHANGED
@@ -34,10 +34,8 @@ module Karafka
 
    # Method catches all HANDLED_SIGNALS and performs appropriate callbacks (if defined)
    # @note If there are no callbacks, this method will just ignore a given signal that was sent
-    # @yield [Block] block of code that we want to execute and supervise
    def supervise
      HANDLED_SIGNALS.each { |signal| trap_signal(signal) }
-      yield
    end
 
    private
data/lib/karafka/schemas/consumer_group.rb CHANGED
@@ -7,7 +7,7 @@ module Karafka
    # Valid uri schemas of Kafka broker url
    # The ||= is due to the behavior of require_all that resolves dependencies
    # but sometimes loads things twice
-    URI_SCHEMES ||= %w[kafka kafka+ssl].freeze
+    URI_SCHEMES ||= %w[kafka kafka+ssl plaintext ssl].freeze
 
    # Available sasl scram mechanism of authentication (plus nil)
    SASL_SCRAM_MECHANISMS ||= %w[sha256 sha512].freeze
@@ -31,11 +31,12 @@ module Karafka
      required(:id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
      required(:seed_brokers).filled { each(:broker_schema?) }
      required(:session_timeout).filled { int? | float? }
-      required(:pause_timeout).filled { (int? | float?) & gteq?(0) }
+      required(:pause_timeout) { none? | ((int? | float?) & gteq?(0)) }
      required(:offset_commit_interval) { int? | float? }
      required(:offset_commit_threshold).filled(:int?)
      required(:offset_retention_time) { none?.not > int? }
      required(:heartbeat_interval).filled { (int? | float?) & gteq?(0) }
+      required(:fetcher_max_queue_size).filled(:int?, gt?: 0)
      required(:connect_timeout).filled { (int? | float?) & gt?(0) }
      required(:socket_timeout).filled { (int? | float?) & gt?(0) }
      required(:min_bytes).filled(:int?, gt?: 0)
data/lib/karafka/server.rb CHANGED
@@ -19,9 +19,9 @@ module Karafka
 
      # Method which runs app
      def run
-        bind_on_sigint
-        bind_on_sigquit
-        bind_on_sigterm
+        process.on_sigint { stop_supervised }
+        process.on_sigquit { stop_supervised }
+        process.on_sigterm { stop_supervised }
        start_supervised
      end
 
@@ -39,35 +39,24 @@ module Karafka
        Karafka::Process.instance
      end
 
-      # What should happen when we decide to quit with sigint
-      def bind_on_sigint
-        process.on_sigint { stop_supervised }
-      end
-
-      # What should happen when we decide to quit with sigquit
-      def bind_on_sigquit
-        process.on_sigquit { stop_supervised }
-      end
-
-      # What should happen when we decide to quit with sigterm
-      def bind_on_sigterm
-        process.on_sigterm { stop_supervised }
-      end
-
      # Starts Karafka with a supervision
      # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
      #   finish loop (and it won't happen until we explicitly want to stop)
      def start_supervised
-        process.supervise do
-          Karafka::App.run!
-          Karafka::Fetcher.call
-        end
+        process.supervise
+        Karafka::App.run!
+        Karafka::Fetcher.call
      end
 
      # Stops Karafka with a supervision (as long as there is a shutdown timeout)
      # If consumers won't stop in a given timeframe, it will force them to exit
      def stop_supervised
-        Karafka.monitor.instrument('server.stop', {})
+        # Because this is called in the trap context, there is a chance that instrumentation
+        # listeners contain things that aren't allowed from within a trap context.
+        # To bypass that (instead of telling users not to do things they need to)
+        # we spin up a thread to instrument server.stop and server.stop.error and wait until
+        # they're finished
+        Thread.new { Karafka.monitor.instrument('server.stop', {}) }.join
 
        Karafka::App.stop!
        # If there is no shutdown timeout, we don't exit and wait until all the consumers
@@ -82,7 +71,9 @@ module Karafka
          sleep SUPERVISION_SLEEP
        end
 
-        Karafka.monitor.instrument('server.stop.error', {})
+        raise Errors::ForcefulShutdown
+      rescue Errors::ForcefulShutdown => error
+        Thread.new { Karafka.monitor.instrument('server.stop.error', error: error) }.join
        # We're done waiting, let's kill them!
        consumer_threads.each(&:terminate)
 
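The thread dance in `stop_supervised` works around a Ruby VM rule: a Mutex cannot be acquired inside a signal handler, and `Logger` (a typical instrumentation listener target) synchronizes its writes with one. A standalone repro of the problem and the same workaround:

```ruby
require 'logger'

logger = Logger.new($stdout)

trap('INT') do
  # Logger's internal lock cannot be taken in a trap context, so this write
  # fails with "can't be called from trap context":
  logger.info('shutting down')
  # Running the write in a short-lived thread moves it out of the trap
  # context; joining keeps shutdown ordering deterministic:
  Thread.new { logger.info('shutting down') }.join
end

puts 'Press Ctrl-C to trigger the handler'
sleep
```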
data/lib/karafka/setup/config.rb CHANGED
@@ -73,7 +73,7 @@ module Karafka
      # Time that a given partition will be paused from fetching messages, when message
      # consumption fails. It allows us to process other partitions, while the error is being
      # resolved and also "slows" things down, so it prevents from "eating" up all messages and
-      # consuming them with failed code
+      # consuming them with failed code. Use `nil` if you want to pause forever and never retry.
      setting :pause_timeout, 10
      # option offset_commit_interval [Integer] the interval between offset commits,
      #   in seconds.
@@ -85,6 +85,13 @@ module Karafka
      # option heartbeat_interval [Integer] the interval between heartbeats; must be less
      #   than the session window.
      setting :heartbeat_interval, 10
+      # option offset_retention_time [Integer] The length of the retention window, known as
+      #   offset retention time
+      setting :offset_retention_time, nil
+      # option fetcher_max_queue_size [Integer] max number of items in the fetch queue that
+      #   are stored for further processing. Note, that each item in the queue represents a
+      #   response from a single broker
+      setting :fetcher_max_queue_size, 100
      # option max_bytes_per_partition [Integer] the maximum amount of data fetched
      #   from a single partition at a time.
      setting :max_bytes_per_partition, 1_048_576
@@ -111,9 +118,6 @@ module Karafka
      # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
      #   Kafka cluster that went down (in seconds)
      setting :reconnect_timeout, 5
-      # option offset_retention_time [Integer] The length of the retention window, known as
-      #   offset retention time
-      setting :offset_retention_time, nil
      # option connect_timeout [Integer] Sets the number of seconds to wait while connecting to
      #   a broker for the first time. When ruby-kafka initializes, it needs to connect to at
      #   least one host.
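Together with the schema changes above, an application can opt into the new knobs at setup time. An illustrative `karafka.rb` fragment (values are examples, not recommendations):

```ruby
class App < Karafka::App
  setup do |config|
    config.client_id = 'example_app'
    # plaintext:// and ssl:// are now accepted broker URI schemes:
    config.kafka.seed_brokers = %w[plaintext://localhost:9092]
    # nil pauses a failing partition indefinitely instead of retrying:
    config.kafka.pause_timeout = nil
    # cap on buffered broker responses awaiting processing:
    config.kafka.fetcher_max_queue_size = 50
  end
end
```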
data/lib/karafka/setup/configurators/water_drop.rb CHANGED
@@ -13,7 +13,7 @@ module Karafka
      ::WaterDrop.setup do |water_config|
        water_config.deliver = true
 
-        config.to_h.except(:kafka).each do |k, v|
+        config.to_h.reject { |k, _v| k == :kafka }.each do |k, v|
          key_assignment = :"#{k}="
          next unless water_config.respond_to?(key_assignment)
          water_config.public_send(key_assignment, v)
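Swapping `Hash#except` for `reject` drops the reliance on ActiveSupport's core extension being loaded at this point; in plain Ruby the two are equivalent here:

```ruby
# Plain-Ruby stand-in for ActiveSupport's Hash#except(:kafka), as used above:
config = { client_id: 'example_app', kafka: { seed_brokers: [] } }
config.reject { |k, _v| k == :kafka } # => {:client_id=>"example_app"}
```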
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
 # Main module namespace
 module Karafka
   # Current Karafka version
-  VERSION = '1.2.0'
+  VERSION = '1.2.7'
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 1.2.0
+  version: 1.2.7
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-03-23 00:00:00.000000000 Z
+date: 2018-09-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -144,14 +144,14 @@ dependencies:
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 0.5.3
+      version: '0.6'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 0.5.3
+        version: '0.6'
 - !ruby/object:Gem::Dependency
   name: thor
   requirement: !ruby/object:Gem::Requirement
@@ -172,14 +172,14 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: '1.2'
+      version: 1.2.4
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1.2'
+        version: 1.2.4
 description: Framework used to simplify Apache Kafka based Ruby applications development
 email:
 - maciej@coditsu.io
@@ -222,8 +222,9 @@ files:
 - lib/karafka/cli/info.rb
 - lib/karafka/cli/install.rb
 - lib/karafka/cli/server.rb
+- lib/karafka/connection/api_adapter.rb
+- lib/karafka/connection/builder.rb
 - lib/karafka/connection/client.rb
-- lib/karafka/connection/config_adapter.rb
 - lib/karafka/connection/delegator.rb
 - lib/karafka/connection/listener.rb
 - lib/karafka/consumers/callbacks.rb
@@ -278,7 +279,8 @@ homepage: https://github.com/karafka/karafka
 licenses:
 - MIT
 metadata: {}
-post_install_message:
+post_install_message: "\e[93mWarning:\e[0m If you're using Kafka 0.10, please lock
+  ruby-kafka in your Gemfile to version '0.6.8':\ngem 'ruby-kafka', '~> 0.6.8'\n"
 rdoc_options: []
 require_paths:
 - lib
@@ -294,7 +296,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.7.6
+rubygems_version: 2.7.4
 signing_key:
 specification_version: 4
 summary: Ruby based framework for working with Apache Kafka