karafka 1.2.2 → 1.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9ba856816cda2f5e766137d0e61b9a2a4c554a462cbcf4b51816e8eb5e5b20a5
- data.tar.gz: a12c0dc26c94916f4da4ceb7f195e429ab70011379b1475cc05e3fbb11a7de7d
+ metadata.gz: dbcc16b7a1dadd11283c52b9e9edd838a0f793e23aebf990aba78baf24c6efa1
+ data.tar.gz: a4da64f172baf0700faae318648b3007ef8406ce861122aa13104daae06213e6
  SHA512:
- metadata.gz: 8d861156abd5026ef14ca5e029dda5f3c58829b6929852076c9da528f42c9e6c4ad81d3309cf7f2d4f08e421e7fe6f5f3d0a3b5ea3c5903e2ce9a773b5ebda47
- data.tar.gz: 0257b70be38e2cfc9841dead5fb475a38523eb5064f6930ff062c84bdae746354ab37cb29028c9275089fd935f62c40fd5a8d1573a0c25ab63625aac922576d1
+ metadata.gz: b23b92b6c084d934cf64425f0d6a88ec63300972dd1f6c047f8e99be5e7f6233d2ca89af37d562fd9acee2025f60f8fdf4159bff1222ccb624c5b61526696922
+ data.tar.gz: d0ff78e7e938070b95d37b38cde37729a28cf26cc1bb47706fe848a8fe18a5a58f078b1f384e3ecefdcc07861cc4a18e66a68e8c674116f5e773acf2ea8988bb
@@ -1,5 +1,12 @@
  # Karafka framework changelog

+ ## 1.2.3
+ - #313 - support PLAINTEXT and SSL for scheme
+ - #320 - Pausing indefinitely with nil pause timeout doesn't work
+ - #318 - Partition pausing doesn't work with custom topic mappers
+ - Rename ConfigAdapter to ApiAdapter to better reflect what it does
+ - #317 - Manual offset committing doesn't work with custom topic mappers
+
  ## 1.2.2
  - #312 - Broken for ActiveSupport 5.2.0

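The two behavioural fixes above (#313 and #320) surface as configuration options. A minimal sketch of a Karafka 1.2 setup block exercising both; the app class, client id and broker address are hypothetical:

    class KarafkaApp < Karafka::App
      setup do |config|
        config.client_id = 'example_app'
        # #313: plaintext:// and ssl:// are now accepted alongside kafka:// and kafka+ssl://
        config.kafka.seed_brokers = %w[plaintext://localhost:9092]
        # #320: nil pauses a failing partition indefinitely instead of retrying after a timeout
        config.kafka.pause_timeout = nil
      end
    end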
data/Gemfile CHANGED
@@ -5,6 +5,7 @@ source 'https://rubygems.org'
  gemspec

  group :development, :test do
+ gem 'benchmark-ips'
  gem 'rspec'
  gem 'simplecov'
  gem 'timecop'
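benchmark-ips joins the development group; it supports micro-benchmarks such as the dup-versus-tap comparison referenced later in ApiAdapter#mark_message_as_processed. A rough sketch of that kind of benchmark; the hash contents and labels are illustrative, not part of the gem:

    require 'benchmark/ips'

    params = { 'topic' => 'events', 'payload' => '{}' }

    Benchmark.ips do |x|
      # duplicate the hash, then reassign the mapped topic name
      x.report('dup') { d = params.dup; d['topic'] = 'mapped.events'; [d] }
      # the same work expressed through Object#tap
      x.report('tap') { [params.dup.tap { |d| d['topic'] = 'mapped.events' }] }
      x.compare!
    end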
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- karafka (1.2.2)
+ karafka (1.2.3)
  activesupport (>= 4.0)
  dry-configurable (~> 0.7)
  dry-inflector (~> 0.1.1)
@@ -23,10 +23,11 @@ GEM
  i18n (>= 0.7, < 2)
  minitest (~> 5.1)
  tzinfo (~> 1.1)
+ benchmark-ips (2.7.2)
  concurrent-ruby (1.0.5)
- delivery_boy (0.2.4)
- king_konf (~> 0.1.8)
- ruby-kafka (~> 0.5.1)
+ delivery_boy (0.2.6)
+ king_konf (~> 0.2)
+ ruby-kafka (~> 0.5)
  diff-lcs (1.3)
  docile (1.3.0)
  dry-configurable (0.7.0)
@@ -36,12 +37,12 @@ GEM
  dry-configurable (~> 0.1, >= 0.1.3)
  dry-core (0.4.5)
  concurrent-ruby (~> 1.0)
- dry-equalizer (0.2.0)
+ dry-equalizer (0.2.1)
  dry-events (0.1.0)
  concurrent-ruby (~> 1.0)
  dry-core (~> 0.4)
  dry-equalizer (~> 0.2)
- dry-inflector (0.1.1)
+ dry-inflector (0.1.2)
  dry-logic (0.4.2)
  dry-container (~> 0.2, >= 0.2.6)
  dry-core (~> 0.2)
@@ -68,11 +69,11 @@ GEM
  dry-types (~> 0.12.0)
  envlogic (1.1.0)
  dry-inflector (~> 0.1)
- i18n (1.0.0)
+ i18n (1.0.1)
  concurrent-ruby (~> 1.0)
  inflecto (0.0.2)
  json (2.1.0)
- king_konf (0.1.10)
+ king_konf (0.2.0)
  minitest (5.11.3)
  multi_json (1.13.1)
  null-logger (0.1.5)
@@ -92,7 +93,7 @@ GEM
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.7.0)
  rspec-support (3.7.1)
- ruby-kafka (0.5.4)
+ ruby-kafka (0.6.2)
  simplecov (0.16.1)
  docile (~> 1.1)
  json (>= 1.8, < 3)
@@ -114,6 +115,7 @@ PLATFORMS
  ruby

  DEPENDENCIES
+ benchmark-ips
  karafka!
  rspec
  simplecov
@@ -14,15 +14,15 @@ module Karafka
  # @note All other settings will be passed to Kafka.new method invocation.
  # All elements in this hash are just edge cases
  # @return [Hash] hash with proper sections on what to proxy where in Ruby-Kafka
- def config_adapter
+ def api_adapter
  {
  consumer: %i[
  session_timeout offset_commit_interval offset_commit_threshold
  offset_retention_time heartbeat_interval
  ],
- subscription: %i[start_from_beginning max_bytes_per_partition],
- consuming: %i[min_bytes max_bytes max_wait_time],
- pausing: %i[pause_timeout],
+ subscribe: %i[start_from_beginning max_bytes_per_partition],
+ consumption: %i[min_bytes max_bytes max_wait_time],
+ pause: %i[pause_timeout],
  # All the options that are under kafka config namespace, but are not used
  # directly with kafka api, but from the Karafka user perspective, they are
  # still related to kafka. They should not be proxied anywhere
@@ -32,7 +32,7 @@ module Karafka

  # @return [Array<Symbol>] properties that can be set on a per topic level
  def topic
- (config_adapter[:subscription] + %i[
+ (api_adapter[:subscribe] + %i[
  backend
  name
  parser
@@ -48,10 +48,10 @@ module Karafka
  # Thanks to this solution, if any new setting is available for ruby-kafka, we just need
  # to add it to our configuration class and it will be handled automatically.
  def consumer_group
- # @note We don't ignore the config_adapter[:ignored] values as they should be ignored
+ # @note We don't ignore the api_adapter[:ignored] values as they should be ignored
  # only when proxying details to ruby-kafka. We use ignored fields internally in karafka
- ignored_settings = config_adapter[:subscription]
- defined_settings = config_adapter.values.flatten
+ ignored_settings = api_adapter[:subscribe]
+ defined_settings = api_adapter.values.flatten
  karafka_settings = %i[batch_fetching]
  # This is a dirty and bad hack of dry-configurable to get keys before setting values
  dynamically_proxied = Karafka::Setup::Config
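After the rename, the mapping is keyed by the ruby-kafka calls it feeds (subscribe, consumption, pause) rather than by loose nouns. Assuming the hash also keeps the ignored section implied by the comments above, inspecting it would look roughly like:

    # Sketch only: the :ignored key is inferred from the surrounding comments and from
    # AttributesMap.consumer_group, which skips those values when proxying to ruby-kafka.
    Karafka::AttributesMap.api_adapter.keys
    # => [:consumer, :subscribe, :consumption, :pause, :ignored]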
@@ -3,22 +3,22 @@
  module Karafka
  # Namespace for all the things related to Kafka connection
  module Connection
- # Mapper used to convert our internal settings into ruby-kafka settings
+ # Mapper used to convert our internal settings into ruby-kafka settings based on their
+ # API requirements.
  # Since ruby-kafka has more and more options and there are few "levels" on which
  # we have to apply them (despite the fact, that in Karafka you configure all of it
  # in one place), we have to remap it into what ruby-kafka driver requires
  # @note The good thing about Kafka.new method is that it ignores all options that
  # do nothing. So we don't have to worry about injecting our internal settings
  # into the client and breaking stuff
- module ConfigAdapter
+ module ApiAdapter
  class << self
  # Builds all the configuration settings for Kafka.new method
- # @param _consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
  # @return [Array<Hash>] Array with all the client arguments including hash with all
  # the settings required by Kafka.new method
  # @note We return array, so we can inject any arguments we want, in case of changes in the
  # raw driver
- def client(_consumer_group)
+ def client
  # This one is a default that takes all the settings except special
  # cases defined in the map
  settings = {
@@ -29,9 +29,9 @@ module Karafka
  kafka_configs.each do |setting_name, setting_value|
  # All options for config adapter should be ignored as we're just interested
  # in what is left, as we want to pass all the options that are "typical"
- # and not listed in the config_adapter special cases mapping. All the values
- # from the config_adapter mapping go somewhere else, not to the client directly
- next if AttributesMap.config_adapter.values.flatten.include?(setting_name)
+ # and not listed in the api_adapter special cases mapping. All the values
+ # from the api_adapter mapping go somewhere else, not to the client directly
+ next if AttributesMap.api_adapter.values.flatten.include?(setting_name)

  settings[setting_name] = setting_value
  end
@@ -58,26 +58,54 @@ module Karafka
  # @return [Array<Hash>] Array with all the arguments required by consuming method
  # including hash with all the settings required by
  # Kafka::Consumer#consume_each_message and Kafka::Consumer#consume_each_batch method
- def consuming(consumer_group)
- settings = {
- automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
- }
- [sanitize(fetch_for(:consuming, consumer_group, settings))]
+ def consumption(consumer_group)
+ [
+ sanitize(
+ fetch_for(
+ :consumption,
+ consumer_group,
+ automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
+ )
+ )
+ ]
  end

  # Builds all the configuration settings for kafka consumer#subscribe method
  # @param topic [Karafka::Routing::Topic] topic that holds details for a given subscription
  # @return [Hash] hash with all the settings required by kafka consumer#subscribe method
- def subscription(topic)
- settings = fetch_for(:subscription, topic)
+ def subscribe(topic)
+ settings = fetch_for(:subscribe, topic)
  [Karafka::App.config.topic_mapper.outgoing(topic.name), sanitize(settings)]
  end

  # Builds all the configuration settings required by kafka consumer#pause method
+ # @param topic [String] topic that we want to pause
+ # @param partition [Integer] partition number that we want to pause
  # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
- # @return [Hash] hash with all the settings required to pause kafka consumer
- def pausing(consumer_group)
- { timeout: consumer_group.pause_timeout }
+ # @return [Array] array with all the details required to pause kafka consumer
+ def pause(topic, partition, consumer_group)
+ [
+ Karafka::App.config.topic_mapper.outgoing(topic),
+ partition,
+ { timeout: consumer_group.pause_timeout }
+ ]
+ end
+
+ # Remaps topic details taking the topic mapper feature into consideration.
+ # @param params [Karafka::Params::Params] params instance
+ # @return [Array] array with all the details needed by ruby-kafka to mark message
+ # as processed
+ # @note When default empty topic mapper is used, no need for any conversion as the
+ # internal and external format are exactly the same
+ def mark_message_as_processed(params)
+ # Majority of non heroku users don't use custom topic mappers. No need to change
+ # anything when it is a default mapper that does not change anything
+ return [params] if Karafka::App.config.topic_mapper == Karafka::Routing::TopicMapper
+
+ # @note We don't use tap as it is around 13% slower than non-dup version
+ dupped = params.dup
+ dupped['topic'] = Karafka::App.config.topic_mapper.outgoing(params.topic)
+ [dupped]
  end

  private
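The new pause and mark_message_as_processed builders are what make #317 and #318 work with custom topic mappers: internal topic names are translated back to their external form before ruby-kafka sees them. A sketch with a hypothetical prefixing mapper (modelled loosely on Heroku-style topic namespacing; the prefix and return values are illustrative):

    # Hypothetical mapper: Karafka routes use the short name, Kafka stores the prefixed one
    class PrefixMapper
      def incoming(topic)
        topic.to_s.sub('prod0123.', '')
      end

      def outgoing(topic)
        "prod0123.#{topic}"
      end
    end

    # With such a mapper configured as the topic_mapper, the adapter would return roughly:
    #   ApiAdapter.pause('events', 0, consumer_group)
    #   # => ['prod0123.events', 0, { timeout: consumer_group.pause_timeout }]
    #   ApiAdapter.mark_message_as_processed(params)
    #   # => a one-element array with a params copy whose 'topic' is 'prod0123.events'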
@@ -90,7 +118,7 @@ module Karafka
  def fetch_for(namespace_key, route_layer, preexisting_settings = {})
  kafka_configs.each_key do |setting_name|
  # Ignore settings that are not related to our namespace
- next unless AttributesMap.config_adapter[namespace_key].include?(setting_name)
+ next unless AttributesMap.api_adapter[namespace_key].include?(setting_name)
  # Ignore settings that are already initialized
  # In case they are in preexisting settings fetched differently
  next if preexisting_settings.keys.include?(setting_name)
@@ -0,0 +1,16 @@
+ # frozen_string_literal: true
+
+ module Karafka
+ module Connection
+ # Builder used to construct Kafka client
+ module Builder
+ class << self
+ # Builds a Kafka::Client instance that we use to work with Kafka cluster
+ # @return [::Kafka::Client] returns a Kafka client
+ def call
+ Kafka.new(*ApiAdapter.client)
+ end
+ end
+ end
+ end
+ end
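Builder centralizes construction of the Kafka client; as the client changes below show, Client#kafka_consumer now calls it in place of the removed private kafka method. Roughly, with consumer_group standing in for a Karafka::Routing::ConsumerGroup instance:

    kafka_client = Karafka::Connection::Builder.call
    # => ::Kafka::Client built via Kafka.new(*ApiAdapter.client)
    consumer = kafka_client.consumer(
      *Karafka::Connection::ApiAdapter.consumer(consumer_group)
    )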
@@ -23,7 +23,7 @@ module Karafka
  # @yieldparam [Array<Kafka::FetchedMessage>] kafka fetched messages
  # @note This will yield with raw messages - no preprocessing or reformatting.
  def fetch_loop
- settings = ConfigAdapter.consuming(consumer_group)
+ settings = ApiAdapter.consumption(consumer_group)

  if consumer_group.batch_fetching
  kafka_consumer.each_batch(*settings) { |batch| yield(batch.messages) }
@@ -66,10 +66,7 @@ module Karafka
  # @param topic [String] topic that we want to pause
  # @param partition [Integer] partition number that we want to pause
  def pause(topic, partition)
- settings = ConfigAdapter.pausing(consumer_group)
- timeout = settings[:timeout]
- raise(Errors::InvalidPauseTimeout, timeout) unless timeout.positive?
- kafka_consumer.pause(topic, partition, settings)
+ kafka_consumer.pause(*ApiAdapter.pause(topic, partition, consumer_group))
  end

  # Marks a given message as consumed and commits the offsets
@@ -77,7 +74,9 @@ module Karafka
  # that offset commit happen asap in case of a crash
  # @param [Karafka::Params::Params] params message that we want to mark as processed
  def mark_as_consumed(params)
- kafka_consumer.mark_message_as_processed(params)
+ kafka_consumer.mark_message_as_processed(
+ *ApiAdapter.mark_message_as_processed(params)
+ )
  # Trigger an immediate, blocking offset commit in order to minimize the risk of crashing
  # before the automatic triggers have kicked in.
  kafka_consumer.commit_offsets
@@ -90,11 +89,13 @@ module Karafka
  # @return [Kafka::Consumer] returns a ready to consume Kafka consumer
  # that is set up to consume from topics of a given consumer group
  def kafka_consumer
- @kafka_consumer ||= kafka.consumer(
- *ConfigAdapter.consumer(consumer_group)
+ # @note We don't cache the connection internally because we cache kafka_consumer that uses
+ # kafka client object instance
+ @kafka_consumer ||= Builder.call.consumer(
+ *ApiAdapter.consumer(consumer_group)
  ).tap do |consumer|
  consumer_group.topics.each do |topic|
- consumer.subscribe(*ConfigAdapter.subscription(topic))
+ consumer.subscribe(*ApiAdapter.subscribe(topic))
  end
  end
  rescue Kafka::ConnectionError
@@ -105,13 +106,6 @@ module Karafka
  # down the road
  raise
  end
-
- # @return [Kafka] returns a Kafka
- # @note We don't cache it internally because we cache kafka_consumer that uses kafka
- # object instance
- def kafka
- Kafka.new(*ConfigAdapter.client(consumer_group))
- end
  end
  end
  end
@@ -41,9 +41,6 @@ module Karafka
  # This should never happen and if it does, please contact us
  MissingClient = Class.new(BaseError)

- # Raised when we attemp to pause a partition but the pause timeout is equal to 0
- InvalidPauseTimeout = Class.new(BaseError)
-
  # Raised when want to hook up to an event that is not registered and supported
  UnregisteredMonitorEvent = Class.new(BaseError)

@@ -34,10 +34,8 @@ module Karafka

  # Method catches all HANDLED_SIGNALS and performs appropriate callbacks (if defined)
  # @note If there are no callbacks, this method will just ignore a given signal that was sent
- # @yield [Block] block of code that we want to execute and supervise
  def supervise
  HANDLED_SIGNALS.each { |signal| trap_signal(signal) }
- yield
  end

  private
@@ -7,7 +7,7 @@ module Karafka
  # Valid uri schemas of Kafka broker url
  # The ||= is due to the behavior of require_all that resolves dependencies
  # but sometimes loads things twice
- URI_SCHEMES ||= %w[kafka kafka+ssl].freeze
+ URI_SCHEMES ||= %w[kafka kafka+ssl plaintext ssl].freeze

  # Available sasl scram mechanism of authentication (plus nil)
  SASL_SCRAM_MECHANISMS ||= %w[sha256 sha512].freeze
@@ -31,7 +31,7 @@ module Karafka
  required(:id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
  required(:seed_brokers).filled { each(:broker_schema?) }
  required(:session_timeout).filled { int? | float? }
- required(:pause_timeout).filled { (int? | float?) & gteq?(0) }
+ required(:pause_timeout) { none? | ((int? | float?) & gteq?(0)) }
  required(:offset_commit_interval) { int? | float? }
  required(:offset_commit_threshold).filled(:int?)
  required(:offset_retention_time) { none?.not > int? }
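The relaxed rule accepts nil in addition to non-negative numbers, which is what lets indefinite pausing validate. Illustratively, assuming this file defines Karafka::Schemas::ConsumerGroup (as in Karafka 1.2) and config is an otherwise-valid consumer group hash:

    schema = Karafka::Schemas::ConsumerGroup
    schema.call(config.merge(pause_timeout: 10)).success?   # => true
    schema.call(config.merge(pause_timeout: 0)).success?    # => true
    schema.call(config.merge(pause_timeout: nil)).success?  # => true  (pause forever)
    schema.call(config.merge(pause_timeout: -1)).success?   # => false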
@@ -19,9 +19,9 @@ module Karafka

  # Method which runs app
  def run
- bind_on_sigint
- bind_on_sigquit
- bind_on_sigterm
+ process.on_sigint { stop_supervised }
+ process.on_sigquit { stop_supervised }
+ process.on_sigterm { stop_supervised }
  start_supervised
  end

@@ -39,29 +39,13 @@ module Karafka
  Karafka::Process.instance
  end

- # What should happen when we decide to quit with sigint
- def bind_on_sigint
- process.on_sigint { stop_supervised }
- end
-
- # What should happen when we decide to quit with sigquit
- def bind_on_sigquit
- process.on_sigquit { stop_supervised }
- end
-
- # What should happen when we decide to quit with sigterm
- def bind_on_sigterm
- process.on_sigterm { stop_supervised }
- end
-
  # Starts Karafka with a supervision
  # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
  # finish loop (and it won't happen until we explicitly want to stop)
  def start_supervised
- process.supervise do
- Karafka::App.run!
- Karafka::Fetcher.call
- end
+ process.supervise
+ Karafka::App.run!
+ Karafka::Fetcher.call
  end

  # Stops Karafka with a supervision (as long as there is a shutdown timeout)
@@ -73,7 +73,7 @@ module Karafka
  # Time that a given partition will be paused from fetching messages, when message
  # consumption fails. It allows us to process other partitions, while the error is being
  # resolved and also "slows" things down, so it prevents from "eating" up all messages and
- # consuming them with failed code
+ # consuming them with failed code. Use `nil` if you want to pause forever and never retry.
  setting :pause_timeout, 10
  # option offset_commit_interval [Integer] the interval between offset commits,
  # in seconds.
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '1.2.2'
+ VERSION = '1.2.3'
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
- version: 1.2.2
+ version: 1.2.3
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-04-10 00:00:00.000000000 Z
+ date: 2018-05-09 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: activesupport
@@ -222,8 +222,9 @@ files:
  - lib/karafka/cli/info.rb
  - lib/karafka/cli/install.rb
  - lib/karafka/cli/server.rb
+ - lib/karafka/connection/api_adapter.rb
+ - lib/karafka/connection/builder.rb
  - lib/karafka/connection/client.rb
- - lib/karafka/connection/config_adapter.rb
  - lib/karafka/connection/delegator.rb
  - lib/karafka/connection/listener.rb
  - lib/karafka/consumers/callbacks.rb
@@ -294,7 +295,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.7.6
+ rubygems_version: 2.7.4
  signing_key:
  specification_version: 4
  summary: Ruby based framework for working with Apache Kafka