karafka 1.4.15 → 2.0.0.alpha1

Files changed (128)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +3 -0
  4. data/.github/workflows/ci.yml +74 -24
  5. data/.ruby-version +1 -1
  6. data/CHANGELOG.md +38 -39
  7. data/Gemfile +6 -0
  8. data/Gemfile.lock +50 -52
  9. data/LICENSE +14 -0
  10. data/LICENSE-COMM +89 -0
  11. data/LICENSE-LGPL +165 -0
  12. data/README.md +59 -14
  13. data/bin/benchmarks +85 -0
  14. data/bin/create_token +28 -0
  15. data/bin/integrations +160 -0
  16. data/bin/stress +13 -0
  17. data/certs/karafka-pro.pem +11 -0
  18. data/certs/mensfeld.pem +23 -24
  19. data/config/errors.yml +4 -38
  20. data/docker-compose.yml +11 -3
  21. data/karafka.gemspec +10 -20
  22. data/lib/active_job/consumer.rb +22 -0
  23. data/lib/active_job/karafka.rb +18 -0
  24. data/lib/active_job/queue_adapters/karafka_adapter.rb +29 -0
  25. data/lib/active_job/routing_extensions.rb +15 -0
  26. data/lib/karafka/app.rb +13 -20
  27. data/lib/karafka/base_consumer.rb +103 -34
  28. data/lib/karafka/cli/base.rb +4 -4
  29. data/lib/karafka/cli/info.rb +43 -8
  30. data/lib/karafka/cli/install.rb +3 -8
  31. data/lib/karafka/cli/server.rb +17 -30
  32. data/lib/karafka/cli.rb +4 -11
  33. data/lib/karafka/connection/client.rb +279 -93
  34. data/lib/karafka/connection/listener.rb +137 -38
  35. data/lib/karafka/connection/messages_buffer.rb +57 -0
  36. data/lib/karafka/connection/pauses_manager.rb +46 -0
  37. data/lib/karafka/connection/rebalance_manager.rb +62 -0
  38. data/lib/karafka/contracts/config.rb +25 -7
  39. data/lib/karafka/contracts/consumer_group.rb +0 -173
  40. data/lib/karafka/contracts/consumer_group_topic.rb +17 -7
  41. data/lib/karafka/contracts/server_cli_options.rb +1 -9
  42. data/lib/karafka/contracts.rb +1 -1
  43. data/lib/karafka/env.rb +46 -0
  44. data/lib/karafka/errors.rb +14 -18
  45. data/lib/karafka/helpers/multi_delegator.rb +2 -2
  46. data/lib/karafka/instrumentation/callbacks/error.rb +40 -0
  47. data/lib/karafka/instrumentation/callbacks/statistics.rb +42 -0
  48. data/lib/karafka/instrumentation/monitor.rb +14 -21
  49. data/lib/karafka/instrumentation/stdout_listener.rb +64 -91
  50. data/lib/karafka/instrumentation.rb +21 -0
  51. data/lib/karafka/licenser.rb +65 -0
  52. data/lib/karafka/{params → messages}/batch_metadata.rb +7 -13
  53. data/lib/karafka/messages/builders/batch_metadata.rb +30 -0
  54. data/lib/karafka/messages/builders/message.rb +38 -0
  55. data/lib/karafka/messages/builders/messages.rb +40 -0
  56. data/lib/karafka/{params/params.rb → messages/message.rb} +7 -12
  57. data/lib/karafka/messages/messages.rb +64 -0
  58. data/lib/karafka/{params → messages}/metadata.rb +4 -6
  59. data/lib/karafka/messages/seek.rb +9 -0
  60. data/lib/karafka/patches/rdkafka/consumer.rb +22 -0
  61. data/lib/karafka/processing/executor.rb +96 -0
  62. data/lib/karafka/processing/executors_buffer.rb +49 -0
  63. data/lib/karafka/processing/jobs/base.rb +18 -0
  64. data/lib/karafka/processing/jobs/consume.rb +28 -0
  65. data/lib/karafka/processing/jobs/revoked.rb +22 -0
  66. data/lib/karafka/processing/jobs/shutdown.rb +23 -0
  67. data/lib/karafka/processing/jobs_queue.rb +121 -0
  68. data/lib/karafka/processing/worker.rb +57 -0
  69. data/lib/karafka/processing/workers_batch.rb +22 -0
  70. data/lib/karafka/railtie.rb +65 -0
  71. data/lib/karafka/routing/builder.rb +15 -14
  72. data/lib/karafka/routing/consumer_group.rb +10 -18
  73. data/lib/karafka/routing/consumer_mapper.rb +1 -2
  74. data/lib/karafka/routing/router.rb +1 -1
  75. data/lib/karafka/routing/subscription_group.rb +53 -0
  76. data/lib/karafka/routing/subscription_groups_builder.rb +51 -0
  77. data/lib/karafka/routing/topic.rb +47 -25
  78. data/lib/karafka/runner.rb +59 -0
  79. data/lib/karafka/serialization/json/deserializer.rb +6 -15
  80. data/lib/karafka/server.rb +62 -25
  81. data/lib/karafka/setup/config.rb +86 -159
  82. data/lib/karafka/status.rb +13 -3
  83. data/lib/karafka/templates/example_consumer.rb.erb +16 -0
  84. data/lib/karafka/templates/karafka.rb.erb +14 -50
  85. data/lib/karafka/time_trackers/base.rb +19 -0
  86. data/lib/karafka/time_trackers/pause.rb +84 -0
  87. data/lib/karafka/time_trackers/poll.rb +65 -0
  88. data/lib/karafka/version.rb +1 -1
  89. data/lib/karafka.rb +30 -44
  90. data.tar.gz.sig +0 -0
  91. metadata +96 -132
  92. metadata.gz.sig +0 -0
  93. data/MIT-LICENCE +0 -18
  94. data/lib/karafka/assignment_strategies/round_robin.rb +0 -13
  95. data/lib/karafka/attributes_map.rb +0 -63
  96. data/lib/karafka/backends/inline.rb +0 -16
  97. data/lib/karafka/base_responder.rb +0 -226
  98. data/lib/karafka/cli/flow.rb +0 -48
  99. data/lib/karafka/cli/missingno.rb +0 -19
  100. data/lib/karafka/code_reloader.rb +0 -67
  101. data/lib/karafka/connection/api_adapter.rb +0 -158
  102. data/lib/karafka/connection/batch_delegator.rb +0 -55
  103. data/lib/karafka/connection/builder.rb +0 -23
  104. data/lib/karafka/connection/message_delegator.rb +0 -36
  105. data/lib/karafka/consumers/batch_metadata.rb +0 -10
  106. data/lib/karafka/consumers/callbacks.rb +0 -71
  107. data/lib/karafka/consumers/includer.rb +0 -64
  108. data/lib/karafka/consumers/responders.rb +0 -24
  109. data/lib/karafka/consumers/single_params.rb +0 -15
  110. data/lib/karafka/contracts/responder_usage.rb +0 -54
  111. data/lib/karafka/fetcher.rb +0 -42
  112. data/lib/karafka/helpers/class_matcher.rb +0 -88
  113. data/lib/karafka/helpers/config_retriever.rb +0 -46
  114. data/lib/karafka/helpers/inflector.rb +0 -26
  115. data/lib/karafka/params/builders/batch_metadata.rb +0 -30
  116. data/lib/karafka/params/builders/params.rb +0 -38
  117. data/lib/karafka/params/builders/params_batch.rb +0 -25
  118. data/lib/karafka/params/params_batch.rb +0 -60
  119. data/lib/karafka/patches/ruby_kafka.rb +0 -47
  120. data/lib/karafka/persistence/client.rb +0 -29
  121. data/lib/karafka/persistence/consumers.rb +0 -45
  122. data/lib/karafka/persistence/topics.rb +0 -48
  123. data/lib/karafka/responders/builder.rb +0 -36
  124. data/lib/karafka/responders/topic.rb +0 -55
  125. data/lib/karafka/routing/topic_mapper.rb +0 -53
  126. data/lib/karafka/serialization/json/serializer.rb +0 -31
  127. data/lib/karafka/setup/configurators/water_drop.rb +0 -36
  128. data/lib/karafka/templates/application_responder.rb.erb +0 -11
data/lib/karafka/setup/config.rb
@@ -17,14 +17,33 @@ module Karafka
       # Contract for checking the config provided by the user
       CONTRACT = Karafka::Contracts::Config.new.freeze
 
-      private_constant :CONTRACT
+      # Defaults for kafka settings that will be overwritten only if not present already
+      KAFKA_DEFAULTS = {
+        'client.id' => 'karafka'
+      }.freeze
+
+      private_constant :CONTRACT, :KAFKA_DEFAULTS
 
       # Available settings
+
+      # Namespace for Pro version related license management. If you use LGPL, no need to worry
+      # about any of this
+      setting :license do
+        # option token [String, false] - license token issued when you acquire a Pro license
+        # Leave false if using the LGPL version and all is going to work just fine :)
+        #
+        # @note By using the commercial components, you accept the LICENSE-COMM commercial license
+        #   terms and conditions
+        setting :token, default: false
+        # option entity [String] for whom the license was issued
+        setting :entity, default: ''
+        # option expires_on [Date] date when the license expires
+        setting :expires_on, default: Date.parse('2100-01-01')
+      end
+
       # option client_id [String] kafka client_id - used to provide
       # default Kafka groups namespaces and identify that app in kafka
-      setting :client_id
-      # What backend do we want to use to process messages
-      setting :backend, default: :inline
+      setting :client_id, default: 'karafka'
       # option logger [Instance] logger that we want to use
       setting :logger, default: ::Karafka::Instrumentation::Logger.new
       # option monitor [Instance] monitor that we want to use (defaults to Karafka::Monitor)
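The license namespace above only matters to Pro users; under LGPL the defaults can stay untouched. A minimal setup sketch (the token value is a placeholder, not a real token format):

    class KarafkaApp < Karafka::App
      setup do |config|
        # Pro only - LGPL users skip this; the defaults keep the license disabled
        config.license.token = 'YOUR_PRO_TOKEN'
      end
    end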
@@ -33,147 +52,36 @@ module Karafka
       # or they need to maintain their own internal consumer group naming conventions, they
       # can easily do it, replacing the default client_id + consumer name pattern concept
       setting :consumer_mapper, default: Routing::ConsumerMapper.new
-      # Mapper used to remap names of topics, so we can have a clean internal topic naming
-      # despite using any Kafka provider that uses namespacing, etc
-      # It needs to implement two methods:
-      #   - #incoming - for remapping from the incoming message to our internal format
-      #   - #outgoing - for remapping from internal topic name into outgoing message
-      setting :topic_mapper, default: Routing::TopicMapper.new
-      # Default serializer for converting whatever we want to send to kafka to json
-      setting :serializer, default: Karafka::Serialization::Json::Serializer.new
+      # option [Boolean] should we reload consumers with each incoming batch, thus effectively
+      # supporting code reload (if someone reloads code), or should we keep the persistence
+      setting :consumer_persistence, default: true
       # Default deserializer for converting incoming data into ruby objects
       setting :deserializer, default: Karafka::Serialization::Json::Deserializer.new
-      # If batch_fetching is true, we will fetch kafka messages in batches instead of 1 by 1
-      # @note Fetching does not equal consuming, see batch_consuming description for details
-      setting :batch_fetching, default: true
-      # If batch_consuming is true, we will have access to #params_batch instead of #params.
-      # #params_batch will contain params received from Kafka (may be more than 1) so we can
-      # process them in batches
-      setting :batch_consuming, default: false
-      # option shutdown_timeout [Integer, nil] the number of seconds after which Karafka no
-      # longer wait for the consumers to stop gracefully but instead we force terminate
+      # option [Boolean] should we leave offset management to the user
+      setting :manual_offset_management, default: false
+      # option max_messages [Integer] how many messages do we want to fetch from Kafka in one go
+      setting :max_messages, default: 100_000
+      # option [Integer] number of milliseconds we can wait while fetching data
+      setting :max_wait_time, default: 10_000
+      # option shutdown_timeout [Integer] the number of milliseconds after which Karafka no
+      # longer waits for the consumers to stop gracefully but instead we force terminate
       # everything.
-      setting :shutdown_timeout, default: 60
-
-      # option kafka [Hash] - optional - kafka configuration options
-      setting :kafka do
-        # Array with at least one host
-        setting :seed_brokers, default: %w[kafka://127.0.0.1:9092]
-        # option session_timeout [Integer] the number of seconds after which, if a client
-        # hasn't contacted the Kafka cluster, it will be kicked out of the group.
-        setting :session_timeout, default: 30
-        # Time that a given partition will be paused from fetching messages, when message
-        # consumption fails. It allows us to process other partitions, while the error is being
-        # resolved and also "slows" things down, so it prevents from "eating" up all messages and
-        # consuming them with failed code. Use `nil` if you want to pause forever and never retry.
-        setting :pause_timeout, default: 10
-        # option pause_max_timeout [Integer, nil] the maximum number of seconds to pause for,
-        # or `nil` if no maximum should be enforced.
-        setting :pause_max_timeout, default: nil
-        # option pause_exponential_backoff [Boolean] whether to enable exponential backoff
-        setting :pause_exponential_backoff, default: false
-        # option offset_commit_interval [Integer] the interval between offset commits,
-        # in seconds.
-        setting :offset_commit_interval, default: 10
-        # option offset_commit_threshold [Integer] the number of messages that can be
-        # processed before their offsets are committed. If zero, offset commits are
-        # not triggered by message consumption.
-        setting :offset_commit_threshold, default: 0
-        # option heartbeat_interval [Integer] the interval between heartbeats; must be less
-        # than the session window.
-        setting :heartbeat_interval, default: 10
-        # option offset_retention_time [Integer] The length of the retention window, known as
-        # offset retention time
-        setting :offset_retention_time, default: nil
-        # option fetcher_max_queue_size [Integer] max number of items in the fetch queue that
-        # are stored for further processing. Note, that each item in the queue represents a
-        # response from a single broker
-        setting :fetcher_max_queue_size, default: 10
-        # option assignment_strategy [Object] a strategy determining the assignment of
-        # partitions to the consumers.
-        setting :assignment_strategy, default: Karafka::AssignmentStrategies::RoundRobin.new
-        # option max_bytes_per_partition [Integer] the maximum amount of data fetched
-        # from a single partition at a time.
-        setting :max_bytes_per_partition, default: 1_048_576
-        # whether to consume messages starting at the beginning or to just consume new messages
-        setting :start_from_beginning, default: true
-        # option resolve_seed_brokers [Boolean] whether to resolve each hostname of the seed
-        # brokers
-        setting :resolve_seed_brokers, default: false
-        # option min_bytes [Integer] the minimum number of bytes to read before
-        # returning messages from the server; if `max_wait_time` is reached, this
-        # is ignored.
-        setting :min_bytes, default: 1
-        # option max_bytes [Integer] the maximum number of bytes to read before returning messages
-        # from each broker.
-        setting :max_bytes, default: 10_485_760
-        # option max_wait_time [Integer, Float] max_wait_time is the maximum number of seconds to
-        # wait before returning data from a single message fetch. By setting this high you also
-        # increase the fetching throughput - and by setting it low you set a bound on latency.
-        # This configuration overrides `min_bytes`, so you'll _always_ get data back within the
-        # time specified. The default value is one second. If you want to have at most five
-        # seconds of latency, set `max_wait_time` to 5. You should make sure
-        # max_wait_time * num brokers + heartbeat_interval is less than session_timeout.
-        setting :max_wait_time, default: 1
-        # option automatically_mark_as_consumed [Boolean] should we automatically mark received
-        # messages as consumed (processed) after non-error consumption
-        setting :automatically_mark_as_consumed, default: true
-        # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
-        # Kafka cluster that went down (in seconds)
-        setting :reconnect_timeout, default: 5
-        # option connect_timeout [Integer] Sets the number of seconds to wait while connecting to
-        # a broker for the first time. When ruby-kafka initializes, it needs to connect to at
-        # least one host.
-        setting :connect_timeout, default: 10
-        # option socket_timeout [Integer] Sets the number of seconds to wait when reading from or
-        # writing to a socket connection to a broker. After this timeout expires the connection
-        # will be killed. Note that some Kafka operations are by definition long-running, such as
-        # waiting for new messages to arrive in a partition, so don't set this value too low
-        setting :socket_timeout, default: 30
-        # option partitioner [Object, nil] the partitioner that should be used by the client
-        setting :partitioner, default: nil
-
-        # SSL authentication related settings
-        # option ca_cert [String, nil] SSL CA certificate
-        setting :ssl_ca_cert, default: nil
-        # option ssl_ca_cert_file_path [String, nil] SSL CA certificate file path
-        setting :ssl_ca_cert_file_path, default: nil
-        # option ssl_ca_certs_from_system [Boolean] Use the CA certs from your system's default
-        # certificate store
-        setting :ssl_ca_certs_from_system, default: false
-        # option ssl_verify_hostname [Boolean] Verify the hostname for client certs
-        setting :ssl_verify_hostname, default: true
-        # option ssl_client_cert [String, nil] SSL client certificate
-        setting :ssl_client_cert, default: nil
-        # option ssl_client_cert_key [String, nil] SSL client certificate password
-        setting :ssl_client_cert_key, default: nil
-        # option sasl_gssapi_principal [String, nil] sasl principal
-        setting :sasl_gssapi_principal, default: nil
-        # option sasl_gssapi_keytab [String, nil] sasl keytab
-        setting :sasl_gssapi_keytab, default: nil
-        # option sasl_plain_authzid [String] The authorization identity to use
-        setting :sasl_plain_authzid, default: ''
-        # option sasl_plain_username [String, nil] The username used to authenticate
-        setting :sasl_plain_username, default: nil
-        # option sasl_plain_password [String, nil] The password used to authenticate
-        setting :sasl_plain_password, default: nil
-        # option sasl_scram_username [String, nil] The username used to authenticate
-        setting :sasl_scram_username, default: nil
-        # option sasl_scram_password [String, nil] The password used to authenticate
-        setting :sasl_scram_password, default: nil
-        # option sasl_scram_mechanism [String, nil] Scram mechanism, either 'sha256' or 'sha512'
-        setting :sasl_scram_mechanism, default: nil
-        # option sasl_over_ssl [Boolean] whether to enforce SSL with SASL
-        setting :sasl_over_ssl, default: true
-        # option ssl_client_cert_chain [String, nil] client cert chain or nil if not used
-        setting :ssl_client_cert_chain, default: nil
-        # option ssl_client_cert_key_password [String, nil] the password required to read
-        # the ssl_client_cert_key
-        setting :ssl_client_cert_key_password, default: nil
-        # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
-        #   implements method token.
-        setting :sasl_oauth_token_provider, default: nil
-      end
+      setting :shutdown_timeout, default: 60_000
+      # option [Integer] number of threads in which we want to do parallel processing
+      setting :concurrency, default: 5
+      # option [Integer] how long we should wait upon a processing error
+      setting :pause_timeout, default: 1_000
+      # option [Integer] what is the max timeout in case of an exponential backoff
+      setting :pause_max_timeout, default: 30_000
+      # option [Boolean] should we use exponential backoff
+      setting :pause_with_exponential_backoff, default: true
+      # option [::WaterDrop::Producer, nil]
+      # Unless configured, will be created once Karafka is configured, based on the user setup
+      setting :producer, default: nil
+
+      # rdkafka default options
+      # @see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+      setting :kafka, default: {}
 
       # Namespace for internal settings that should not be modified
       # It's a temporary step to "declassify" several things internally before we move to a
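With the nested ruby-kafka block removed, broker and protocol options move into the flat `kafka` hash using librdkafka key names. A hedged migration sketch (key names taken from librdkafka's CONFIGURATION.md; the exact equivalents depend on your previous setup):

    class KarafkaApp < Karafka::App
      setup do |config|
        # 1.4: config.kafka.seed_brokers = %w[kafka://127.0.0.1:9092]
        config.kafka = {
          'bootstrap.servers' => '127.0.0.1:9092',
          'session.timeout.ms' => 30_000,    # was kafka.session_timeout (seconds)
          'heartbeat.interval.ms' => 10_000, # was kafka.heartbeat_interval (seconds)
          'security.protocol' => 'ssl'       # replaces the ssl_* / sasl_* settings
        }
      end
    end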
@@ -187,29 +95,37 @@ module Karafka
         # @note In the future, we need to have a single process representation for all the karafka
         #   instances
         setting :process, default: Process.new
-        # option fetcher [Karafka::Fetcher] fetcher instance
-        setting :fetcher, default: Fetcher.new
-        # option configurators [Array<Object>] all configurators that we want to run after
-        # the setup
-        setting :configurators, default: [Configurators::WaterDrop.new]
+        # option subscription_groups_builder [Routing::SubscriptionGroupsBuilder] subscription
+        #   group builder
+        setting :subscription_groups_builder, default: Routing::SubscriptionGroupsBuilder.new
       end
 
       class << self
        # Configuring method
-        # @yield Runs a block of code providing a config singleton instance to it
-        # @yieldparam [Karafka::Setup::Config] Karafka config instance
-        def setup
-          configure { |config| yield(config) }
+        # @param block [Proc] block we want to execute with the config instance
+        def setup(&block)
+          configure(&block)
+          merge_kafka_defaults!(config)
+          validate!
+
+          # Check the license presence (if needed) and verify it
+          Licenser.new.verify(config.license)
+
+          configure_components
        end
 
-        # Everything that should be initialized after the setup
-        # Components are in karafka/config directory and are all loaded one by one
-        # If you want to configure a next component, please add a proper file to config dir
-        def setup_components
-          config
-            .internal
-            .configurators
-            .each { |configurator| configurator.call(config) }
+        private
+
+        # Propagates the kafka setting defaults unless they are already present
+        # This makes it easier to set some values that users usually don't change but still allows
+        # them to overwrite the whole hash if they want to
+        # @param config [Dry::Configurable::Config] dry config of this Karafka instance
+        def merge_kafka_defaults!(config)
+          KAFKA_DEFAULTS.each do |key, value|
+            next if config.kafka.key?(key)
+
+            config.kafka[key] = value
+          end
        end
 
        # Validate config based on the config contract
@@ -223,6 +139,17 @@ module Karafka
 
          raise Errors::InvalidConfigurationError, validation_result.errors.to_h
        end
+
+        # Sets up all the components that are based on the user configuration
+        # @note At the moment it is only WaterDrop
+        def configure_components
+          config.producer ||= ::WaterDrop::Producer.new do |producer_config|
+            # In some cases WaterDrop updates the config and we don't want our consumer config
+            # to be polluted by those updates; that's why we copy
+            producer_config.kafka = config.kafka.dup
+            producer_config.logger = config.logger
+          end
+        end
      end
    end
 end
data/lib/karafka/status.rb
@@ -3,16 +3,21 @@
 module Karafka
   # App status monitor
   class Status
-    # Available states and their transitions
+    # Available states and their transitions.
     STATES = {
       initializing: :initialize!,
-      initialized: :initialized!,
       running: :run!,
-      stopping: :stop!
+      stopping: :stop!,
+      stopped: :stopped!
     }.freeze
 
     private_constant :STATES
 
+    # By default we are in the initializing state
+    def initialize
+      initialize!
+    end
+
     STATES.each do |state, transition|
       define_method :"#{state}?" do
         @status == state
@@ -20,6 +25,11 @@ module Karafka
 
       define_method transition do
         @status = state
+
+        # Skip on creation (initializing)
+        # We skip as during this state we do not yet have a monitor
+        return if initializing?
+
        # The trap context disallows running certain things that we instrument,
        # so the state changes are executed from a separate thread
        Thread.new { Karafka.monitor.instrument("app.#{state}") }.join
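The resulting state machine can be exercised directly; the method names follow the STATES map above:

    status = Karafka::Status.new
    status.initializing? #=> true (set by the constructor)
    status.run!          # also instruments 'app.running'
    status.running?      #=> true
    status.stop!
    status.stopped!
    status.stopped?      #=> true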
data/lib/karafka/templates/example_consumer.rb.erb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+# Example consumer that prints message payloads
+class ExampleConsumer < ApplicationConsumer
+  def consume
+    messages.each { |message| puts message.payload }
+  end
+
+  # Run anything upon partition being revoked
+  # def on_revoked
+  # end
+
+  # Define here any teardown things you want when Karafka server stops
+  # def on_shutdown
+  # end
+end
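This template also illustrates the params → messages rename visible in the file list. A migration sketch (the 1.4 form follows the removed params_batch API):

    # karafka 1.4
    def consume
      params_batch.each { |params| puts params.payload }
    end

    # karafka 2.0
    def consume
      messages.each { |message| puts message.payload }
    end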
data/lib/karafka/templates/karafka.rb.erb
@@ -1,19 +1,7 @@
 # frozen_string_literal: true
 
 <% if rails? -%>
-ENV['RAILS_ENV'] ||= 'development'
-ENV['KARAFKA_ENV'] = ENV['RAILS_ENV']
 require ::File.expand_path('../config/environment', __FILE__)
-Rails.application.eager_load!
-
-# This lines will make Karafka print to stdout like puma or unicorn
-if Rails.env.development?
-  Rails.logger.extend(
-    ActiveSupport::Logger.broadcast(
-      ActiveSupport::Logger.new($stdout)
-    )
-  )
-end
 <% else -%>
 # This file is auto-generated during the install process.
 # If by any chance you've wanted a setup for Rails app, either run the `karafka:install`
@@ -31,9 +19,7 @@ APP_LOADER.enable_reloading
 %w[
   lib
   app/consumers
-  app/responders
-  app/workers
-].each(&APP_LOADER.method(:push_dir))
+].each { |dir| APP_LOADER.push_dir(dir) }
 
 APP_LOADER.setup
 APP_LOADER.eager_load
@@ -41,10 +27,12 @@ APP_LOADER.eager_load
 
 class KarafkaApp < Karafka::App
   setup do |config|
-    config.kafka.seed_brokers = %w[kafka://127.0.0.1:9092]
+    config.kafka = { 'bootstrap.servers' => '127.0.0.1:9092' }
     config.client_id = 'example_app'
 <% if rails? -%>
-    config.logger = Rails.logger
+    # Recreate consumers with each batch. This will allow Rails code reload to work in
+    # development mode. Otherwise the Karafka process would not be aware of code changes
+    config.consumer_persistence = !Rails.env.development?
 <% end -%>
   end
 
@@ -52,41 +40,17 @@ class KarafkaApp < Karafka::App
   # interested in logging events for certain environments. Since instrumentation
   # notifications add extra boilerplate, if you want to achieve max performance,
   # listen to only what you really need for a given environment.
-  Karafka.monitor.subscribe(WaterDrop::Instrumentation::StdoutListener.new)
   Karafka.monitor.subscribe(Karafka::Instrumentation::StdoutListener.new)
   # Karafka.monitor.subscribe(Karafka::Instrumentation::ProctitleListener.new)
 
-  # Uncomment that in order to achieve code reload in development mode
-  # Be aware, that this might have some side-effects. Please refer to the wiki
-  # for more details on benefits and downsides of the code reload in the
-  # development mode
-  #
-  # Karafka.monitor.subscribe(
-  #   Karafka::CodeReloader.new(
-  #     <%= rails? ? '*Rails.application.reloaders' : 'APP_LOADER' %>
-  #   )
-  # )
-
-  consumer_groups.draw do
-    # topic :example do
-    #   consumer ExampleConsumer
-    # end
-
-    # consumer_group :bigger_group do
-    #   topic :test do
-    #     consumer TestConsumer
-    #   end
-    #
-    #   topic :test2 do
-    #     consumer Test2Consumer
-    #   end
-    # end
+  routes.draw do
+<% if rails? -%>
+    # Uncomment this if you use Karafka with ActiveJob
+    # You need to define a topic for each queue name you use
+    # active_job_topic :default
+<% end -%>
+    topic :example do
+      consumer ExampleConsumer
+    end
   end
 end
-
-Karafka.monitor.subscribe('app.initialized') do
-  # Put here all the things you want to do after the Karafka framework
-  # initialization
-end
-
-KarafkaApp.boot!
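For the ActiveJob integration added in this release (see lib/active_job/* in the file list), the wiring would look roughly as below; the :karafka adapter name is an assumption based on active_job/queue_adapters/karafka_adapter.rb:

    # config/application.rb (Rails) - assumed adapter name
    config.active_job.queue_adapter = :karafka

    # karafka.rb - one topic per ActiveJob queue name, per the template comment
    class KarafkaApp < Karafka::App
      routes.draw do
        active_job_topic :default
        active_job_topic :mailers
      end
    end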
data/lib/karafka/time_trackers/base.rb
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+module Karafka
+  # Time trackers module.
+  #
+  # Time trackers are used to track time in the context of having a time poll (amount of time
+  # available for processing) or a pausing engine (pause for a time period).
+  module TimeTrackers
+    # Base class for all the time trackers.
+    class Base
+      private
+
+      # @return [Float] current time in milliseconds
+      def now
+        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) * 1000
+      end
+    end
+  end
+end
data/lib/karafka/time_trackers/pause.rb
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+module Karafka
+  module TimeTrackers
+    # Handles Kafka topic partition pausing and resuming with exponential back-offs.
+    class Pause < Base
+      attr_reader :count
+
+      # @param timeout [Integer] how long we should wait when anything goes wrong (in ms)
+      # @param max_timeout [Integer, nil] if exponential backoff is on, the max value we can
+      #   reach exponentially, at which we will stay
+      # @param exponential_backoff [Boolean] should we wait exponentially or with the same
+      #   timeout value
+      # @return [Karafka::TimeTrackers::Pause]
+      # @example
+      #   pause = Karafka::TimeTrackers::Pause.new(
+      #     timeout: 1000, max_timeout: 1000, exponential_backoff: false
+      #   )
+      #   pause.expired? #=> true
+      #   pause.paused? #=> false
+      #   pause.pause
+      #   sleep(1.1)
+      #   pause.paused? #=> true
+      #   pause.expired? #=> true
+      #   pause.count #=> 1
+      #   pause.pause
+      #   pause.count #=> 2
+      #   pause.paused? #=> true
+      #   pause.expired? #=> false
+      #   pause.resume
+      #   pause.count #=> 2
+      #   pause.paused? #=> false
+      #   pause.reset
+      #   pause.count #=> 0
+      def initialize(timeout:, max_timeout:, exponential_backoff:)
+        @started_at = nil
+        @count = 0
+        @timeout = timeout
+        @max_timeout = max_timeout
+        @exponential_backoff = exponential_backoff
+        super()
+      end
+
+      # Pauses the processing from now till the end of the interval (backoff or non-backoff)
+      # and records the count.
+      def pause
+        @started_at = now
+        @ends_at = @started_at + backoff_interval
+        @count += 1
+      end
+
+      # Marks the pause as resumed.
+      def resume
+        @started_at = nil
+        @ends_at = nil
+      end
+
+      # @return [Boolean] are we paused from processing
+      def paused?
+        !@started_at.nil?
+      end
+
+      # @return [Boolean] did the pause expire
+      def expired?
+        @ends_at ? now >= @ends_at : true
+      end
+
+      # Resets the pause counter.
+      def reset
+        @count = 0
+      end
+
+      private
+
+      # Computes the exponential backoff
+      # @return [Integer] backoff in milliseconds
+      def backoff_interval
+        backoff_factor = @exponential_backoff ? 2**@count : 1
+
+        timeout = backoff_factor * @timeout
+
+        @max_timeout && timeout > @max_timeout ? @max_timeout : timeout
+      end
+    end
+  end
+end
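With exponential backoff enabled, backoff_interval doubles with every recorded pause and caps at max_timeout. A quick trace of the code above:

    pause = Karafka::TimeTrackers::Pause.new(
      timeout: 1_000, max_timeout: 30_000, exponential_backoff: true
    )
    # Each #pause schedules an interval of 2**count * timeout ms, then increments count:
    # pause 1 -> 1_000ms, pause 2 -> 2_000ms, pause 3 -> 4_000ms,
    # pause 4 -> 8_000ms, pause 5 -> 16_000ms, pause 6 -> capped at 30_000ms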
data/lib/karafka/time_trackers/poll.rb
@@ -0,0 +1,65 @@
+# frozen_string_literal: true
+
+module Karafka
+  module TimeTrackers
+    # Object used to keep track of time we've used running certain operations.
+    #
+    # @example Keep track of sleeping and stop after 3 seconds of 0.1 sleep intervals
+    #   time_poll = Poll.new(3000)
+    #   time_poll.start
+    #
+    #   until time_poll.exceeded?
+    #     time_poll.start
+    #     puts "I have #{time_poll.remaining.to_i}ms remaining to sleep..."
+    #     sleep(0.1)
+    #     time_poll.checkpoint
+    #   end
+    class Poll < Base
+      attr_reader :remaining, :attempts
+
+      # @param total_time [Integer] amount of milliseconds before we exceed the given time limit
+      # @return [Poll] time poll instance
+      def initialize(total_time)
+        @remaining = total_time
+        @attempts = 0
+        super()
+      end
+
+      # @return [Boolean] did we exceed the time limit
+      def exceeded?
+        @remaining <= 0
+      end
+
+      # Starts time tracking.
+      def start
+        @attempts += 1
+        @started_at = now
+      end
+
+      # Stops time tracking of a given piece of code and updates the remaining time.
+      def checkpoint
+        @remaining -= (now - @started_at)
+      end
+
+      # @return [Boolean] if anything went wrong, can we retry after a backoff period
+      #   (do we have enough time)
+      def retryable?
+        remaining > backoff_interval
+      end
+
+      # Sleeps for an amount of time matching the attempt, so we sleep more with each attempt
+      # in case of a retry.
+      def backoff
+        # Sleep requires seconds, not ms
+        sleep(backoff_interval / 1_000.0)
+      end
+
+      private
+
+      # @return [Integer] milliseconds of the backoff time
+      def backoff_interval
+        100 * attempts
+      end
+    end
+  end
+end
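Combined, retryable? and backoff support a time-bounded retry loop. A hedged sketch (the operation being retried is a placeholder):

    poll = Karafka::TimeTrackers::Poll.new(5_000)

    begin
      poll.start
      fetch_something # placeholder for any retryable operation
      poll.checkpoint
    rescue StandardError
      poll.checkpoint
      if poll.retryable?
        poll.backoff # sleeps 100ms * attempts
        retry
      end
    end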
data/lib/karafka/version.rb
@@ -3,5 +3,5 @@
 # Main module namespace
 module Karafka
   # Current Karafka version
-  VERSION = '1.4.15'
+  VERSION = '2.0.0.alpha1'
 end