waterdrop 2.0.7 → 2.6.11

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only.
Files changed (56)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +22 -11
  5. data/.ruby-version +1 -1
  6. data/CHANGELOG.md +200 -0
  7. data/Gemfile +0 -2
  8. data/Gemfile.lock +32 -75
  9. data/README.md +22 -275
  10. data/certs/cert_chain.pem +26 -0
  11. data/config/locales/errors.yml +33 -0
  12. data/docker-compose.yml +19 -12
  13. data/lib/waterdrop/clients/buffered.rb +90 -0
  14. data/lib/waterdrop/clients/dummy.rb +69 -0
  15. data/lib/waterdrop/clients/rdkafka.rb +34 -0
  16. data/lib/{water_drop → waterdrop}/config.rb +39 -16
  17. data/lib/waterdrop/contracts/config.rb +43 -0
  18. data/lib/waterdrop/contracts/message.rb +64 -0
  19. data/lib/{water_drop → waterdrop}/errors.rb +14 -7
  20. data/lib/waterdrop/instrumentation/callbacks/delivery.rb +102 -0
  21. data/lib/{water_drop → waterdrop}/instrumentation/callbacks/error.rb +6 -2
  22. data/lib/{water_drop → waterdrop}/instrumentation/callbacks/statistics.rb +1 -1
  23. data/lib/{water_drop/instrumentation/stdout_listener.rb → waterdrop/instrumentation/logger_listener.rb} +66 -21
  24. data/lib/waterdrop/instrumentation/monitor.rb +20 -0
  25. data/lib/{water_drop/instrumentation/monitor.rb → waterdrop/instrumentation/notifications.rb} +12 -14
  26. data/lib/waterdrop/instrumentation/vendors/datadog/dashboard.json +1 -0
  27. data/lib/waterdrop/instrumentation/vendors/datadog/metrics_listener.rb +210 -0
  28. data/lib/waterdrop/middleware.rb +50 -0
  29. data/lib/{water_drop → waterdrop}/producer/async.rb +40 -4
  30. data/lib/{water_drop → waterdrop}/producer/buffer.rb +12 -30
  31. data/lib/{water_drop → waterdrop}/producer/builder.rb +6 -11
  32. data/lib/{water_drop → waterdrop}/producer/sync.rb +44 -15
  33. data/lib/waterdrop/producer/transactions.rb +170 -0
  34. data/lib/waterdrop/producer.rb +308 -0
  35. data/lib/{water_drop → waterdrop}/version.rb +1 -1
  36. data/lib/waterdrop.rb +28 -2
  37. data/renovate.json +6 -0
  38. data/waterdrop.gemspec +14 -11
  39. data.tar.gz.sig +0 -0
  40. metadata +71 -111
  41. metadata.gz.sig +0 -0
  42. data/certs/mensfeld.pem +0 -25
  43. data/config/errors.yml +0 -6
  44. data/lib/water_drop/contracts/config.rb +0 -26
  45. data/lib/water_drop/contracts/message.rb +0 -42
  46. data/lib/water_drop/instrumentation/callbacks/delivery.rb +0 -30
  47. data/lib/water_drop/instrumentation/callbacks/statistics_decorator.rb +0 -77
  48. data/lib/water_drop/instrumentation/callbacks_manager.rb +0 -39
  49. data/lib/water_drop/instrumentation.rb +0 -20
  50. data/lib/water_drop/patches/rdkafka/bindings.rb +0 -42
  51. data/lib/water_drop/patches/rdkafka/producer.rb +0 -20
  52. data/lib/water_drop/producer/dummy_client.rb +0 -32
  53. data/lib/water_drop/producer.rb +0 -162
  54. data/lib/water_drop.rb +0 -36
  55. /data/lib/{water_drop → waterdrop}/contracts.rb +0 -0
  56. /data/lib/{water_drop → waterdrop}/producer/status.rb +0 -0
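Two structural changes dominate the list above: every file under `lib/water_drop` moved to `lib/waterdrop`, and the old `lib/water_drop.rb` entry point (file 54) is gone, with `lib/waterdrop.rb` promoted from a compatibility shim to the real entry point. A minimal sketch of what this means for a host application that requires the library explicitly (the printed version is illustrative):

require 'waterdrop' # the 2.6.x entry point; require 'water_drop' no longer works

# The top-level constant is unchanged across both versions:
puts WaterDrop::VERSION # => "2.6.11"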
data/lib/waterdrop/producer/transactions.rb ADDED
@@ -0,0 +1,170 @@
+# frozen_string_literal: true
+
+module WaterDrop
+  class Producer
+    # Transactions related producer functionalities
+    module Transactions
+      # Creates a transaction.
+      #
+      # Karafka transactions work in a similar manner to SQL db transactions though there are some
+      # crucial differences. When you start a transaction, all messages produced during it will
+      # be delivered together or will fail together. The difference is, that messages from within
+      # a single transaction can be delivered and will have a delivery handle but will be then
+      # compacted prior to moving the LSO forward. This means, that not every delivery handle for
+      # async dispatches will emit a queue purge error. None for sync as the delivery has happened
+      # but they will never be visible to the transactional consumers.
+      #
+      # Transactions **are** thread-safe however they lock a mutex. This means, that for
+      # high-throughput transactional messages production in multiple threads
+      # (for example in Karafka), it may be much better to use a few instances that can work in
+      # parallel.
+      #
+      # Please note, that if a producer is configured as transactional, it **cannot** produce
+      # messages outside of transactions, that is why by default all dispatches will be wrapped
+      # with a transaction. One transaction per single dispatch and for `produce_many` it will be
+      # a single transaction wrapping all messages dispatches (not one per message).
+      #
+      # @return Block result
+      #
+      # @example Simple transaction
+      #   producer.transaction do
+      #     producer.produce_async(topic: 'topic', payload: 'data')
+      #   end
+      #
+      # @example Aborted transaction - messages produced won't be visible to consumers
+      #   producer.transaction do
+      #     producer.produce_sync(topic: 'topic', payload: 'data')
+      #     throw(:abort)
+      #   end
+      #
+      # @example Use the block result (last handler) to wait on all messages ack
+      #   handler = producer.transaction do
+      #     producer.produce_async(topic: 'topic', payload: 'data')
+      #   end
+      #
+      #   handler.wait
+      def transaction
+        # This will safely allow us to support one-operation transactions so a transactional
+        # producer can work without the transactional block if needed
+        return yield if @transaction_mutex.owned?
+
+        @transaction_mutex.synchronize do
+          transactional_instrument(:finished) do
+            with_transactional_error_handling(:begin) do
+              transactional_instrument(:started) { client.begin_transaction }
+            end
+
+            result = nil
+            commit = false
+
+            catch(:abort) do
+              result = yield
+              commit = true
+            end
+
+            commit || raise(WaterDrop::Errors::AbortTransaction)
+
+            with_transactional_error_handling(:commit) do
+              transactional_instrument(:committed) { client.commit_transaction }
+            end
+
+            result
+          # We need to handle any interrupt including critical in order not to have the transaction
+          # running. This will also handle things like `IRB::Abort`
+          #
+          # rubocop:disable Lint/RescueException
+          rescue Exception => e
+            # rubocop:enable Lint/RescueException
+            with_transactional_error_handling(:abort) do
+              transactional_instrument(:aborted) { client.abort_transaction }
+            end
+
+            raise unless e.is_a?(WaterDrop::Errors::AbortTransaction)
+          end
+        end
+      end
+
+      # @return [Boolean] Is this producer a transactional one
+      def transactional?
+        return @transactional if instance_variable_defined?(:'@transactional')
+
+        @transactional = config.kafka.to_h.key?(:'transactional.id')
+      end
+
+      private
+
+      # Runs provided code with a transaction wrapper if transactions are enabled.
+      # This allows us to simplify the async and sync batch dispatchers because we can ensure that
+      # their internal dispatches will be wrapped only with a single transaction and not
+      # a transaction per message
+      # @param block [Proc] code we want to run
+      def with_transaction_if_transactional(&block)
+        transactional? ? transaction(&block) : yield
+      end
+
+      # Instruments the transactional operation with producer id
+      #
+      # @param key [Symbol] transaction operation key
+      # @param block [Proc] block to run inside the instrumentation or nothing if not given
+      def transactional_instrument(key, &block)
+        @monitor.instrument("transaction.#{key}", producer_id: id, &block)
+      end
+
+      # Error handling for transactional operations is a bit special. There are three types of
+      # errors coming from librdkafka:
+      # - retryable - indicates that a given operation (like offset commit) can be retried after
+      #   a backoff and that it should then operate as expected. We try to retry those
+      #   a few times before finally failing.
+      # - fatal - errors that will not recover no matter what (for example being fenced out)
+      # - abortable - an error from which we cannot recover but for which we should abort the
+      #   current transaction.
+      #
+      # The code below handles this logic, also publishing the appropriate notifications via our
+      # notifications pipeline.
+      #
+      # @param action [Symbol] action type
+      # @param allow_abortable [Boolean] should we allow for the abortable flow. This is set to
+      #   false internally to prevent attempts to abort from failed abort operations
+      def with_transactional_error_handling(action, allow_abortable: true)
+        attempt ||= 0
+        attempt += 1
+
+        yield
+      rescue ::Rdkafka::RdkafkaError => e
+        # Decide if there is a chance to retry the given error
+        do_retry = e.retryable? && attempt < config.max_attempts_on_transaction_command
+
+        @monitor.instrument(
+          'error.occurred',
+          producer_id: id,
+          caller: self,
+          error: e,
+          type: "transaction.#{action}",
+          retry: do_retry,
+          attempt: attempt
+        )
+
+        raise if e.fatal?
+
+        if do_retry
+          # Backoff more and more before retries
+          sleep(config.wait_backoff_on_transaction_command * attempt)
+
+          retry
+        end
+
+        if e.abortable? && allow_abortable
+          # Always attempt to abort but if aborting fails with an abortable error, do not attempt
+          # to abort from abort as this could create an infinite loop
+          with_transactional_error_handling(:abort, allow_abortable: false) do
+            transactional_instrument(:aborted) { @client.abort_transaction }
+          end
+
+          raise
+        end
+
+        raise
+      end
+    end
+  end
+end
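To make the transactional flow above concrete, here is a minimal usage sketch. It relies only on APIs visible in this diff (`Producer.new` with a config block, `config.kafka` carrying `'transactional.id'`, `#transaction`, `#produce_async`, `throw(:abort)`); the broker address, topic, and id values are illustrative assumptions:

require 'waterdrop'

# A producer is transactional when its kafka config carries 'transactional.id'
# (this is exactly what #transactional? above checks for).
producer = WaterDrop::Producer.new do |config|
  config.kafka = {
    'bootstrap.servers': 'localhost:9092',  # assumption: local broker
    'transactional.id': 'orders-producer-1' # assumption: any stable unique id
  }
end

# All-or-nothing delivery: both dispatches commit together and #transaction
# returns the block result, here the last delivery handle.
handler = producer.transaction do
  producer.produce_async(topic: 'orders', payload: 'created')
  producer.produce_async(topic: 'orders', payload: 'paid')
end
handler.wait

# throw(:abort) rolls the transaction back without raising to the caller.
producer.transaction do
  producer.produce_async(topic: 'orders', payload: 'never visible')
  throw(:abort)
end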
data/lib/waterdrop/producer.rb ADDED
@@ -0,0 +1,308 @@
+# frozen_string_literal: true
+
+module WaterDrop
+  # Main WaterDrop messages producer
+  class Producer
+    extend Forwardable
+    include Sync
+    include Async
+    include Buffer
+    include Transactions
+    include ::Karafka::Core::Helpers::Time
+
+    # Which of the inline flow errors do we want to intercept and re-bind
+    SUPPORTED_FLOW_ERRORS = [
+      Rdkafka::RdkafkaError,
+      Rdkafka::Producer::DeliveryHandle::WaitTimeoutError
+    ].freeze
+
+    private_constant :SUPPORTED_FLOW_ERRORS
+
+    def_delegators :config, :middleware
+
+    # @return [String] uuid of the current producer
+    attr_reader :id
+    # @return [Status] producer status object
+    attr_reader :status
+    # @return [Concurrent::Array] internal messages buffer
+    attr_reader :messages
+    # @return [Object] monitor we want to use
+    attr_reader :monitor
+    # @return [Object] dry-configurable config object
+    attr_reader :config
+
+    # Creates a not-yet-configured instance of the producer
+    # @param block [Proc] configuration block
+    # @return [Producer] producer instance
+    def initialize(&block)
+      @operations_in_progress = Concurrent::AtomicFixnum.new(0)
+      @buffer_mutex = Mutex.new
+      @connecting_mutex = Mutex.new
+      @operating_mutex = Mutex.new
+      @transaction_mutex = Mutex.new
+
+      @status = Status.new
+      @messages = Concurrent::Array.new
+
+      return unless block
+
+      setup(&block)
+    end
+
+    # Sets up the whole configuration and initializes all that is needed
+    # @param block [Block] configuration block
+    def setup(&block)
+      raise Errors::ProducerAlreadyConfiguredError, id unless @status.initial?
+
+      @config = Config
+                .new
+                .setup(&block)
+                .config
+
+      @id = @config.id
+      @monitor = @config.monitor
+      @contract = Contracts::Message.new(max_payload_size: @config.max_payload_size)
+      @status.configured!
+    end
+
+    # @return [Rdkafka::Producer] raw rdkafka producer
+    # @note Client is lazy initialized, keeping in mind also the fact of a potential fork that
+    #   can happen any time.
+    # @note It is not recommended to fork a producer that is already in use so in case of
+    #   bootstrapping a cluster, it's much better to fork configured but not used producers
+    def client
+      return @client if @client && @pid == Process.pid
+
+      # Don't allow obtaining a client reference for a producer that was not configured
+      raise Errors::ProducerNotConfiguredError, id if @status.initial?
+
+      @connecting_mutex.synchronize do
+        return @client if @client && @pid == Process.pid
+
+        # We undefine all the finalizers, in case it was a fork, so the finalizers from the parent
+        # process don't leak
+        ObjectSpace.undefine_finalizer(id)
+
+        # We should raise an error when trying to use a producer with client from a fork. Always.
+        if @client
+          # We need to reset the client, otherwise there might be an attempt to close the parent
+          # client
+          @client = nil
+          raise Errors::ProducerUsedInParentProcess, Process.pid
+        end
+
+        # Finalizer tracking is needed for handling shutdowns gracefully.
+        # I don't expect everyone to remember about closing all the producers all the time, thus
+        # this approach is better. Although it is still worth keeping in mind, that this will
+        # block GC from removing a no longer used producer unless closed properly but at least
+        # won't crash the VM upon closing the process
+        ObjectSpace.define_finalizer(id, proc { close })
+
+        @pid = Process.pid
+        @client = Builder.new.call(self, @config)
+
+        # Register statistics runner for this particular type of callbacks
+        ::Karafka::Core::Instrumentation.statistics_callbacks.add(
+          @id,
+          Instrumentation::Callbacks::Statistics.new(@id, @client.name, @config.monitor)
+        )
+
+        # Register error tracking callback
+        ::Karafka::Core::Instrumentation.error_callbacks.add(
+          @id,
+          Instrumentation::Callbacks::Error.new(@id, @client.name, @config.monitor)
+        )
+
+        @status.connected!
+      end
+
+      @client
+    end
+
+    # Purges data from both the buffer queue as well as the librdkafka queue.
+    #
+    # @note This is an operation that can cause data loss. Keep that in mind. It will not only
+    #   purge the internal WaterDrop buffer but will also purge the librdkafka queue and will
+    #   cancel any outgoing message dispatches.
+    def purge
+      @monitor.instrument('buffer.purged', producer_id: id) do
+        @buffer_mutex.synchronize do
+          @messages = Concurrent::Array.new
+        end
+
+        @client.purge
+      end
+    end
+
+    # Flushes the buffers in a sync way and closes the producer
+    # @param force [Boolean] should we force closing even with outstanding messages after the
+    #   max wait timeout
+    def close(force: false)
+      @operating_mutex.synchronize do
+        return unless @status.active?
+
+        @monitor.instrument(
+          'producer.closed',
+          producer_id: id
+        ) do
+          @status.closing!
+
+          # No need for auto-gc if everything got closed by us
+          # This should be used only in case a producer was not closed properly and forgotten
+          ObjectSpace.undefine_finalizer(id)
+
+          # We save this thread id because we need to bypass the activity verification on the
+          # producer for final flush of buffers.
+          @closing_thread_id = Thread.current.object_id
+
+          # Wait until all the outgoing operations are done. Only when no one is using the
+          # underlying client to run operations can we close
+          sleep(0.001) until @operations_in_progress.value.zero?
+
+          # Flush has its own buffer mutex but even if it is blocked, flushing can still happen
+          # as we close the client after the flushing (even if blocked by the mutex)
+          flush(true)
+
+          # We should not close the client in several threads at the same time
+          # It is safe to run it several times but not at exactly the same moment
+          # We also mark it as closed only if it was connected, if not, it would trigger a new
+          # connection that anyhow would be immediately closed
+          if @client
+            # Why do we trigger it early instead of just having `#close` do it?
+            # The linger.ms time will be ignored for the duration of the call,
+            # queued messages will be sent to the broker as soon as possible.
+            begin
+              # `max_wait_timeout` is in seconds at the moment
+              @client.flush(@config.max_wait_timeout * 1_000) unless @client.closed?
+            # We can safely ignore timeouts here because any left outstanding requests
+            # will anyhow force wait on close if not forced.
+            # If forced, we will purge the queue and just close
+            rescue ::Rdkafka::RdkafkaError, Rdkafka::AbstractHandle::WaitTimeoutError
+              nil
+            ensure
+              # Purge fully the local queue in case of a forceful shutdown just to be sure, that
+              # there are no dangling messages. In case flush was successful, there should be
+              # none but we do it just in case it timed out
+              purge if force
+            end
+
+            @client.close
+
+            @client = nil
+          end
+
+          # Remove callbacks runners that were registered
+          ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@id)
+          ::Karafka::Core::Instrumentation.error_callbacks.delete(@id)
+
+          @status.closed!
+        end
+      end
+    end
+
+    # Closes the producer with forced close after timeout, purging any outgoing data
+    def close!
+      close(force: true)
+    end
+
+    private
+
+    # Ensures that we don't run any operations when the producer is not configured or when it
+    # was already closed
+    def ensure_active!
+      return if @status.active?
+      return if @status.closing? && @operating_mutex.owned?
+
+      raise Errors::ProducerNotConfiguredError, id if @status.initial?
+      raise Errors::ProducerClosedError, id if @status.closing?
+      raise Errors::ProducerClosedError, id if @status.closed?
+
+      # This should never happen
+      raise Errors::StatusInvalidError, [id, @status.to_s]
+    end
+
+    # Ensures that the message we want to send out to Kafka is actually valid and that it can be
+    # sent there
+    # @param message [Hash] message we want to send
+    # @raise [Karafka::Errors::MessageInvalidError]
+    def validate_message!(message)
+      @contract.validate!(message, Errors::MessageInvalidError)
+    end
+
+    # Waits on a given handler
+    #
+    # @param handler [Rdkafka::Producer::DeliveryHandle]
+    def wait(handler)
+      handler.wait(
+        max_wait_timeout: @config.max_wait_timeout,
+        wait_timeout: @config.wait_timeout
+      )
+    end
+
+    # Runs the client produce method with a given message
+    #
+    # @param message [Hash] message we want to send
+    def produce(message)
+      produce_time ||= monotonic_now
+
+      # This can happen only during flushing on closing, in a case like this we don't have to
+      # synchronize because we already own the lock
+      if @operating_mutex.owned?
+        @operations_in_progress.increment
+      else
+        @operating_mutex.synchronize { @operations_in_progress.increment }
+        ensure_active!
+      end
+
+      # In case someone defines topic as a symbol, we need to convert it into a string as
+      # librdkafka does not accept symbols
+      message = message.merge(topic: message[:topic].to_s) if message[:topic].is_a?(Symbol)
+
+      if transactional?
+        transaction { client.produce(**message) }
+      else
+        client.produce(**message)
+      end
+    rescue SUPPORTED_FLOW_ERRORS.first => e
+      # Unless we want to wait and retry and it's a full queue, we raise normally
+      raise unless @config.wait_on_queue_full
+      raise unless e.code == :queue_full
+      # If we're running for longer than the timeout, we need to re-raise the queue full.
+      # This will prevent a situation where the cluster is down forever and we just retry and retry
+      # in an infinite loop, effectively hanging the processing
+      raise unless monotonic_now - produce_time < @config.wait_timeout_on_queue_full * 1_000
+
+      label = caller_locations(2, 1)[0].label.split(' ').last
+
+      # We use this syntax here because we want to preserve the original `#cause` when we
+      # instrument the error and there is no way to manually assign `#cause` value. We want to keep
+      # the original cause to maintain the same API across all the errors dispatched to the
+      # notifications pipeline.
+      begin
+        raise Errors::ProduceError, e.inspect
+      rescue Errors::ProduceError => e
+        # We want to instrument on this event even when we restart it.
+        # The reason is simple: instrumentation and visibility.
+        # We can recover from this, but despite that we should be able to instrument this.
+        # If this type of event happens too often, it may indicate that the buffer settings are not
+        # well configured.
+        @monitor.instrument(
+          'error.occurred',
+          producer_id: id,
+          message: message,
+          error: e,
+          type: "message.#{label}"
+        )
+
+        # We do not poll the producer because polling happens in a background thread
+        # It also should not be a frequent case (queue full), hence it's ok to just throttle.
+        sleep @config.wait_backoff_on_queue_full
+      end
+
+      @operations_in_progress.decrement
+      retry
+    ensure
+      @operations_in_progress.decrement
+    end
+  end
+end
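The producer lifecycle implemented above (configure, lazily connect, dispatch, close) in a short sketch, again limited to what this diff shows; `produce_sync` comes from the included `Sync` module and the settings are illustrative assumptions:

require 'waterdrop'

producer = WaterDrop::Producer.new do |config|
  config.kafka = { 'bootstrap.servers': 'localhost:9092' } # assumption: local broker
  config.max_wait_timeout = 30 # seconds; used when #close flushes the client
end

# The rdkafka client is built lazily on first use and is fork-aware:
# touching a parent's client from a fork raises ProducerUsedInParentProcess.
producer.produce_sync(topic: 'events', payload: 'hello')

# Graceful shutdown: waits for in-flight operations, flushes, then closes.
producer.close

# producer.close! would additionally purge undelivered messages after the
# timeout, which, as the #purge note warns, can cause data loss.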
data/lib/{water_drop → waterdrop}/version.rb RENAMED
@@ -3,5 +3,5 @@
 # WaterDrop library
 module WaterDrop
   # Current WaterDrop version
-  VERSION = '2.0.7'
+  VERSION = '2.6.11'
 end
data/lib/waterdrop.rb CHANGED
@@ -1,4 +1,30 @@
 # frozen_string_literal: true
 
-# This file is used as a compatibility step
-require 'water_drop'
+# External components
+# delegate should be removed because we don't need it, we just add it because of ruby-kafka
+%w[
+  forwardable
+  json
+  zeitwerk
+  securerandom
+  karafka-core
+  pathname
+  concurrent/atomic/atomic_fixnum
+].each { |lib| require lib }
+
+# WaterDrop library
+module WaterDrop
+  class << self
+    # @return [Pathname] root path of this gem
+    def gem_root
+      Pathname.new(File.expand_path('..', __dir__))
+    end
+  end
+end
+
+loader = Zeitwerk::Loader.for_gem
+loader.inflector.inflect('waterdrop' => 'WaterDrop')
+# Do not load vendors instrumentation components. Those need to be required manually if needed
+loader.ignore("#{__dir__}/waterdrop/instrumentation/vendors/**/*.rb")
+loader.setup
+loader.eager_load
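Because the loader ignores `instrumentation/vendors`, components such as the Datadog listener added in this release (file 27 in the list) must be required explicitly. A sketch; only the require path is taken from this diff, and the listener's setup API is not shown here:

require 'waterdrop'
# Vendor instrumentation is excluded from Zeitwerk's autoloading above:
require 'waterdrop/instrumentation/vendors/datadog/metrics_listener'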
data/renovate.json ADDED
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:base"
+  ]
+}
data/waterdrop.gemspec CHANGED
@@ -3,35 +3,38 @@
 lib = File.expand_path('lib', __dir__)
 $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
 
-require 'water_drop/version'
+require 'waterdrop/version'
 
 Gem::Specification.new do |spec|
   spec.name = 'waterdrop'
   spec.version = ::WaterDrop::VERSION
   spec.platform = Gem::Platform::RUBY
   spec.authors = ['Maciej Mensfeld']
-  spec.email = %w[maciej@mensfeld.pl]
+  spec.email = %w[contact@karafka.io]
   spec.homepage = 'https://karafka.io'
   spec.summary = 'Kafka messaging made easy!'
   spec.description = spec.summary
   spec.license = 'MIT'
 
-  spec.add_dependency 'concurrent-ruby', '>= 1.1'
-  spec.add_dependency 'dry-configurable', '~> 0.13'
-  spec.add_dependency 'dry-monitor', '~> 0.5'
-  spec.add_dependency 'dry-validation', '~> 1.7'
-  spec.add_dependency 'rdkafka', '>= 0.10'
+  spec.add_dependency 'karafka-core', '>= 2.2.3', '< 3.0.0'
   spec.add_dependency 'zeitwerk', '~> 2.3'
 
-  spec.required_ruby_version = '>= 2.6.0'
-
   if $PROGRAM_NAME.end_with?('gem')
     spec.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
   end
 
-  spec.cert_chain = %w[certs/mensfeld.pem]
+  spec.cert_chain = %w[certs/cert_chain.pem]
   spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) }
   spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
   spec.require_paths = %w[lib]
-  spec.metadata = { 'source_code_uri' => 'https://github.com/karafka/waterdrop' }
+
+  spec.metadata = {
+    'funding_uri' => 'https://karafka.io/#become-pro',
+    'homepage_uri' => 'https://karafka.io',
+    'changelog_uri' => 'https://karafka.io/docs/Changelog-WaterDrop',
+    'bug_tracker_uri' => 'https://github.com/karafka/waterdrop/issues',
+    'source_code_uri' => 'https://github.com/karafka/waterdrop',
+    'documentation_uri' => 'https://karafka.io/docs/#waterdrop',
+    'rubygems_mfa_required' => 'true'
+  }
 end
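The dependency section above is the biggest packaging change: four dry-rb gems, concurrent-ruby, and the direct rdkafka pin collapse into a single karafka-core dependency (which, given that the new producer code still references `Rdkafka` and `Concurrent`, must provide those transitively). For a host application the upgrade is a one-line Gemfile change; the constraint below is an illustrative assumption:

# Gemfile
source 'https://rubygems.org'

gem 'waterdrop', '~> 2.6' # pulls in karafka-core instead of the dry-rb stack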
data.tar.gz.sig CHANGED
Binary file