waterdrop 2.8.14 → 2.8.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +215 -36
  3. data/.github/workflows/push.yml +3 -3
  4. data/.github/workflows/trigger-wiki-refresh.yml +1 -1
  5. data/.github/workflows/verify-action-pins.yml +1 -1
  6. data/.gitignore +0 -1
  7. data/.rubocop.yml +87 -0
  8. data/.ruby-version +1 -1
  9. data/.yard-lint.yml +172 -72
  10. data/CHANGELOG.md +13 -0
  11. data/Gemfile +8 -9
  12. data/Gemfile.lint +14 -0
  13. data/Gemfile.lint.lock +123 -0
  14. data/Gemfile.lock +27 -28
  15. data/README.md +1 -1
  16. data/Rakefile +2 -2
  17. data/bin/integrations +28 -29
  18. data/bin/verify_topics_naming +8 -8
  19. data/config/locales/errors.yml +12 -0
  20. data/docker-compose.oauth.yml +56 -0
  21. data/docker-compose.yml +1 -1
  22. data/lib/waterdrop/clients/dummy.rb +9 -0
  23. data/lib/waterdrop/clients/rdkafka.rb +13 -2
  24. data/lib/waterdrop/config.rb +32 -5
  25. data/lib/waterdrop/connection_pool.rb +13 -11
  26. data/lib/waterdrop/contracts/config.rb +30 -6
  27. data/lib/waterdrop/contracts/message.rb +2 -2
  28. data/lib/waterdrop/contracts/poller_config.rb +26 -0
  29. data/lib/waterdrop/contracts/transactional_offset.rb +2 -2
  30. data/lib/waterdrop/contracts/variant.rb +18 -18
  31. data/lib/waterdrop/errors.rb +3 -0
  32. data/lib/waterdrop/instrumentation/callbacks/delivery.rb +8 -8
  33. data/lib/waterdrop/instrumentation/callbacks/error.rb +5 -5
  34. data/lib/waterdrop/instrumentation/callbacks/oauthbearer_token_refresh.rb +4 -4
  35. data/lib/waterdrop/instrumentation/callbacks/statistics.rb +18 -5
  36. data/lib/waterdrop/instrumentation/idle_disconnector_listener.rb +4 -4
  37. data/lib/waterdrop/instrumentation/logger_listener.rb +10 -10
  38. data/lib/waterdrop/instrumentation/notifications.rb +3 -0
  39. data/lib/waterdrop/instrumentation/vendors/datadog/metrics_listener.rb +19 -19
  40. data/lib/waterdrop/polling/config.rb +52 -0
  41. data/lib/waterdrop/polling/latch.rb +49 -0
  42. data/lib/waterdrop/polling/poller.rb +415 -0
  43. data/lib/waterdrop/polling/queue_pipe.rb +63 -0
  44. data/lib/waterdrop/polling/state.rb +151 -0
  45. data/lib/waterdrop/polling.rb +22 -0
  46. data/lib/waterdrop/producer/async.rb +6 -6
  47. data/lib/waterdrop/producer/buffer.rb +8 -8
  48. data/lib/waterdrop/producer/idempotence.rb +3 -3
  49. data/lib/waterdrop/producer/sync.rb +15 -8
  50. data/lib/waterdrop/producer/testing.rb +1 -1
  51. data/lib/waterdrop/producer/transactions.rb +6 -6
  52. data/lib/waterdrop/producer.rb +113 -30
  53. data/lib/waterdrop/version.rb +1 -1
  54. data/lib/waterdrop.rb +15 -10
  55. data/package-lock.json +331 -0
  56. data/package.json +9 -0
  57. data/renovate.json +25 -6
  58. data/waterdrop.gemspec +23 -23
  59. metadata +17 -5
  60. data/.coditsu/ci.yml +0 -3
data/bin/integrations CHANGED
@@ -4,14 +4,14 @@
4
4
 
5
5
  # All integration specs run with their own bundler context to avoid dependency conflicts.
6
6
  # All WaterDrop integration specs are pristine by default since they use isolated Gemfiles.
7
- raise 'This code needs to be executed WITHOUT bundle exec' if Kernel.const_defined?(:Bundler)
7
+ raise "This code needs to be executed WITHOUT bundle exec" if Kernel.const_defined?(:Bundler)
8
8
 
9
- require 'open3'
10
- require 'fileutils'
11
- require 'pathname'
12
- require 'tmpdir'
9
+ require "open3"
10
+ require "fileutils"
11
+ require "pathname"
12
+ require "tmpdir"
13
13
 
14
- ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../')))
14
+ ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), "../")))
15
15
 
16
16
  # How many bytes do we want to keep from the stdout in the buffer for when we need to print it
17
17
  MAX_BUFFER_OUTPUT = 307_200
@@ -36,9 +36,9 @@ class Scenario
36
36
  def initialize(path)
37
37
  @path = path
38
38
  # First 1024 characters from stdout
39
- @stdout_head = ''
39
+ @stdout_head = ""
40
40
  # Last 1024 characters from stdout
41
- @stdout_tail = ''
41
+ @stdout_tail = ""
42
42
  end
43
43
 
44
44
  # Starts running given scenario in a separate process
@@ -49,16 +49,15 @@ class Scenario
49
49
 
50
50
  # @return [String] integration spec name
51
51
  def name
52
- @path.gsub("#{ROOT_PATH}/spec/integrations/", '')
52
+ @path.gsub("#{ROOT_PATH}/spec/integrations/", "")
53
53
  end
54
54
 
55
-
56
55
  # @return [Boolean] did this scenario finish or is it still running
57
56
  def finished?
58
57
  # If the thread is running too long, kill it
59
58
  if current_time - @started_at > MAX_RUN_TIME
60
59
  begin
61
- Process.kill('TERM', pid)
60
+ Process.kill("TERM", pid)
62
61
  # It may finish right after we want to kill it, that's why we ignore this
63
62
  rescue Errno::ESRCH
64
63
  end
@@ -66,7 +65,7 @@ class Scenario
66
65
 
67
66
  # We read it so it won't grow as we use our default logger that prints to both test.log and
68
67
  # to stdout. Otherwise after reaching the buffer size, it would hang
69
- buffer = ''
68
+ buffer = ""
70
69
  @stdout.read_nonblock(MAX_BUFFER_OUTPUT, buffer, exception: false)
71
70
  @stdout_head = buffer if @stdout_head.empty?
72
71
  @stdout_tail << buffer
@@ -101,19 +100,19 @@ class Scenario
101
100
  # Prints a status report when scenario is finished and stdout if it failed
102
101
  def report
103
102
  if success?
104
- print "\e[#{32}m#{'.'}\e[0m"
103
+ print "\e[32m.\e[0m"
105
104
  else
106
- buffer = ''
105
+ buffer = ""
107
106
 
108
107
  @stderr.read_nonblock(MAX_BUFFER_OUTPUT, buffer, exception: false)
109
108
 
110
109
  puts
111
- puts "\e[#{31}m#{'[FAILED]'}\e[0m #{name}"
110
+ puts "\e[31m[FAILED]\e[0m #{name}"
112
111
  puts "Time taken: #{current_time - @started_at} seconds"
113
112
  puts "Exit code: #{exit_code}"
114
113
  puts "Exit status: #{exit_status}"
115
114
  puts @stdout_head
116
- puts '...'
115
+ puts "..."
117
116
  puts @stdout_tail
118
117
  puts buffer
119
118
  puts
@@ -163,38 +162,38 @@ class Scenario
163
162
  end
164
163
 
165
164
  # Load all the specs
166
- specs = Dir[ROOT_PATH.join('spec/integrations/**/*_spec.rb')]
165
+ specs = Dir[ROOT_PATH.join("spec/integrations/**/*_spec.rb")]
167
166
 
168
- FILTER_TYPE = ARGV[0] == '--exclude' ? 'exclude' : 'include'
167
+ FILTER_TYPE = (ARGV[0] == "--exclude") ? "exclude" : "include"
169
168
 
170
169
  # Remove the exclude flag
171
- ARGV.shift if FILTER_TYPE == '--exclude'
170
+ ARGV.shift if FILTER_TYPE == "--exclude"
172
171
 
173
172
  # If filters is provided, apply
174
173
  # Allows to provide several filters one after another and applies all of them
175
174
  ARGV.each do |filter|
176
175
  specs.delete_if do |name|
177
176
  case FILTER_TYPE
178
- when 'include'
177
+ when "include"
179
178
  !name.include?(filter)
180
- when 'exclude'
179
+ when "exclude"
181
180
  name.include?(filter)
182
181
  else
183
- raise 'Invalid filter type'
182
+ raise "Invalid filter type"
184
183
  end
185
184
  end
186
185
  end
187
186
 
188
187
  # Randomize order
189
- seed = (ENV['SPECS_SEED'] || rand(0..10_000)).to_i
188
+ seed = (ENV["SPECS_SEED"] || rand(0..10_000)).to_i
190
189
 
191
190
  puts "Random seed: #{seed}"
192
191
 
193
192
  scenarios = specs
194
- .shuffle(random: Random.new(seed))
195
- .map { |integration| Scenario.new(integration) }
193
+ .shuffle(random: Random.new(seed))
194
+ .map { |integration| Scenario.new(integration) }
196
195
 
197
- raise ArgumentError, "No integration specs with filters: #{ARGV.join(', ')}" if scenarios.empty?
196
+ raise ArgumentError, "No integration specs with filters: #{ARGV.join(", ")}" if scenarios.empty?
198
197
 
199
198
  puts "Running #{scenarios.size} scenarios"
200
199
 
@@ -217,8 +216,8 @@ end
217
216
  puts
218
217
  puts "\nLongest scenarios:\n\n"
219
218
 
220
- finished_scenarios.sort_by(&:time_taken).reverse.first(10).each do |long_scenario|
221
- puts "[#{'%6.2f' % long_scenario.time_taken}] #{long_scenario.name}"
219
+ finished_scenarios.sort_by(&:time_taken).last(10).reverse_each do |long_scenario|
220
+ puts "[#{"%6.2f" % long_scenario.time_taken}] #{long_scenario.name}"
222
221
  end
223
222
 
224
223
  failed_scenarios = finished_scenarios.reject(&:success?)
@@ -232,7 +231,7 @@ else
232
231
  puts "\nFailed scenarios:\n\n"
233
232
 
234
233
  failed_scenarios.each do |scenario|
235
- puts "\e[#{31}m#{'[FAILED]'}\e[0m #{scenario.name}"
234
+ puts "\e[31m[FAILED]\e[0m #{scenario.name}"
236
235
  end
237
236
 
238
237
  puts
@@ -8,18 +8,18 @@
8
8
  #
9
9
  # It also ensures we have one convention that we can follow.
10
10
 
11
- require 'bundler'
11
+ require "bundler"
12
12
  Bundler.setup(:default, :test, :integrations)
13
- require 'rdkafka'
13
+ require "rdkafka"
14
14
 
15
- admin = Rdkafka::Config.new('bootstrap.servers': 'localhost:9092').admin
15
+ admin = Rdkafka::Config.new("bootstrap.servers": "localhost:9092").admin
16
16
 
17
17
  invalid = admin
18
- .metadata
19
- .topics
20
- .map { |topic| topic[:topic_name] }
21
- .select { |topic| !topic.start_with?('it-') }
22
- .select { |topic| topic.length <= 6 }
18
+ .metadata
19
+ .topics
20
+ .map { |topic| topic[:topic_name] }
21
+ .select { |topic| !topic.start_with?("it-") }
22
+ .select { |topic| topic.length <= 6 }
23
23
 
24
24
  admin.close
25
25
 
@@ -26,6 +26,18 @@ en:
26
26
  non_reloadable_errors_format: must be an array of symbols
27
27
  oauth.token_provider_listener_format: 'must be false or respond to #on_oauthbearer_token_refresh'
28
28
  idle_disconnect_timeout_format: 'must be an integer that is equal to 0 or bigger than 30 000 (30 seconds)'
29
+ polling.mode_format: must be :thread or :fd
30
+ polling.poller_format: must be nil or a WaterDrop::Polling::Poller instance
31
+ polling.poller_poller_only_with_fd_mode: can only be set when polling.mode is :fd
32
+ polling.fd.max_time_format: must be an integer that is equal or bigger than 1
33
+ polling.fd.periodic_poll_interval_format: must be an integer that is equal or bigger than 1
34
+
35
+ poller:
36
+ thread_priority_format: must be an integer between -3 and 3
37
+ poll_timeout_format: must be an integer that is equal or bigger than 1
38
+ backoff_min_format: must be an integer that is equal or bigger than 1
39
+ backoff_max_format: must be an integer that is equal or bigger than 1
40
+ backoff_max_must_be_gte_backoff_min: backoff_max must be greater than or equal to backoff_min
29
41
 
30
42
  variant:
31
43
  missing: must be present
@@ -0,0 +1,56 @@
1
+ services:
2
+ keycloak:
3
+ image: quay.io/keycloak/keycloak:26.5
4
+ container_name: keycloak
5
+ command: start-dev --import-realm
6
+ ports:
7
+ - "8080:8080"
8
+ environment:
9
+ KEYCLOAK_ADMIN: admin
10
+ KEYCLOAK_ADMIN_PASSWORD: admin
11
+ volumes:
12
+ - ./spec/integrations/oauth_token_refresh/keycloak-realm.json:/opt/keycloak/data/import/realm.json
13
+ healthcheck:
14
+ test: ["CMD-SHELL", "exec 3<>/dev/tcp/localhost/8080 && echo -e 'GET /realms/kafka HTTP/1.1\\r\\nHost: localhost\\r\\n\\r\\n' >&3 && timeout 1 cat <&3 | grep -q '200 OK'"]
15
+ interval: 10s
16
+ timeout: 10s
17
+ retries: 30
18
+ start_period: 90s
19
+
20
+ kafka-oauth:
21
+ image: confluentinc/cp-kafka:8.1.1
22
+ container_name: kafka-oauth
23
+ depends_on:
24
+ keycloak:
25
+ condition: service_healthy
26
+ ports:
27
+ - "9094:9094"
28
+ environment:
29
+ CLUSTER_ID: kafka-oauth-cluster-1
30
+ KAFKA_BROKER_ID: 1
31
+ KAFKA_PROCESS_ROLES: broker,controller
32
+ KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-oauth:9093
33
+ KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
34
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
35
+ KAFKA_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093,SASL_PLAINTEXT://:9094
36
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-oauth:9092,SASL_PLAINTEXT://127.0.0.1:9094
37
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT
38
+ KAFKA_SASL_ENABLED_MECHANISMS: OAUTHBEARER
39
+ KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAINTEXT
40
+ # Per-listener callback handler
41
+ KAFKA_LISTENER_NAME_SASL__PLAINTEXT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS: org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
42
+ KAFKA_LISTENER_NAME_SASL__PLAINTEXT_OAUTHBEARER_SASL_JAAS_CONFIG: |
43
+ org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;
44
+ # Global OAUTHBEARER settings for token validation
45
+ KAFKA_SASL_OAUTHBEARER_JWKS_ENDPOINT_URL: http://keycloak:8080/realms/kafka/protocol/openid-connect/certs
46
+ KAFKA_SASL_OAUTHBEARER_EXPECTED_AUDIENCE: kafka
47
+ # Note: Not setting expected issuer - Keycloak uses request URL which varies
48
+ KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
49
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
50
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
51
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
52
+ # Required by Confluent image when SASL is enabled
53
+ # Also allow HTTP URLs for JWKS endpoint (Kafka 3.0+ blocks non-HTTPS by default)
54
+ KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dorg.apache.kafka.sasl.oauthbearer.allowed.urls=http://keycloak:8080/realms/kafka/protocol/openid-connect/certs"
55
+ volumes:
56
+ - ./spec/integrations/oauth_token_refresh/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf:ro
data/docker-compose.yml CHANGED
@@ -1,7 +1,7 @@
1
1
  services:
2
2
  kafka:
3
3
  container_name: kafka
4
- image: confluentinc/cp-kafka:8.0.0
4
+ image: confluentinc/cp-kafka:8.1.1
5
5
 
6
6
  ports:
7
7
  - 9092:9092
@@ -57,6 +57,15 @@ module WaterDrop
57
57
  Handle.new(topic.to_s, partition, @counters["#{topic}#{partition}"] += 1)
58
58
  end
59
59
 
60
+ # Returns 0 as dummy client doesn't queue any real messages
61
+ #
62
+ # @return [Integer] always 0
63
+ def queue_size
64
+ 0
65
+ end
66
+
67
+ alias_method :queue_length, :queue_size
68
+
60
69
  # @param _args [Object] anything really, this dummy is supposed to support anything
61
70
  def respond_to_missing?(*_args)
62
71
  true
@@ -14,7 +14,13 @@ module WaterDrop
14
14
  kafka_config = producer.config.kafka.to_h
15
15
  monitor = producer.config.monitor
16
16
 
17
- client = ::Rdkafka::Config.new(kafka_config).producer(native_kafka_auto_start: false)
17
+ # When FD polling is enabled, we disable the native librdkafka polling thread
18
+ # and use our own Ruby-based poller instead
19
+ producer_options = { native_kafka_auto_start: false }
20
+
21
+ producer_options[:run_polling_thread] = false if producer.fd_polling?
22
+
23
+ client = ::Rdkafka::Config.new(kafka_config).producer(**producer_options)
18
24
 
19
25
  # Register statistics runner for this particular type of callbacks
20
26
  ::Karafka::Core::Instrumentation.statistics_callbacks.add(
@@ -53,8 +59,13 @@ module WaterDrop
53
59
 
54
60
  client.start
55
61
 
62
+ # Register with poller if FD polling is enabled
63
+ # Uses the producer's configured poller (custom or global singleton)
64
+ # This must happen after client.start to ensure the client is ready
65
+ producer.poller.register(producer, client) if producer.fd_polling?
66
+
56
67
  # Switch to the transactional mode if user provided the transactional id
57
- client.init_transactions if kafka_config.key?(:'transactional.id')
68
+ client.init_transactions if kafka_config.key?(:"transactional.id")
58
69
 
59
70
  client
60
71
  end
@@ -9,18 +9,18 @@ module WaterDrop
9
9
 
10
10
  # Defaults for kafka settings, that will be overwritten only if not present already
11
11
  KAFKA_DEFAULTS = {
12
- 'client.id': 'waterdrop',
12
+ "client.id": "waterdrop",
13
13
  # emit librdkafka statistics every five seconds. This is used in instrumentation.
14
14
  # When disabled, part of metrics will not be published and available.
15
- 'statistics.interval.ms': 5_000,
15
+ "statistics.interval.ms": 5_000,
16
16
  # We set it to a value that is lower than `max_wait_timeout` to have a final verdict upon
17
17
  # sync delivery
18
- 'message.timeout.ms': 50_000,
18
+ "message.timeout.ms": 50_000,
19
19
  # Must be more or equal to `message.timeout.ms` defaults
20
- 'transaction.timeout.ms': 55_000,
20
+ "transaction.timeout.ms": 55_000,
21
21
  # Lowers latency. Default in newer librdkafka but we want to make sure it is shipped to
22
22
  # users despite what librdkafka they run on
23
- 'socket.nagle.disable': true
23
+ "socket.nagle.disable": true
24
24
  }.freeze
25
25
 
26
26
  private_constant :KAFKA_DEFAULTS
@@ -140,6 +140,33 @@ module WaterDrop
140
140
  setting :token_provider_listener, default: false
141
141
  end
142
142
 
143
+ # Namespace for polling configuration
144
+ setting :polling do
145
+ # option [Symbol] Polling mode for handling producer callbacks
146
+ # :thread - uses librdkafka's native background polling threads (default)
147
+ # :fd - uses a single global Ruby thread with IO.select-based multiplexing
148
+ setting :mode, default: :thread
149
+
150
+ # option [WaterDrop::Polling::Poller, nil] Custom poller instance for isolation
151
+ # When nil (default), uses the global singleton poller shared by all FD-mode producers
152
+ # When set to a Poller instance, uses that dedicated poller for this producer only
153
+ # This allows isolating producers from each other's callback processing
154
+ # Only valid when mode is :fd
155
+ setting :poller, default: nil
156
+
157
+ # Namespace for FD-based polling configuration (only used when mode is :fd)
158
+ setting :fd do
159
+ # option [Integer] Max milliseconds to poll a single producer before rotating to the next
160
+ # This prevents any single producer from monopolizing the polling thread
161
+ setting :max_time, default: 100
162
+
163
+ # option [Integer] Interval in milliseconds for periodic polling of idle producers
164
+ # When one producer is busy, other producers still need to be polled periodically
165
+ # to ensure OAuth token refresh and statistics callbacks fire on time
166
+ setting :periodic_poll_interval, default: 1_000
167
+ end
168
+ end
169
+
143
170
  # Configuration method
144
171
  # @yield Runs a block of code providing a config singleton instance to it
145
172
  # @yieldparam [WaterDrop::Config] WaterDrop config instance
@@ -42,6 +42,8 @@ module WaterDrop
42
42
 
43
43
  class << self
44
44
  # Global connection pool instance
45
+ # @!attribute [rw] default_pool
46
+ # @return [ConnectionPool, nil] the global connection pool instance
45
47
  attr_accessor :default_pool
46
48
 
47
49
  # Sets up a global connection pool
@@ -73,7 +75,7 @@ module WaterDrop
73
75
 
74
76
  # Emit global event for pool setup
75
77
  WaterDrop.instrumentation.instrument(
76
- 'connection_pool.setup',
78
+ "connection_pool.setup",
77
79
  pool: @default_pool,
78
80
  size: size,
79
81
  timeout: timeout
@@ -93,7 +95,7 @@ module WaterDrop
93
95
  # producer.produce_sync(topic: 'events', payload: 'data')
94
96
  # end
95
97
  def with(...)
96
- raise 'No global connection pool configured. Call setup first.' unless @default_pool
98
+ raise "No global connection pool configured. Call setup first." unless @default_pool
97
99
 
98
100
  @default_pool.with(...)
99
101
  end
@@ -120,7 +122,7 @@ module WaterDrop
120
122
 
121
123
  # Emit global event for pool shutdown
122
124
  WaterDrop.instrumentation.instrument(
123
- 'connection_pool.shutdown',
125
+ "connection_pool.shutdown",
124
126
  pool: pool
125
127
  )
126
128
  end
@@ -128,7 +130,7 @@ module WaterDrop
128
130
  # Alias for shutdown to align with producer API
129
131
  # WaterDrop producers use #close, so we alias connection pool #shutdown to #close
130
132
  # for API consistency across both individual producers and connection pools
131
- alias close shutdown
133
+ alias_method :close, :shutdown
132
134
 
133
135
  # Reload the global connection pool
134
136
  def reload
@@ -138,7 +140,7 @@ module WaterDrop
138
140
 
139
141
  # Emit global event for pool reload
140
142
  WaterDrop.instrumentation.instrument(
141
- 'connection_pool.reload',
143
+ "connection_pool.reload",
142
144
  pool: @default_pool
143
145
  )
144
146
  end
@@ -163,7 +165,7 @@ module WaterDrop
163
165
  # producer.produce(topic: 'events', payload: 'data2')
164
166
  # end
165
167
  def transaction(...)
166
- raise 'No global connection pool configured. Call setup first.' unless @default_pool
168
+ raise "No global connection pool configured. Call setup first." unless @default_pool
167
169
 
168
170
  @default_pool.transaction(...)
169
171
  end
@@ -175,7 +177,7 @@ module WaterDrop
175
177
  def ensure_connection_pool_gem!
176
178
  return if defined?(::ConnectionPool)
177
179
 
178
- require 'connection_pool'
180
+ require "connection_pool"
179
181
  rescue LoadError
180
182
  raise LoadError, <<~ERROR
181
183
  WaterDrop::ConnectionPool requires the 'connection_pool' gem.
@@ -217,7 +219,7 @@ module WaterDrop
217
219
 
218
220
  # Emit event when a connection pool is created
219
221
  WaterDrop.instrumentation.instrument(
220
- 'connection_pool.created',
222
+ "connection_pool.created",
221
223
  pool: self,
222
224
  size: size,
223
225
  timeout: timeout
@@ -242,7 +244,7 @@ module WaterDrop
242
244
 
243
245
  # Emit event after pool is shut down
244
246
  WaterDrop.instrumentation.instrument(
245
- 'connection_pool.shutdown',
247
+ "connection_pool.shutdown",
246
248
  pool: self
247
249
  )
248
250
  end
@@ -250,7 +252,7 @@ module WaterDrop
250
252
  # Alias for shutdown to align with producer API
251
253
  # WaterDrop producers use #close, so we alias connection pool #shutdown to #close
252
254
  # for API consistency across both individual producers and connection pools
253
- alias close shutdown
255
+ alias_method :close, :shutdown
254
256
 
255
257
  # Reload all connections in the pool
256
258
  # Useful for configuration changes or error recovery
@@ -261,7 +263,7 @@ module WaterDrop
261
263
 
262
264
  # Emit event after pool is reloaded
263
265
  WaterDrop.instrumentation.instrument(
264
- 'connection_pool.reloaded',
266
+ "connection_pool.reloaded",
265
267
  pool: self
266
268
  )
267
269
  end
@@ -6,8 +6,8 @@ module WaterDrop
6
6
  class Config < ::Karafka::Core::Contractable::Contract
7
7
  configure do |config|
8
8
  config.error_messages = YAML.safe_load_file(
9
- File.join(WaterDrop.gem_root, 'config', 'locales', 'errors.yml')
10
- ).fetch('en').fetch('validations').fetch('config')
9
+ File.join(WaterDrop.gem_root, "config", "locales", "errors.yml")
10
+ ).fetch("en").fetch("validations").fetch("config")
11
11
  end
12
12
 
13
13
  required(:id) { |val| val.is_a?(String) && !val.empty? }
@@ -43,16 +43,40 @@ module WaterDrop
43
43
  end
44
44
  end
45
45
 
46
+ nested(:polling) do
47
+ required(:mode) { |val| %i[thread fd].include?(val) }
48
+ required(:poller) { |val| val.nil? || val.is_a?(Polling::Poller) }
49
+
50
+ nested(:fd) do
51
+ required(:max_time) { |val| val.is_a?(Integer) && val >= 1 }
52
+ required(:periodic_poll_interval) { |val| val.is_a?(Integer) && val >= 100 }
53
+ end
54
+ end
55
+
56
+ # Validate that poller is only set when mode is :fd
57
+ virtual do |config, errors|
58
+ next true unless errors.empty?
59
+
60
+ polling = config.fetch(:polling)
61
+ mode = polling.fetch(:mode)
62
+ poller = polling.fetch(:poller)
63
+
64
+ next true if poller.nil?
65
+ next true if mode == :fd
66
+
67
+ [[%i[polling poller], :poller_only_with_fd_mode]]
68
+ end
69
+
46
70
  # rdkafka allows both symbols and strings as keys for config but then casts them to strings
47
71
  # This can be confusing, so we expect all keys to be symbolized
48
72
  virtual do |config, errors|
49
73
  next true unless errors.empty?
50
74
 
51
75
  errors = config
52
- .fetch(:kafka)
53
- .keys
54
- .reject { |key| key.is_a?(Symbol) }
55
- .map { |key| [[:kafka, key], :kafka_key_must_be_a_symbol] }
76
+ .fetch(:kafka)
77
+ .keys
78
+ .reject { |key| key.is_a?(Symbol) }
79
+ .map { |key| [[:kafka, key], :kafka_key_must_be_a_symbol] }
56
80
 
57
81
  errors
58
82
  end
@@ -7,8 +7,8 @@ module WaterDrop
7
7
  class Message < ::Karafka::Core::Contractable::Contract
8
8
  configure do |config|
9
9
  config.error_messages = YAML.safe_load_file(
10
- File.join(WaterDrop.gem_root, 'config', 'locales', 'errors.yml')
11
- ).fetch('en').fetch('validations').fetch('message')
10
+ File.join(WaterDrop.gem_root, "config", "locales", "errors.yml")
11
+ ).fetch("en").fetch("validations").fetch("message")
12
12
  end
13
13
 
14
14
  # Regex to check that topic has a valid format
@@ -0,0 +1,26 @@
1
+ # frozen_string_literal: true
2
+
3
+ module WaterDrop
4
+ module Contracts
5
+ # Contract with validation rules for WaterDrop Poller configuration
6
+ class PollerConfig < ::Karafka::Core::Contractable::Contract
7
+ configure do |config|
8
+ config.error_messages = YAML.safe_load_file(
9
+ File.join(WaterDrop.gem_root, "config", "locales", "errors.yml")
10
+ ).fetch("en").fetch("validations").fetch("poller")
11
+ end
12
+
13
+ required(:thread_priority) { |val| val.is_a?(Integer) && val >= -3 && val <= 3 }
14
+ required(:poll_timeout) { |val| val.is_a?(Integer) && val >= 1 }
15
+ required(:backoff_min) { |val| val.is_a?(Integer) && val >= 1 }
16
+ required(:backoff_max) { |val| val.is_a?(Integer) && val >= 1 }
17
+
18
+ virtual do |config, errors|
19
+ next true unless errors.empty?
20
+ next true if config[:backoff_max] >= config[:backoff_min]
21
+
22
+ [[%i[backoff_max], :backoff_max_must_be_gte_backoff_min]]
23
+ end
24
+ end
25
+ end
26
+ end
@@ -7,8 +7,8 @@ module WaterDrop
7
7
  class TransactionalOffset < ::Karafka::Core::Contractable::Contract
8
8
  configure do |config|
9
9
  config.error_messages = YAML.safe_load_file(
10
- File.join(WaterDrop.gem_root, 'config', 'locales', 'errors.yml')
11
- ).fetch('en').fetch('validations').fetch('transactional_offset')
10
+ File.join(WaterDrop.gem_root, "config", "locales", "errors.yml")
11
+ ).fetch("en").fetch("validations").fetch("transactional_offset")
12
12
  end
13
13
 
14
14
  required(:consumer) { |val| val.respond_to?(:consumer_group_metadata_pointer) }
@@ -28,8 +28,8 @@ module WaterDrop
28
28
 
29
29
  configure do |config|
30
30
  config.error_messages = YAML.safe_load_file(
31
- File.join(WaterDrop.gem_root, 'config', 'locales', 'errors.yml')
32
- ).fetch('en').fetch('validations').fetch('variant')
31
+ File.join(WaterDrop.gem_root, "config", "locales", "errors.yml")
32
+ ).fetch("en").fetch("validations").fetch("variant")
33
33
  end
34
34
 
35
35
  required(:default) { |val| BOOLEANS.include?(val) }
@@ -40,10 +40,10 @@ module WaterDrop
40
40
  next true unless errors.empty?
41
41
 
42
42
  errors = config
43
- .fetch(:topic_config)
44
- .keys
45
- .reject { |key| key.is_a?(Symbol) }
46
- .map { |key| [[:kafka, key], :kafka_key_must_be_a_symbol] }
43
+ .fetch(:topic_config)
44
+ .keys
45
+ .reject { |key| key.is_a?(Symbol) }
46
+ .map { |key| [[:kafka, key], :kafka_key_must_be_a_symbol] }
47
47
 
48
48
  errors
49
49
  end
@@ -53,10 +53,10 @@ module WaterDrop
53
53
  next true unless errors.empty?
54
54
 
55
55
  errors = config
56
- .fetch(:topic_config)
57
- .keys
58
- .reject { |key| TOPIC_CONFIG_KEYS.include?(key) }
59
- .map { |key| [[:kafka, key], :kafka_key_not_per_topic] }
56
+ .fetch(:topic_config)
57
+ .keys
58
+ .reject { |key| TOPIC_CONFIG_KEYS.include?(key) }
59
+ .map { |key| [[:kafka, key], :kafka_key_not_per_topic] }
60
60
 
61
61
  errors
62
62
  end
@@ -70,10 +70,10 @@ module WaterDrop
70
70
  next true unless config.fetch(:transactional)
71
71
 
72
72
  errors = config
73
- .fetch(:topic_config)
74
- .keys
75
- .select { |key| key.to_s.include?('acks') }
76
- .map { |key| [[:kafka, key], :kafka_key_acks_not_changeable] }
73
+ .fetch(:topic_config)
74
+ .keys
75
+ .select { |key| key.to_s.include?("acks") }
76
+ .map { |key| [[:kafka, key], :kafka_key_acks_not_changeable] }
77
77
 
78
78
  errors
79
79
  end
@@ -85,10 +85,10 @@ module WaterDrop
85
85
  next true unless config.fetch(:idempotent)
86
86
 
87
87
  errors = config
88
- .fetch(:topic_config)
89
- .keys
90
- .select { |key| key.to_s.include?('acks') }
91
- .map { |key| [[:kafka, key], :kafka_key_acks_not_changeable] }
88
+ .fetch(:topic_config)
89
+ .keys
90
+ .select { |key| key.to_s.include?("acks") }
91
+ .map { |key| [[:kafka, key], :kafka_key_acks_not_changeable] }
92
92
 
93
93
  errors
94
94
  end