logstash-integration-kafka 11.8.1-java → 12.0.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +6 -6
  3. data/docs/index.asciidoc +1 -1
  4. data/docs/input-kafka.asciidoc +18 -2
  5. data/docs/output-kafka.asciidoc +4 -8
  6. data/lib/logstash/inputs/kafka.rb +29 -1
  7. data/lib/logstash/outputs/kafka.rb +9 -24
  8. data/lib/logstash-integration-kafka_jars.rb +7 -6
  9. data/logstash-integration-kafka.gemspec +3 -3
  10. data/spec/fixtures/jaas.config +1 -1
  11. data/spec/fixtures/jaas3.config +5 -0
  12. data/spec/integration/inputs/kafka_spec.rb +80 -3
  13. data/spec/integration/outputs/kafka_spec.rb +4 -9
  14. data/vendor/jar-dependencies/com/fasterxml/jackson/datatype/jackson-datatype-jdk8/2.16.0/jackson-datatype-jdk8-2.16.0.jar +0 -0
  15. data/vendor/jar-dependencies/com/github/luben/zstd-jni/{1.5.6-8/zstd-jni-1.5.6-8.jar → 1.5.6-10/zstd-jni-1.5.6-10.jar} +0 -0
  16. data/vendor/jar-dependencies/io/confluent/kafka-avro-serializer/8.0.0/kafka-avro-serializer-8.0.0.jar +0 -0
  17. data/vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/8.0.0/kafka-schema-registry-client-8.0.0.jar +0 -0
  18. data/vendor/jar-dependencies/io/confluent/kafka-schema-serializer/8.0.0/kafka-schema-serializer-8.0.0.jar +0 -0
  19. data/vendor/jar-dependencies/org/apache/kafka/kafka-clients/{3.9.1/kafka-clients-3.9.1.jar → 4.1.0/kafka-clients-4.1.0.jar} +0 -0
  20. data/vendor/jar-dependencies/org/lz4/lz4-java/1.8.0/lz4-java-1.8.0.jar +0 -0
  21. metadata +26 -26
  22. data/vendor/jar-dependencies/at/yawk/lz4/lz4-java/1.10.1/lz4-java-1.10.1.jar +0 -0
  23. data/vendor/jar-dependencies/io/confluent/kafka-avro-serializer/7.9.1/kafka-avro-serializer-7.9.1.jar +0 -0
  24. data/vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/7.9.1/kafka-schema-registry-client-7.9.1.jar +0 -0
  25. data/vendor/jar-dependencies/io/confluent/kafka-schema-serializer/7.9.1/kafka-schema-serializer-7.9.1.jar +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0f76e22baa3477a41ca18a430659a8e96be6c1d837a380295d897893827c3bcc
-  data.tar.gz: 1b26a8ada2992d1620889126d0b3964095eabe78b9a1f5673778668609de0222
+  metadata.gz: 744abb784463992c54a691cf16cbab0eca25f9b5cc73a54d9808f15bca223021
+  data.tar.gz: d35f68e589ee17adb865b7edcb1c9627b1345e9de93c5037828c15e8e83defdb
 SHA512:
-  metadata.gz: 29ee55d384ec752656233860676bc6d01a38f5b3390dc119966133c6fca58d9e27ddc60a478fa0786b339e027cbbed520f74d660c0b03611af8781ef27284790
-  data.tar.gz: 0a939609e55eedeb7aace1c228919451f8a446ec51fe266dc5cafe3ff5cbde85d81f5732feafe2c357c6dc265d023fb98d04a17562bb9676a881b480b00e8890
+  metadata.gz: b85e870af8ae8dc030320ecd0c9ad312d72cceacb64b2e0ac313ca3629b3a1c24a3029f1512485b79ccd68e272eccddfee1ac32a314dc562af7cc1762b82aa53
+  data.tar.gz: 89fe6266982b250eb000650c91d8fdfc653cfc02d9cdf570c743edd5d7290ffd1735b0fbc3b68b018fb65215b67a909cdc5bb1dfb05e025256b35de0cca6b1af
data/CHANGELOG.md CHANGED
@@ -1,9 +1,9 @@
-## 11.8.1
-- Upgrade lz4 dependency [#213](https://github.com/logstash-plugins/logstash-integration-kafka/pull/213)
-
-## 11.8.0
-- Deprecate partitioner `default` and `uniform_sticky` options [#206](https://github.com/logstash-plugins/logstash-integration-kafka/pull/206)
-  Both options are deprecated in Kafka client 3 and will be removed in the plugin 12.0.0.
+## 12.0.0
+- Update kafka client to 4.1.0 and transitive dependencies [#205](https://github.com/logstash-plugins/logstash-integration-kafka/pull/205)
+  - Breaking Change: partitioner options `default` and `uniform_sticky` are removed
+  - `linger_ms` default value changed from 0 to 5
+- Add `group_protocol` option for configuring the Kafka consumer rebalance protocol
+  - Setting `group_protocol => consumer` opts in to the new consumer group protocol
 
 ## 11.7.0
 - Add `reconnect_backoff_max_ms` option for configuring kafka client [#204](https://github.com/logstash-plugins/logstash-integration-kafka/pull/204)
data/docs/index.asciidoc CHANGED
@@ -1,7 +1,7 @@
 :plugin: kafka
 :type: integration
 :no_codec:
-:kafka_client: 3.9.1
+:kafka_client: 4.1.0
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
data/docs/input-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
 :plugin: kafka
 :type: input
 :default_codec: plain
-:kafka_client: 3.9.1
-:kafka_client_doc: 39
+:kafka_client: 4.1.0
+:kafka_client_doc: 41
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
@@ -132,6 +132,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
 | <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-group_instance_id>> |<<string,string>>|No
+| <<plugins-{type}s-{plugin}-group_protocol>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-isolation_level>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
@@ -412,6 +413,21 @@ You can set this value to use information such as a hostname, an IP, or anything
 NOTE: In cases when multiple threads are configured and `consumer_threads` is greater than one, a suffix is appended to
 the `group_instance_id` to avoid collisions.
 
+[id="plugins-{type}s-{plugin}-group_protocol"]
+===== `group_protocol`
+
+* Value can be any of: `classic`, `consumer`
+* Default value is `classic`.
+
+Specifies the consumer group rebalance protocol used by the Kafka client.
+
+`classic` is the default protocol. During a rebalance, all consumer instances pause message processing until partition assignments are complete.
+
+`consumer` is an incremental rebalance protocol introduced in Kafka 4. It avoids global synchronization barriers by pausing only the partitions that are reassigned.
+When using `consumer`, the following settings **cannot be configured**:
+`partition_assignment_strategy`, `heartbeat_interval_ms`, and `session_timeout_ms`.
+
+
 [id="plugins-{type}s-{plugin}-heartbeat_interval_ms"]
 ===== `heartbeat_interval_ms`
 
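A minimal opt-in sketch for the new protocol (broker address and topic name are placeholders):

    input {
      kafka {
        bootstrap_servers => "localhost:9092"   # placeholder
        topics => ["logs"]                      # placeholder
        group_protocol => "consumer"
        # partition_assignment_strategy, heartbeat_interval_ms and
        # session_timeout_ms must stay unset with this protocol
      }
    }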
data/docs/output-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
 :plugin: kafka
 :type: output
 :default_codec: plain
-:kafka_client: 3.9.1
-:kafka_client_doc: 39
+:kafka_client: 4.1.0
+:kafka_client_doc: 41
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
@@ -285,7 +285,7 @@ Serializer class for the key of the message
 ===== `linger_ms`
 
 * Value type is <<number,number>>
-* Default value is `0`
+* Default value is `5`
 
 The producer groups together any records that arrive in between request
 transmissions into a single batched request. Normally this occurs only under
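Pipelines that were relying on the old immediate-send behavior can pin the previous default explicitly; a minimal sketch (broker and topic values are placeholders):

    output {
      kafka {
        bootstrap_servers => "localhost:9092"   # placeholder
        topic_id => "logs"                      # placeholder
        linger_ms => 0   # restore the pre-12.0.0 default; 12.0.0 defaults to 5
      }
    }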
@@ -351,13 +351,9 @@ The max time in milliseconds before a metadata refresh is forced.
 
 By not setting this value, the plugin uses the built-in partitioning strategy provided by the Kafka client. Read more about the "partitioner.class" on the Kafka documentation.
 
-Available options are as follows:
+The available option is as follows:
 
-* `default` hashes the `message_key` of an event to get the partition. When no message key is present, the plugin picks a partition in a round-robin fashion. Please note that this is a different strategy than the one used when `partitioner` is left unset.
 * `round_robin` distributes writes to all partitions equally, regardless of `message_key`
-* `uniform_sticky` hashes the `message_key` of an event to get the partition. When no message key is present, the plugin sticks to a partition for the duration of a batch, then randomly picks a new one.
-
-NOTE: `default` and `uniform_sticky` are deprecated and will be removed in `12.0.0`.
 
 [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
 ===== `receive_buffer_bytes`
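A minimal sketch of the one remaining named strategy (broker and topic values are placeholders):

    output {
      kafka {
        bootstrap_servers => "localhost:9092"   # placeholder
        topic_id => "logs"                      # placeholder
        partitioner => "round_robin"
      }
    }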
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -129,6 +129,12 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   # consumer created by each thread an artificial suffix is appended to the user provided `group_instance_id`
   # to avoid clashing.
   config :group_instance_id, :validate => :string
+  # `classic` is the "stop-the-world" rebalance protocol.
+  # Any consumer restart or failure triggers a full-group rebalance, pausing processing for all consumers.
+  # `consumer` is an incremental rebalance protocol that avoids global sync barriers,
+  # pausing only the partitions that are reassigned.
+  # It cannot be set along with `partition_assignment_strategy`, `heartbeat_interval_ms` and `session_timeout_ms`.
+  config :group_protocol, :validate => ["classic", "consumer"], :default => "classic"
   # The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
   # that the consumer's session stays active and to facilitate rebalancing when new
   # consumers join or leave the group. The value must be set lower than
@@ -293,6 +299,8 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     reassign_dns_lookup
     @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
     check_schema_registry_parameters
+
+    set_group_protocol!
   end
 
   METADATA_NONE = Set[].freeze
@@ -450,6 +458,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes.to_s) unless fetch_min_bytes.nil?
     props.put(kafka::GROUP_ID_CONFIG, group_id)
     props.put(kafka::GROUP_INSTANCE_ID_CONFIG, group_instance_id) unless group_instance_id.nil?
+    props.put(kafka::GROUP_PROTOCOL_CONFIG, group_protocol)
     props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms.to_s) unless heartbeat_interval_ms.nil?
     props.put(kafka::ISOLATION_LEVEL_CONFIG, isolation_level)
     props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
@@ -471,7 +480,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     props.put("security.protocol", security_protocol) unless security_protocol.nil?
     if schema_registry_url
       props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroDeserializer.java_class)
-      serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+      serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
       props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.uri.to_s)
       if schema_registry_proxy && !schema_registry_proxy.empty?
         props.put(serdes_config::PROXY_HOST, @schema_registry_proxy_host)
@@ -513,6 +522,25 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     end
   end
 
+  # In order to use group_protocol => consumer, heartbeat_interval_ms, session_timeout_ms and partition_assignment_strategy need to be unset.
+  # If any of these is not using the default value of the plugin, we raise a configuration error.
+  def set_group_protocol!
+    return unless group_protocol == "consumer"
+
+    heartbeat_overridden = heartbeat_interval_ms != self.class.get_config.dig("heartbeat_interval_ms", :default)
+    session_overridden = session_timeout_ms != self.class.get_config.dig("session_timeout_ms", :default)
+    strategy_defined = !partition_assignment_strategy.nil?
+
+    if strategy_defined || heartbeat_overridden || session_overridden
+      raise LogStash::ConfigurationError, "group_protocol cannot be set to 'consumer' "\
+        "when any of partition_assignment_strategy, heartbeat_interval_ms or session_timeout_ms is set"
+    end
+
+    @heartbeat_interval_ms = nil
+    @session_timeout_ms = nil
+    logger.debug("Settings 'heartbeat_interval_ms' and 'session_timeout_ms' have been reset since 'group_protocol' is configured as 'consumer'")
+  end
+
   def partition_assignment_strategy_class
     case partition_assignment_strategy
     when 'range'
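Given the guard in `set_group_protocol!`, a config like the following sketch would fail at register time (topic value is a placeholder):

    input {
      kafka {
        topics => ["logs"]             # placeholder
        group_protocol => "consumer"
        session_timeout_ms => 6000     # non-default value => LogStash::ConfigurationError on register
      }
    }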
data/lib/logstash/outputs/kafka.rb CHANGED
@@ -101,7 +101,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # This setting accomplishes this by adding a small amount of artificial delay—that is,
   # rather than immediately sending out a record the producer will wait for up to the given delay
   # to allow other records to be sent so that the sends can be batched together.
-  config :linger_ms, :validate => :number, :default => 0 # Kafka default
+  config :linger_ms, :validate => :number, :default => 5 # Kafka default
   # The maximum size of a request
   config :max_request_size, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
   # The key for the message
@@ -110,7 +110,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   config :message_headers, :validate => :hash, :default => {}
   # the timeout setting for initial metadata request to fetch topic metadata.
   config :metadata_fetch_timeout_ms, :validate => :number, :default => 60_000
-  # Partitioner to use - can be `default`, `uniform_sticky`, `round_robin` or a fully qualified class name of a custom partitioner.
+  # Partitioner to use - can be `round_robin` or a fully qualified class name of a custom partitioner.
   config :partitioner, :validate => :string
   # The size of the TCP receive buffer to use when reading data
   config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
@@ -369,8 +369,8 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     props.put(kafka::LINGER_MS_CONFIG, linger_ms.to_s)
     props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
     props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
-    unless partitioner.nil?
-      props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner = partitioner_class)
+    partitioner_class&.tap do |partitioner|
+      props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner)
       logger.debug('producer configured using partitioner', :partitioner_class => partitioner)
     end
     props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
@@ -405,27 +405,12 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   end
 
   def partitioner_class
-    case partitioner
-    when 'round_robin'
-      'org.apache.kafka.clients.producer.RoundRobinPartitioner'
-    when 'uniform_sticky'
-      log_partitioner_warning(partitioner, 'UniformStickyPartitioner')
-      'org.apache.kafka.clients.producer.UniformStickyPartitioner'
-    when 'default'
-      log_partitioner_warning(partitioner, 'DefaultPartitioner')
-      'org.apache.kafka.clients.producer.internals.DefaultPartitioner'
-    else
-      unless partitioner.index('.')
-        raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}"
-      end
-      partitioner # assume a fully qualified class-name
-    end
-  end
+    return nil if partitioner.nil?
+    return 'org.apache.kafka.clients.producer.RoundRobinPartitioner' if partitioner == 'round_robin'
+
+    raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}" unless partitioner.include?('.')
 
-  def log_partitioner_warning(partitioner, class_name)
-    deprecation_logger.deprecated("Producer `partitioner` is configured with the deprecated option `#{partitioner}`. " \
-      "#{class_name} is removed in kafka-client 4.0 and the `#{partitioner}` option will be removed in the plugin 12.0.0. "\
-      'Please update your configuration to use `round_robin` or unset the option to use the built-in partitioning strategy. ')
+    partitioner
   end
 
 end #class LogStash::Outputs::Kafka
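Beyond `round_robin`, `partitioner_class` passes through any dotted string as a fully qualified class name; a sketch with a hypothetical custom class (it must exist on the Kafka client's classpath):

    output {
      kafka {
        topic_id => "logs"                                # placeholder
        partitioner => "com.example.MyCustomPartitioner"  # hypothetical class; accepted because it contains a '.'
      }
    }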
data/lib/logstash-integration-kafka_jars.rb CHANGED
@@ -1,12 +1,13 @@
 # AUTOGENERATED BY THE GRADLE SCRIPT. DO NOT EDIT.
 
 require 'jar_dependencies'
-require_jar('io.confluent', 'kafka-avro-serializer', '7.9.1')
-require_jar('io.confluent', 'kafka-schema-serializer', '7.9.1')
+require_jar('io.confluent', 'kafka-avro-serializer', '8.0.0')
+require_jar('io.confluent', 'kafka-schema-serializer', '8.0.0')
 require_jar('org.apache.avro', 'avro', '1.11.4')
-require_jar('io.confluent', 'kafka-schema-registry-client', '7.9.1')
-require_jar('org.apache.kafka', 'kafka-clients', '3.9.1')
+require_jar('io.confluent', 'kafka-schema-registry-client', '8.0.0')
+require_jar('com.fasterxml.jackson.datatype', 'jackson-datatype-jdk8', '2.16.0')
+require_jar('org.apache.kafka', 'kafka-clients', '4.1.0')
 require_jar('org.slf4j', 'slf4j-api', '1.7.36')
-require_jar('com.github.luben', 'zstd-jni', '1.5.6-8')
-require_jar('at.yawk.lz4', 'lz4-java', '1.10.1')
+require_jar('com.github.luben', 'zstd-jni', '1.5.6-10')
+require_jar('org.lz4', 'lz4-java', '1.8.0')
 require_jar('org.xerial.snappy', 'snappy-java', '1.1.10.7')
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,14 +1,14 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-integration-kafka'
-  s.version = '11.8.1'
+  s.version = '12.0.0'
   s.licenses = ['Apache-2.0']
   s.summary = "Integration with Kafka - input and output plugins"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
     "using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program."
   s.authors = ["Elastic"]
   s.email = 'info@elastic.co'
-  s.homepage = "https://www.elastic.co/logstash"
-  s.require_paths = %w[lib vendor/jar-dependencies]
+  s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+  s.require_paths = ['lib', 'vendor/jar-dependencies']
 
   # Files
   s.files = Dir.glob(%w(
data/spec/fixtures/jaas.config CHANGED
@@ -1,5 +1,5 @@
 SchemaRegistry-Props {
-  org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+  org.eclipse.jetty.security.jaas.spi.PropertyFileLoginModule required
   file="build/confluent_platform/etc/schema-registry/pwd"
   debug="true";
 };
data/spec/fixtures/jaas3.config ADDED
@@ -0,0 +1,5 @@
+SchemaRegistry-Props {
+  org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+  file="build/confluent_platform/etc/schema-registry/pwd"
+  debug="true";
+};
data/spec/integration/inputs/kafka_spec.rb CHANGED
@@ -187,6 +187,83 @@ describe "inputs/kafka", :integration => true do
     end
   end
 
+  context 'setting group_protocol' do
+    let(:test_topic) { 'logstash_integration_partitioner_topic' }
+    let(:consumer_config) do
+      plain_config.merge(
+        "topics" => [test_topic],
+        'group_protocol' => group_protocol,
+        "partition_assignment_strategy" => partition_assignment_strategy,
+        "heartbeat_interval_ms" => heartbeat_interval_ms,
+        "session_timeout_ms" => session_timeout_ms
+      )
+    end
+    let(:group_protocol) { nil }
+    let(:partition_assignment_strategy) { nil }
+    let(:heartbeat_interval_ms) { LogStash::Inputs::Kafka.get_config().dig("heartbeat_interval_ms", :default) }
+    let(:session_timeout_ms) { LogStash::Inputs::Kafka.get_config().dig("session_timeout_ms", :default) }
+
+    describe "group_protocol = classic" do
+      let(:group_protocol) { 'classic' }
+
+      it 'passes register check' do
+        kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+        expect {
+          kafka_input.register
+        }.to_not raise_error
+
+        expect( kafka_input.instance_variable_get(:@heartbeat_interval_ms)).eql?(heartbeat_interval_ms)
+        expect( kafka_input.instance_variable_get(:@session_timeout_ms)).eql?(session_timeout_ms)
+      end
+    end
+
+    describe "group_protocol = consumer" do
+      let(:group_protocol) { 'consumer' }
+
+      describe "passes register check with supported config" do
+        it 'resets unsupported configs to nil' do
+          kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+          expect {
+            kafka_input.register
+          }.to_not raise_error
+
+          expect( kafka_input.instance_variable_get(:@heartbeat_interval_ms)).to be_nil
+          expect( kafka_input.instance_variable_get(:@session_timeout_ms)).to be_nil
+        end
+      end
+
+      {
+        partition_assignment_strategy: 'range',
+        heartbeat_interval_ms: 2000,
+        session_timeout_ms: 6000
+      }.each do |config_key, config_value|
+        context "with unsupported config #{config_key}" do
+          let(config_key) { config_value }
+
+          it 'raises LogStash::ConfigurationError' do
+            kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+            expect {
+              kafka_input.register
+            }.to raise_error(LogStash::ConfigurationError, /group_protocol cannot be set to.*consumer.*/)
+          end
+        end
+      end
+
+      context "with valid config" do
+        let(:test_topic) { 'logstash_integration_topic_plain' }
+        let(:manual_commit_config) do
+          consumer_config.merge(
+            'enable_auto_commit' => 'false'
+          )
+        end
+        it 'consumes data' do
+          queue = consume_messages(manual_commit_config, timeout: timeout_seconds, event_count: num_events)
+          expect(queue.length).to eq(num_events)
+        end
+      end
+    end
+  end
+
   context "static membership 'group.instance.id' setting" do
     let(:base_config) do
       {
@@ -486,7 +563,7 @@ describe "Deserializing with the schema registry", :integration => true do
   def delete_topic_if_exists(topic_name, user = nil, password = nil)
     props = java.util.Properties.new
     props.put(Java::org.apache.kafka.clients.admin.AdminClientConfig::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
-    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
    unless user.nil?
       props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
       props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
@@ -495,7 +572,7 @@ describe "Deserializing with the schema registry", :integration => true do
     topics_list = admin_client.listTopics().names().get()
     if topics_list.contains(topic_name)
       result = admin_client.deleteTopics([topic_name])
-      result.values.get(topic_name).get()
+      result.topicNameValues().get(topic_name).get()
     end
   end
 
@@ -503,7 +580,7 @@ describe "Deserializing with the schema registry", :integration => true do
     props = java.util.Properties.new
     config = org.apache.kafka.clients.producer.ProducerConfig
 
-    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
     props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081")
 
     props.put(config::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
data/spec/integration/outputs/kafka_spec.rb CHANGED
@@ -164,7 +164,7 @@ describe "outputs/kafka", :integration => true do
   let(:test_topic) { 'logstash_integration_topic3' }
 
   before :each do
-    config = base_config.merge("topic_id" => test_topic, "partitioner" => 'org.apache.kafka.clients.producer.UniformStickyPartitioner')
+    config = base_config.merge("topic_id" => test_topic, "partitioner" => 'org.apache.kafka.clients.producer.RoundRobinPartitioner')
     load_kafka_data(config) do # let's have a bit more (diverse) dataset
       num_events.times.collect do
         LogStash::Event.new.tap do |e|
@@ -212,7 +212,7 @@ describe "outputs/kafka", :integration => true do
 
   context 'setting partitioner' do
     let(:test_topic) { 'logstash_integration_partitioner_topic' }
-    let(:partitioner) { nil }
+    let(:partitioner) { 'round_robin' }
 
     before :each do
       @messages_offset = fetch_messages_from_all_partitions
@@ -221,13 +221,8 @@ describe "outputs/kafka", :integration => true do
       load_kafka_data(config)
     end
 
-    [ 'default', 'round_robin', 'uniform_sticky' ].each do |partitioner|
-      describe partitioner do
-        let(:partitioner) { partitioner }
-        it 'loads data' do
-          expect(fetch_messages_from_all_partitions - @messages_offset).to eql num_events
-        end
-      end
+    it 'loads data' do
+      expect(fetch_messages_from_all_partitions - @messages_offset).to eql num_events
     end
 
     def fetch_messages_from_all_partitions
metadata CHANGED
@@ -1,16 +1,16 @@
 --- !ruby/object:Gem::Specification
 name: logstash-integration-kafka
 version: !ruby/object:Gem::Version
-  version: 11.8.1
+  version: 12.0.0
 platform: java
 authors:
 - Elastic
-autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-12-22 00:00:00.000000000 Z
+date: 2025-10-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
+  name: logstash-core-plugin-api
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -19,7 +19,6 @@ dependencies:
     - - "<="
       - !ruby/object:Gem::Version
         version: '2.99'
-  name: logstash-core-plugin-api
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -31,12 +30,12 @@ dependencies:
       - !ruby/object:Gem::Version
        version: '2.99'
 - !ruby/object:Gem::Dependency
+  name: logstash-core
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: 8.3.0
-  name: logstash-core
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -45,12 +44,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: 8.3.0
 - !ruby/object:Gem::Dependency
+  name: logstash-codec-json
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: logstash-codec-json
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -59,12 +58,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
+  name: logstash-codec-plain
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: logstash-codec-plain
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -73,6 +72,7 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
+  name: stud
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -81,7 +81,6 @@ dependencies:
     - - "<"
       - !ruby/object:Gem::Version
        version: 0.1.0
-  name: stud
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -93,6 +92,7 @@ dependencies:
      - !ruby/object:Gem::Version
        version: 0.1.0
 - !ruby/object:Gem::Dependency
+  name: manticore
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -101,7 +101,6 @@ dependencies:
     - - "<"
       - !ruby/object:Gem::Version
        version: 1.0.0
-  name: manticore
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -113,12 +112,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: 1.0.0
 - !ruby/object:Gem::Dependency
+  name: logstash-mixin-deprecation_logger_support
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
-  name: logstash-mixin-deprecation_logger_support
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -127,12 +126,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '1.0'
 - !ruby/object:Gem::Dependency
+  name: logstash-devutils
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: logstash-devutils
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -141,12 +140,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
+  name: logstash-codec-line
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: logstash-codec-line
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -155,12 +154,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
+  name: rspec-wait
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: rspec-wait
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -169,12 +168,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
+  name: digest-crc
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
        version: 0.5.1
-  name: digest-crc
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -183,12 +182,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: 0.5.1
 - !ruby/object:Gem::Dependency
+  name: ruby-kafka
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: ruby-kafka
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -197,12 +196,12 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
+  name: snappy
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
-  name: snappy
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -236,6 +235,7 @@ files:
 - logstash-integration-kafka.gemspec
 - spec/check_docs_spec.rb
 - spec/fixtures/jaas.config
+- spec/fixtures/jaas3.config
 - spec/fixtures/pwd
 - spec/fixtures/trust-store_stub.jks
 - spec/integration/inputs/kafka_spec.rb
@@ -243,23 +243,23 @@ files:
 - spec/unit/inputs/avro_schema_fixture_payment.asvc
 - spec/unit/inputs/kafka_spec.rb
 - spec/unit/outputs/kafka_spec.rb
-- vendor/jar-dependencies/at/yawk/lz4/lz4-java/1.10.1/lz4-java-1.10.1.jar
-- vendor/jar-dependencies/com/github/luben/zstd-jni/1.5.6-8/zstd-jni-1.5.6-8.jar
-- vendor/jar-dependencies/io/confluent/kafka-avro-serializer/7.9.1/kafka-avro-serializer-7.9.1.jar
-- vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/7.9.1/kafka-schema-registry-client-7.9.1.jar
-- vendor/jar-dependencies/io/confluent/kafka-schema-serializer/7.9.1/kafka-schema-serializer-7.9.1.jar
+- vendor/jar-dependencies/com/fasterxml/jackson/datatype/jackson-datatype-jdk8/2.16.0/jackson-datatype-jdk8-2.16.0.jar
+- vendor/jar-dependencies/com/github/luben/zstd-jni/1.5.6-10/zstd-jni-1.5.6-10.jar
+- vendor/jar-dependencies/io/confluent/kafka-avro-serializer/8.0.0/kafka-avro-serializer-8.0.0.jar
+- vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/8.0.0/kafka-schema-registry-client-8.0.0.jar
+- vendor/jar-dependencies/io/confluent/kafka-schema-serializer/8.0.0/kafka-schema-serializer-8.0.0.jar
 - vendor/jar-dependencies/org/apache/avro/avro/1.11.4/avro-1.11.4.jar
-- vendor/jar-dependencies/org/apache/kafka/kafka-clients/3.9.1/kafka-clients-3.9.1.jar
+- vendor/jar-dependencies/org/apache/kafka/kafka-clients/4.1.0/kafka-clients-4.1.0.jar
+- vendor/jar-dependencies/org/lz4/lz4-java/1.8.0/lz4-java-1.8.0.jar
 - vendor/jar-dependencies/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar
 - vendor/jar-dependencies/org/xerial/snappy/snappy-java/1.1.10.7/snappy-java-1.1.10.7.jar
-homepage: https://www.elastic.co/logstash
+homepage: http://www.elastic.co/guide/en/logstash/current/index.html
 licenses:
 - Apache-2.0
 metadata:
   logstash_plugin: 'true'
   logstash_group: integration
   integration_plugins: logstash-input-kafka,logstash-output-kafka
-post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -275,13 +275,13 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
      version: '0'
 requirements: []
-rubygems_version: 3.3.26
-signing_key:
+rubygems_version: 3.6.3
 specification_version: 4
 summary: Integration with Kafka - input and output plugins
 test_files:
 - spec/check_docs_spec.rb
 - spec/fixtures/jaas.config
+- spec/fixtures/jaas3.config
 - spec/fixtures/pwd
 - spec/fixtures/trust-store_stub.jks
 - spec/integration/inputs/kafka_spec.rb