logstash-integration-kafka 11.6.4-java → 12.0.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/docs/index.asciidoc +1 -1
  4. data/docs/input-kafka.asciidoc +28 -2
  5. data/docs/output-kafka.asciidoc +15 -8
  6. data/lib/logstash/inputs/kafka.rb +33 -1
  7. data/lib/logstash/outputs/kafka.rb +14 -17
  8. data/lib/logstash-integration-kafka_jars.rb +6 -5
  9. data/logstash-integration-kafka.gemspec +1 -1
  10. data/spec/fixtures/jaas.config +1 -1
  11. data/spec/fixtures/jaas3.config +5 -0
  12. data/spec/integration/inputs/kafka_spec.rb +80 -3
  13. data/spec/integration/outputs/kafka_spec.rb +4 -9
  14. data/spec/unit/inputs/kafka_spec.rb +12 -4
  15. data/spec/unit/outputs/kafka_spec.rb +1 -1
  16. data/vendor/jar-dependencies/com/fasterxml/jackson/datatype/jackson-datatype-jdk8/2.16.0/jackson-datatype-jdk8-2.16.0.jar +0 -0
  17. data/vendor/jar-dependencies/com/github/luben/zstd-jni/{1.5.6-8/zstd-jni-1.5.6-8.jar → 1.5.6-10/zstd-jni-1.5.6-10.jar} +0 -0
  18. data/vendor/jar-dependencies/io/confluent/kafka-avro-serializer/8.0.0/kafka-avro-serializer-8.0.0.jar +0 -0
  19. data/vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/8.0.0/kafka-schema-registry-client-8.0.0.jar +0 -0
  20. data/vendor/jar-dependencies/io/confluent/kafka-schema-serializer/8.0.0/kafka-schema-serializer-8.0.0.jar +0 -0
  21. data/vendor/jar-dependencies/org/apache/kafka/kafka-clients/{3.9.1/kafka-clients-3.9.1.jar → 4.1.0/kafka-clients-4.1.0.jar} +0 -0
  22. metadata +10 -7
  23. data/vendor/jar-dependencies/io/confluent/kafka-avro-serializer/7.9.1/kafka-avro-serializer-7.9.1.jar +0 -0
  24. data/vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/7.9.1/kafka-schema-registry-client-7.9.1.jar +0 -0
  25. data/vendor/jar-dependencies/io/confluent/kafka-schema-serializer/7.9.1/kafka-schema-serializer-7.9.1.jar +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 24a6a245c978681eeadad520b621e5f7a85bdbc21ba73917f3e70cf7a043c74f
- data.tar.gz: 3ab1acf046db153809a1fee3ecc12129bf19545a2519e868bb2d90e7f394acaa
+ metadata.gz: 744abb784463992c54a691cf16cbab0eca25f9b5cc73a54d9808f15bca223021
+ data.tar.gz: d35f68e589ee17adb865b7edcb1c9627b1345e9de93c5037828c15e8e83defdb
  SHA512:
- metadata.gz: 48acaae432ac06a78ecae3010c0b89285ae13315c1e2e08641c8b3d983bd1e4374f9563b0ec92b09763316c56cccbe6b80cf2d5aca6b0f95f60b9e35c4c1514e
- data.tar.gz: 1f35ff308f3d30afde4215b4ccf68683ba71218ab78860d0addc37fe82c74b480dc5c2be423d0abbc89e8ad43264b7e8c29d7b331fdf3c51a17466eb5abff3fa
+ metadata.gz: b85e870af8ae8dc030320ecd0c9ad312d72cceacb64b2e0ac313ca3629b3a1c24a3029f1512485b79ccd68e272eccddfee1ac32a314dc562af7cc1762b82aa53
+ data.tar.gz: 89fe6266982b250eb000650c91d8fdfc653cfc02d9cdf570c743edd5d7290ffd1735b0fbc3b68b018fb65215b67a909cdc5bb1dfb05e025256b35de0cca6b1af
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+ ## 12.0.0
+ - Update kafka client to 4.1.0 and transitive dependencies [#205](https://github.com/logstash-plugins/logstash-integration-kafka/pull/205)
+ - Breaking Change: partitioner options `default` and `uniform_sticky` are removed
+ - `linger_ms` default value changed from 0 to 5
+ - Add `group_protocol` option for configuring the Kafka consumer rebalance protocol
+ - Setting `group_protocol => consumer` opts in to the new consumer group protocol
+
+ ## 11.7.0
+ - Add `reconnect_backoff_max_ms` option for configuring kafka client [#204](https://github.com/logstash-plugins/logstash-integration-kafka/pull/204)
+
  ## 11.6.4
  - Display exception chain comes from kafka client [#200](https://github.com/logstash-plugins/logstash-integration-kafka/pull/200)

data/docs/index.asciidoc CHANGED
@@ -1,7 +1,7 @@
  :plugin: kafka
  :type: integration
  :no_codec:
- :kafka_client: 3.9.1
+ :kafka_client: 4.1.0

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
data/docs/input-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
  :plugin: kafka
  :type: input
  :default_codec: plain
- :kafka_client: 3.9.1
- :kafka_client_doc: 39
+ :kafka_client: 4.1.0
+ :kafka_client_doc: 41

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
@@ -132,6 +132,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-group_instance_id>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-group_protocol>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-isolation_level>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
@@ -145,6 +146,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-poll_timeout_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-reconnect_backoff_max_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-sasl_client_callback_handler_class>> |<<string,string>>|No
@@ -411,6 +413,21 @@ You can set this value to use information such as a hostname, an IP, or anything
  NOTE: In cases when multiple threads are configured and `consumer_threads` is greater than one, a suffix is appended to
  the `group_instance_id` to avoid collisions.

+ [id="plugins-{type}s-{plugin}-group_protocol"]
+ ===== `group_protocol`
+
+ * Value can be either of: `classic`, `consumer`
+ * Default value is `classic`.
+
+ Specifies the consumer group rebalance protocol used by the Kafka client.
+
+ `classic` is the default protocol. During a rebalance, all consumer instances pause message processing until partition assignments are complete.
+
+ `consumer` is an incremental rebalance protocol introduced in Kafka 4. It avoids global synchronization barriers by only pausing partitions that are reassigned.
+ When using `consumer`, the following settings **cannot be configured**:
+ `partition_assignment_strategy`, `heartbeat_interval_ms`, and `session_timeout_ms`.
+
+
  [id="plugins-{type}s-{plugin}-heartbeat_interval_ms"]
  ===== `heartbeat_interval_ms`

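To make the restriction concrete, here is a rough sketch in the style of this plugin's specs rather than an excerpt from the docs. It assumes a JRuby/Logstash environment with the plugin installed; the topic name and broker address are hypothetical.

    require "logstash/inputs/kafka"

    # Opting in to the incremental protocol with only supported settings passes register.
    input = LogStash::Inputs::Kafka.new(
      "bootstrap_servers" => "localhost:9092",   # hypothetical local broker
      "topics"            => ["example_topic"],  # hypothetical topic
      "group_protocol"    => "consumer"
    )
    input.register

    # Combining it with a classic-only setting fails the register check.
    LogStash::Inputs::Kafka.new(
      "topics"             => ["example_topic"],
      "group_protocol"     => "consumer",
      "session_timeout_ms" => 6000               # overrides the plugin default
    ).register                                   # raises LogStash::ConfigurationError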
@@ -561,6 +578,15 @@ The amount of time to wait before attempting to reconnect to a given host.
  This avoids repeatedly connecting to a host in a tight loop.
  This backoff applies to all requests sent by the consumer to the broker.

+ [id="plugins-{type}s-{plugin}-reconnect_backoff_max_ms"]
+ ===== `reconnect_backoff_max_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `1000` milliseconds.
+
+ The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+
  [id="plugins-{type}s-{plugin}-request_timeout_ms"]
  ===== `request_timeout_ms`

data/docs/output-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
  :plugin: kafka
  :type: output
  :default_codec: plain
- :kafka_client: 3.9.1
- :kafka_client_doc: 39
+ :kafka_client: 4.1.0
+ :kafka_client_doc: 41

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
@@ -115,6 +115,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-partitioner>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-reconnect_backoff_max_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
@@ -284,7 +285,7 @@ Serializer class for the key of the message
  ===== `linger_ms`

  * Value type is <<number,number>>
- * Default value is `0`
+ * Default value is `5`

  The producer groups together any records that arrive in between request
  transmissions into a single batched request. Normally this occurs only under
@@ -348,14 +349,11 @@ The max time in milliseconds before a metadata refresh is forced.
  * Value type is <<string,string>>
  * There is no default value for this setting.

- The default behavior is to hash the `message_key` of an event to get the partition.
- When no message key is present, the plugin picks a partition in a round-robin fashion.
+ When this value is not set, the plugin uses the built-in partitioning strategy provided by the Kafka client. Read more about "partitioner.class" in the Kafka documentation.

- Available options for choosing a partitioning strategy are as follows:
+ The available option is as follows:

- * `default` use the default partitioner as described above
  * `round_robin` distributes writes to all partitions equally, regardless of `message_key`
- * `uniform_sticky` sticks to a partition for the duration of a batch than randomly picks a new one

  [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
  ===== `receive_buffer_bytes`
@@ -373,6 +371,15 @@ The size of the TCP receive buffer to use when reading data

  The amount of time to wait before attempting to reconnect to a given host when a connection fails.

+ [id="plugins-{type}s-{plugin}-reconnect_backoff_max_ms"]
+ ===== `reconnect_backoff_max_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`.
+
+ The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+
  [id="plugins-{type}s-{plugin}-request_timeout_ms"]
  ===== `request_timeout_ms`

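Taken together, an output that previously relied on `default` or `uniform_sticky` now has to either drop `partitioner` entirely or switch to `round_robin` or a custom class. A minimal sketch in the style of this plugin's specs, assuming a JRuby/Logstash environment with the plugin and its jars installed; the topic name is hypothetical:

    require "logstash/outputs/kafka"

    # linger_ms now defaults to 5; it is set explicitly here only for illustration.
    output = LogStash::Outputs::Kafka.new(
      "topic_id"    => "example_topic",   # hypothetical topic
      "partitioner" => "round_robin",     # or a fully qualified custom class name
      "linger_ms"   => 5
    )
    output.register   # 'default' or 'uniform_sticky' here would raise LogStash::ConfigurationError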
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -129,6 +129,12 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  # consumer created by each thread an artificial suffix is appended to the user provided `group_instance_id`
  # to avoid clashing.
  config :group_instance_id, :validate => :string
+ # `classic` uses "stop-the-world" rebalances.
+ # Any consumer restart or failure triggers a full-group rebalance, pausing processing for all consumers.
+ # `consumer` is an incremental rebalance protocol that avoids global sync barriers,
+ # pausing only the partitions that are reassigned.
+ # It cannot be set along with `partition_assignment_strategy`, `heartbeat_interval_ms` and `session_timeout_ms`
+ config :group_protocol, :validate => ["classic", "consumer"], :default => "classic"
  # The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
  # that the consumer's session stays active and to facilitate rebalancing when new
  # consumers join or leave the group. The value must be set lower than
@@ -167,6 +173,9 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  # This avoids repeatedly connecting to a host in a tight loop.
  # This backoff applies to all connection attempts by the client to a broker.
  config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+ # The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ # If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+ config :reconnect_backoff_max_ms, :validate => :number, :default => 1000 # Kafka default
  # The amount of time to wait before attempting to retry a failed fetch request
  # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
  config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
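The new option maps to the Kafka client's `reconnect.backoff.max.ms` (the unit specs below verify this mapping). As a rough illustration of the documented doubling-and-cap behavior, not plugin or client code, and ignoring the jitter the client applies:

    # Per-host reconnect delay grows exponentially per consecutive failure, capped at the maximum.
    reconnect_backoff_ms     = 50     # plugin default
    reconnect_backoff_max_ms = 1000   # plugin default

    delays = (0..6).map { |failures| [reconnect_backoff_ms * (2**failures), reconnect_backoff_max_ms].min }
    # delays => [50, 100, 200, 400, 800, 1000, 1000]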
@@ -290,6 +299,8 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  reassign_dns_lookup
  @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
  check_schema_registry_parameters
+
+ set_group_protocol!
  end

  METADATA_NONE = Set[].freeze
@@ -447,6 +458,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes.to_s) unless fetch_min_bytes.nil?
  props.put(kafka::GROUP_ID_CONFIG, group_id)
  props.put(kafka::GROUP_INSTANCE_ID_CONFIG, group_instance_id) unless group_instance_id.nil?
+ props.put(kafka::GROUP_PROTOCOL_CONFIG, group_protocol)
  props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms.to_s) unless heartbeat_interval_ms.nil?
  props.put(kafka::ISOLATION_LEVEL_CONFIG, isolation_level)
  props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
@@ -457,6 +469,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy_class) unless partition_assignment_strategy.nil?
  props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
  props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+ props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?
  props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
  props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s) unless retry_backoff_ms.nil?
  props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s) unless send_buffer_bytes.nil?
@@ -467,7 +480,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  props.put("security.protocol", security_protocol) unless security_protocol.nil?
  if schema_registry_url
  props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroDeserializer.java_class)
- serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+ serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
  props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.uri.to_s)
  if schema_registry_proxy && !schema_registry_proxy.empty?
  props.put(serdes_config::PROXY_HOST, @schema_registry_proxy_host)
@@ -509,6 +522,25 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  end
  end

+ # In order to use group_protocol => consumer, heartbeat_interval_ms, session_timeout_ms and partition_assignment_strategy need to be unset
+ # If any of these are not using the default value of the plugin, we raise a configuration error
+ def set_group_protocol!
+ return unless group_protocol == "consumer"
+
+ heartbeat_overridden = heartbeat_interval_ms != self.class.get_config.dig("heartbeat_interval_ms", :default)
+ session_overridden = session_timeout_ms != self.class.get_config.dig("session_timeout_ms", :default)
+ strategy_defined = !partition_assignment_strategy.nil?
+
+ if strategy_defined || heartbeat_overridden || session_overridden
+ raise LogStash::ConfigurationError, "group_protocol cannot be set to 'consumer' "\
+ "when any of partition_assignment_strategy, heartbeat_interval_ms or session_timeout_ms is set"
+ end
+
+ @heartbeat_interval_ms = nil
+ @session_timeout_ms = nil
+ logger.debug("Settings 'heartbeat_interval_ms' and 'session_timeout_ms' have been reset since 'group_protocol' is configured as 'consumer'")
+ end
+
  def partition_assignment_strategy_class
  case partition_assignment_strategy
  when 'range'
data/lib/logstash/outputs/kafka.rb CHANGED
@@ -101,7 +101,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  # This setting accomplishes this by adding a small amount of artificial delay—that is,
  # rather than immediately sending out a record the producer will wait for up to the given delay
  # to allow other records to be sent so that the sends can be batched together.
- config :linger_ms, :validate => :number, :default => 0 # Kafka default
+ config :linger_ms, :validate => :number, :default => 5 # Kafka default
  # The maximum size of a request
  config :max_request_size, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
  # The key for the message
@@ -110,12 +110,15 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  config :message_headers, :validate => :hash, :default => {}
  # the timeout setting for initial metadata request to fetch topic metadata.
  config :metadata_fetch_timeout_ms, :validate => :number, :default => 60_000
- # Partitioner to use - can be `default`, `uniform_sticky`, `round_robin` or a fully qualified class name of a custom partitioner.
+ # Partitioner to use - can be `round_robin` or a fully qualified class name of a custom partitioner.
  config :partitioner, :validate => :string
  # The size of the TCP receive buffer to use when reading data
  config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
  # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
  config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+ # The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ # If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+ config :reconnect_backoff_max_ms, :validate => :number, :default => 1000 # Kafka default
  # The default retry behavior is to retry until successful. To prevent data loss,
  # the use of this setting is discouraged.
  #
@@ -366,12 +369,13 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  props.put(kafka::LINGER_MS_CONFIG, linger_ms.to_s)
  props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
  props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
- unless partitioner.nil?
- props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner = partitioner_class)
+ partitioner_class&.tap do |partitioner|
+ props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner)
  logger.debug('producer configured using partitioner', :partitioner_class => partitioner)
  end
  props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
  props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+ props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?
  props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
  props.put(kafka::RETRIES_CONFIG, retries.to_s) unless retries.nil?
  props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
@@ -401,19 +405,12 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  end

  def partitioner_class
- case partitioner
- when 'round_robin'
- 'org.apache.kafka.clients.producer.RoundRobinPartitioner'
- when 'uniform_sticky'
- 'org.apache.kafka.clients.producer.UniformStickyPartitioner'
- when 'default'
- 'org.apache.kafka.clients.producer.internals.DefaultPartitioner'
- else
- unless partitioner.index('.')
- raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}"
- end
- partitioner # assume a fully qualified class-name
- end
+ return nil if partitioner.nil?
+ return 'org.apache.kafka.clients.producer.RoundRobinPartitioner' if partitioner == 'round_robin'
+
+ raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}" unless partitioner.include?('.')
+
+ partitioner
  end

  end #class LogStash::Outputs::Kafka
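For reference, the contract of the refactored method above can be summarized as follows (a summary of the code shown, not additional plugin code; the custom class name is hypothetical):

    # partitioner = nil                        -> nil, so PARTITIONER_CLASS_CONFIG is never set
    # partitioner = 'round_robin'              -> 'org.apache.kafka.clients.producer.RoundRobinPartitioner'
    # partitioner = 'com.acme.MyPartitioner'   -> passed through as a fully qualified custom class
    # partitioner = 'default' / 'uniform_sticky' -> LogStash::ConfigurationError (options removed in 12.0.0)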
data/lib/logstash-integration-kafka_jars.rb CHANGED
@@ -1,12 +1,13 @@
  # AUTOGENERATED BY THE GRADLE SCRIPT. DO NOT EDIT.

  require 'jar_dependencies'
- require_jar('io.confluent', 'kafka-avro-serializer', '7.9.1')
- require_jar('io.confluent', 'kafka-schema-serializer', '7.9.1')
+ require_jar('io.confluent', 'kafka-avro-serializer', '8.0.0')
+ require_jar('io.confluent', 'kafka-schema-serializer', '8.0.0')
  require_jar('org.apache.avro', 'avro', '1.11.4')
- require_jar('io.confluent', 'kafka-schema-registry-client', '7.9.1')
- require_jar('org.apache.kafka', 'kafka-clients', '3.9.1')
+ require_jar('io.confluent', 'kafka-schema-registry-client', '8.0.0')
+ require_jar('com.fasterxml.jackson.datatype', 'jackson-datatype-jdk8', '2.16.0')
+ require_jar('org.apache.kafka', 'kafka-clients', '4.1.0')
  require_jar('org.slf4j', 'slf4j-api', '1.7.36')
- require_jar('com.github.luben', 'zstd-jni', '1.5.6-8')
+ require_jar('com.github.luben', 'zstd-jni', '1.5.6-10')
  require_jar('org.lz4', 'lz4-java', '1.8.0')
  require_jar('org.xerial.snappy', 'snappy-java', '1.1.10.7')
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-integration-kafka'
- s.version = '11.6.4'
+ s.version = '12.0.0'
  s.licenses = ['Apache-2.0']
  s.summary = "Integration with Kafka - input and output plugins"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/fixtures/jaas.config CHANGED
@@ -1,5 +1,5 @@
  SchemaRegistry-Props {
- org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+ org.eclipse.jetty.security.jaas.spi.PropertyFileLoginModule required
  file="build/confluent_platform/etc/schema-registry/pwd"
  debug="true";
  };
data/spec/fixtures/jaas3.config ADDED
@@ -0,0 +1,5 @@
+ SchemaRegistry-Props {
+ org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+ file="build/confluent_platform/etc/schema-registry/pwd"
+ debug="true";
+ };
data/spec/integration/inputs/kafka_spec.rb CHANGED
@@ -187,6 +187,83 @@ describe "inputs/kafka", :integration => true do
  end
  end

+ context 'setting group_protocol' do
+ let(:test_topic) { 'logstash_integration_partitioner_topic' }
+ let(:consumer_config) do
+ plain_config.merge(
+ "topics" => [test_topic],
+ 'group_protocol' => group_protocol,
+ "partition_assignment_strategy" => partition_assignment_strategy,
+ "heartbeat_interval_ms" => heartbeat_interval_ms,
+ "session_timeout_ms" => session_timeout_ms
+ )
+ end
+ let(:group_protocol) { nil }
+ let(:partition_assignment_strategy) { nil }
+ let(:heartbeat_interval_ms) { LogStash::Inputs::Kafka.get_config().dig("heartbeat_interval_ms", :default) }
+ let(:session_timeout_ms) { LogStash::Inputs::Kafka.get_config().dig("session_timeout_ms", :default) }
+
+ describe "group_protocol = classic" do
+ let(:group_protocol) { 'classic' }
+
+ it 'passes register check' do
+ kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+ expect {
+ kafka_input.register
+ }.to_not raise_error
+
+ expect( kafka_input.instance_variable_get(:@heartbeat_interval_ms)).eql?(heartbeat_interval_ms)
+ expect( kafka_input.instance_variable_get(:@session_timeout_ms)).eql?(session_timeout_ms)
+ end
+ end
+
+ describe "group_protocol = consumer" do
+ let(:group_protocol) { 'consumer' }
+
+ describe "passes register check with supported config" do
+ it 'reset unsupported config to nil' do
+ kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+ expect {
+ kafka_input.register
+ }.to_not raise_error
+
+ expect( kafka_input.instance_variable_get(:@heartbeat_interval_ms)).to be_nil
+ expect( kafka_input.instance_variable_get(:@session_timeout_ms)).to be_nil
+ end
+ end
+
+ {
+ partition_assignment_strategy: 'range',
+ heartbeat_interval_ms: 2000,
+ session_timeout_ms: 6000
+ }.each do |config_key, config_value|
+ context "with unsupported config #{config_key}" do
+ let(config_key) { config_value }
+
+ it 'raises LogStash::ConfigurationError' do
+ kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+ expect {
+ kafka_input.register
+ }.to raise_error(LogStash::ConfigurationError, /group_protocol cannot be set to.*consumer.*/)
+ end
+ end
+ end
+
+ context "with valid config" do
+ let(:test_topic) { 'logstash_integration_topic_plain' }
+ let(:manual_commit_config) do
+ consumer_config.merge(
+ 'enable_auto_commit' => 'false'
+ )
+ end
+ it 'consume data' do
+ queue = consume_messages(manual_commit_config, timeout: timeout_seconds, event_count: num_events)
+ expect(queue.length).to eq(num_events)
+ end
+ end
+ end
+ end
+
  context "static membership 'group.instance.id' setting" do
  let(:base_config) do
  {
@@ -486,7 +563,7 @@ describe "Deserializing with the schema registry", :integration => true do
  def delete_topic_if_exists(topic_name, user = nil, password = nil)
  props = java.util.Properties.new
  props.put(Java::org.apache.kafka.clients.admin.AdminClientConfig::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
- serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+ serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
  unless user.nil?
  props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
  props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
@@ -495,7 +572,7 @@ describe "Deserializing with the schema registry", :integration => true do
  topics_list = admin_client.listTopics().names().get()
  if topics_list.contains(topic_name)
  result = admin_client.deleteTopics([topic_name])
- result.values.get(topic_name).get()
+ result.topicNameValues().get(topic_name).get()
  end
  end

@@ -503,7 +580,7 @@ describe "Deserializing with the schema registry", :integration => true do
  props = java.util.Properties.new
  config = org.apache.kafka.clients.producer.ProducerConfig

- serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+ serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
  props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081")

  props.put(config::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
data/spec/integration/outputs/kafka_spec.rb CHANGED
@@ -164,7 +164,7 @@ describe "outputs/kafka", :integration => true do
  let(:test_topic) { 'logstash_integration_topic3' }

  before :each do
- config = base_config.merge("topic_id" => test_topic, "partitioner" => 'org.apache.kafka.clients.producer.UniformStickyPartitioner')
+ config = base_config.merge("topic_id" => test_topic, "partitioner" => 'org.apache.kafka.clients.producer.RoundRobinPartitioner')
  load_kafka_data(config) do # let's have a bit more (diverse) dataset
  num_events.times.collect do
  LogStash::Event.new.tap do |e|
@@ -212,7 +212,7 @@ describe "outputs/kafka", :integration => true do

  context 'setting partitioner' do
  let(:test_topic) { 'logstash_integration_partitioner_topic' }
- let(:partitioner) { nil }
+ let(:partitioner) { 'round_robin' }

  before :each do
  @messages_offset = fetch_messages_from_all_partitions
@@ -221,13 +221,8 @@ describe "outputs/kafka", :integration => true do
  load_kafka_data(config)
  end

- [ 'default', 'round_robin', 'uniform_sticky' ].each do |partitioner|
- describe partitioner do
- let(:partitioner) { partitioner }
- it 'loads data' do
- expect(fetch_messages_from_all_partitions - @messages_offset).to eql num_events
- end
- end
+ it 'loads data' do
+ expect(fetch_messages_from_all_partitions - @messages_offset).to eql num_events
  end
  end

  def fetch_messages_from_all_partitions
data/spec/unit/inputs/kafka_spec.rb CHANGED
@@ -378,11 +378,15 @@ describe LogStash::Inputs::Kafka do
  end

  context 'string integer config' do
- let(:config) { super().merge('session_timeout_ms' => '25000', 'max_poll_interval_ms' => '345000') }
+ let(:config) { super().merge('session_timeout_ms' => '25000',
+ 'max_poll_interval_ms' => '345000',
+ 'reconnect_backoff_max_ms' => '1500') }

  it "sets integer values" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
- to receive(:new).with(hash_including('session.timeout.ms' => '25000', 'max.poll.interval.ms' => '345000')).
+ to receive(:new).with(hash_including('session.timeout.ms' => '25000',
+ 'max.poll.interval.ms' => '345000',
+ 'reconnect.backoff.max.ms' => '1500')).
  and_return kafka_client = double('kafka-consumer')

  expect( subject.send(:create_consumer, 'sample_client-1', 'group_instance_id') ).to be kafka_client
@@ -390,11 +394,15 @@ describe LogStash::Inputs::Kafka do
  end

  context 'integer config' do
- let(:config) { super().merge('session_timeout_ms' => 25200, 'max_poll_interval_ms' => 123_000) }
+ let(:config) { super().merge('session_timeout_ms' => 25200,
+ 'max_poll_interval_ms' => 123_000,
+ 'reconnect_backoff_max_ms' => 1500) }

  it "sets integer values" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
- to receive(:new).with(hash_including('session.timeout.ms' => '25200', 'max.poll.interval.ms' => '123000')).
+ to receive(:new).with(hash_including('session.timeout.ms' => '25200',
+ 'max.poll.interval.ms' => '123000',
+ 'reconnect.backoff.max.ms' => '1500')).
  and_return kafka_client = double('kafka-consumer')

  expect( subject.send(:create_consumer, 'sample_client-2', 'group_instance_id') ).to be kafka_client
data/spec/unit/outputs/kafka_spec.rb CHANGED
@@ -114,7 +114,7 @@ describe "outputs/kafka" do
  end

  context "when KafkaProducer#send() raises a non-retriable exception" do
- let(:failcount) { (rand * 10).to_i }
+ let(:failcount) { 3 }

  let(:exception_classes) { [
  org.apache.kafka.common.errors.SerializationException,
metadata CHANGED
@@ -1,13 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: logstash-integration-kafka
  version: !ruby/object:Gem::Version
- version: 11.6.4
+ version: 12.0.0
  platform: java
  authors:
  - Elastic
  bindir: bin
  cert_chain: []
- date: 2025-08-28 00:00:00.000000000 Z
+ date: 2025-10-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: logstash-core-plugin-api
@@ -235,6 +235,7 @@ files:
  - logstash-integration-kafka.gemspec
  - spec/check_docs_spec.rb
  - spec/fixtures/jaas.config
+ - spec/fixtures/jaas3.config
  - spec/fixtures/pwd
  - spec/fixtures/trust-store_stub.jks
  - spec/integration/inputs/kafka_spec.rb
@@ -242,12 +243,13 @@ files:
  - spec/unit/inputs/avro_schema_fixture_payment.asvc
  - spec/unit/inputs/kafka_spec.rb
  - spec/unit/outputs/kafka_spec.rb
- - vendor/jar-dependencies/com/github/luben/zstd-jni/1.5.6-8/zstd-jni-1.5.6-8.jar
- - vendor/jar-dependencies/io/confluent/kafka-avro-serializer/7.9.1/kafka-avro-serializer-7.9.1.jar
- - vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/7.9.1/kafka-schema-registry-client-7.9.1.jar
- - vendor/jar-dependencies/io/confluent/kafka-schema-serializer/7.9.1/kafka-schema-serializer-7.9.1.jar
+ - vendor/jar-dependencies/com/fasterxml/jackson/datatype/jackson-datatype-jdk8/2.16.0/jackson-datatype-jdk8-2.16.0.jar
+ - vendor/jar-dependencies/com/github/luben/zstd-jni/1.5.6-10/zstd-jni-1.5.6-10.jar
+ - vendor/jar-dependencies/io/confluent/kafka-avro-serializer/8.0.0/kafka-avro-serializer-8.0.0.jar
+ - vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/8.0.0/kafka-schema-registry-client-8.0.0.jar
+ - vendor/jar-dependencies/io/confluent/kafka-schema-serializer/8.0.0/kafka-schema-serializer-8.0.0.jar
  - vendor/jar-dependencies/org/apache/avro/avro/1.11.4/avro-1.11.4.jar
- - vendor/jar-dependencies/org/apache/kafka/kafka-clients/3.9.1/kafka-clients-3.9.1.jar
+ - vendor/jar-dependencies/org/apache/kafka/kafka-clients/4.1.0/kafka-clients-4.1.0.jar
  - vendor/jar-dependencies/org/lz4/lz4-java/1.8.0/lz4-java-1.8.0.jar
  - vendor/jar-dependencies/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar
  - vendor/jar-dependencies/org/xerial/snappy/snappy-java/1.1.10.7/snappy-java-1.1.10.7.jar
@@ -279,6 +281,7 @@ summary: Integration with Kafka - input and output plugins
  test_files:
  - spec/check_docs_spec.rb
  - spec/fixtures/jaas.config
+ - spec/fixtures/jaas3.config
  - spec/fixtures/pwd
  - spec/fixtures/trust-store_stub.jks
  - spec/integration/inputs/kafka_spec.rb