logstash-integration-kafka 11.8.0-java → 12.0.0-java

This diff shows the changes between publicly released package versions as they appear in their respective registries, and is provided for informational purposes only.
Files changed (23)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +6 -3
  3. data/docs/index.asciidoc +1 -1
  4. data/docs/input-kafka.asciidoc +18 -2
  5. data/docs/output-kafka.asciidoc +4 -8
  6. data/lib/logstash/inputs/kafka.rb +29 -1
  7. data/lib/logstash/outputs/kafka.rb +9 -24
  8. data/lib/logstash-integration-kafka_jars.rb +6 -5
  9. data/logstash-integration-kafka.gemspec +1 -1
  10. data/spec/fixtures/jaas.config +1 -1
  11. data/spec/fixtures/jaas3.config +5 -0
  12. data/spec/integration/inputs/kafka_spec.rb +80 -3
  13. data/spec/integration/outputs/kafka_spec.rb +4 -9
  14. data/vendor/jar-dependencies/com/fasterxml/jackson/datatype/jackson-datatype-jdk8/2.16.0/jackson-datatype-jdk8-2.16.0.jar +0 -0
  15. data/vendor/jar-dependencies/com/github/luben/zstd-jni/{1.5.6-8/zstd-jni-1.5.6-8.jar → 1.5.6-10/zstd-jni-1.5.6-10.jar} +0 -0
  16. data/vendor/jar-dependencies/io/confluent/kafka-avro-serializer/8.0.0/kafka-avro-serializer-8.0.0.jar +0 -0
  17. data/vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/8.0.0/kafka-schema-registry-client-8.0.0.jar +0 -0
  18. data/vendor/jar-dependencies/io/confluent/kafka-schema-serializer/8.0.0/kafka-schema-serializer-8.0.0.jar +0 -0
  19. data/vendor/jar-dependencies/org/apache/kafka/kafka-clients/{3.9.1/kafka-clients-3.9.1.jar → 4.1.0/kafka-clients-4.1.0.jar} +0 -0
  20. metadata +9 -6
  21. data/vendor/jar-dependencies/io/confluent/kafka-avro-serializer/7.9.1/kafka-avro-serializer-7.9.1.jar +0 -0
  22. data/vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/7.9.1/kafka-schema-registry-client-7.9.1.jar +0 -0
  23. data/vendor/jar-dependencies/io/confluent/kafka-schema-serializer/7.9.1/kafka-schema-serializer-7.9.1.jar +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 101a2c1fc70011cadd866f4aeca79850045a51272eac7d704e766a2e08353c52
-  data.tar.gz: e51c20830a9d22d4e049288a3ab1c152db33121a87a73d08d493f681df707fec
+  metadata.gz: 744abb784463992c54a691cf16cbab0eca25f9b5cc73a54d9808f15bca223021
+  data.tar.gz: d35f68e589ee17adb865b7edcb1c9627b1345e9de93c5037828c15e8e83defdb
 SHA512:
-  metadata.gz: f17d5f0e28dff57741485e1d3aed9bca31cbf4a6b7a1b1950220cda6d2e8b23f0afbb29a9a3fa6797469941c8e116ab0fbff8bc6a6573f6b9b7a91d472a53323
-  data.tar.gz: 293e08920a1560bd5405479ea8313336fed652abd51f64038b2e7628a5978da486e7eb00fa2aa577b0f54b93bff90943051bb76509f597f18e2417a7a2fbb3d9
+  metadata.gz: b85e870af8ae8dc030320ecd0c9ad312d72cceacb64b2e0ac313ca3629b3a1c24a3029f1512485b79ccd68e272eccddfee1ac32a314dc562af7cc1762b82aa53
+  data.tar.gz: 89fe6266982b250eb000650c91d8fdfc653cfc02d9cdf570c743edd5d7290ffd1735b0fbc3b68b018fb65215b67a909cdc5bb1dfb05e025256b35de0cca6b1af
data/CHANGELOG.md CHANGED
@@ -1,6 +1,9 @@
-## 11.8.0
-  - Deprecate partitioner `default` and `uniform_sticky` options [#206](https://github.com/logstash-plugins/logstash-integration-kafka/pull/206)
-    Both options are deprecated in Kafka client 3 and will be removed in the plugin 12.0.0.
+## 12.0.0
+  - Update kafka client to 4.1.0 and transitive dependencies [#205](https://github.com/logstash-plugins/logstash-integration-kafka/pull/205)
+    - Breaking Change: partitioner options `default` and `uniform_sticky` are removed
+    - `linger_ms` default value changed from 0 to 5
+  - Add `group_protocols` options for configuring Kafka consumer rebalance protocol
+    - Setting `group_protocols => consumer` opts in to the new consumer group protocol
 
 ## 11.7.0
   - Add `reconnect_backoff_max_ms` option for configuring kafka client [#204](https://github.com/logstash-plugins/logstash-integration-kafka/pull/204)
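
Editor's note: for pipelines upgrading from 11.x, the breaking partitioner change above means a producer configured with one of the removed options no longer starts. A minimal sketch of the adjustment, assuming an otherwise unchanged output block (the bootstrap server and topic name are illustrative, not from this diff):

    # Before (11.x): `default` and `uniform_sticky` were accepted but deprecated.
    # output {
    #   kafka {
    #     bootstrap_servers => "localhost:9092"
    #     topic_id => "example-topic"
    #     partitioner => "uniform_sticky"
    #   }
    # }

    # After (12.0.0): use `round_robin`, a fully qualified partitioner class name,
    # or omit `partitioner` to use the Kafka client's built-in strategy.
    output {
      kafka {
        bootstrap_servers => "localhost:9092"
        topic_id => "example-topic"
        partitioner => "round_robin"
      }
    }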
data/docs/index.asciidoc CHANGED
@@ -1,7 +1,7 @@
 :plugin: kafka
 :type: integration
 :no_codec:
-:kafka_client: 3.9.1
+:kafka_client: 4.1.0
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
data/docs/input-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
 :plugin: kafka
 :type: input
 :default_codec: plain
-:kafka_client: 3.9.1
-:kafka_client_doc: 39
+:kafka_client: 4.1.0
+:kafka_client_doc: 41
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
@@ -132,6 +132,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
 | <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-group_instance_id>> |<<string,string>>|No
+| <<plugins-{type}s-{plugin}-group_protocol>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-isolation_level>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
@@ -412,6 +413,21 @@ You can set this value to use information such as a hostname, an IP, or anything
 NOTE: In cases when multiple threads are configured and `consumer_threads` is greater than one, a suffix is appended to
 the `group_instance_id` to avoid collisions.
 
+[id="plugins-{type}s-{plugin}-group_protocol"]
+===== `group_protocol`
+
+* Value can be either of: `classic`, `consumer`
+* Default value is `classic`.
+
+Specifies the consumer group rebalance protocol used by the Kafka client.
+
+`classic` is the default protocol. During a rebalance, all consumer instances pause message processing until partition assignments are complete.
+
+`consumer` is an incremental rebalance protocol introduced in Kafka 4. It avoids global synchronization barriers by only pausing partitions that are reassigned.
+When using `consumer`, the following settings **cannot be configured**:
+`partition_assignment_strategy`, `heartbeat_interval_ms`, and `session_timeout_ms`.
+
+
 [id="plugins-{type}s-{plugin}-heartbeat_interval_ms"]
 ===== `heartbeat_interval_ms`
 
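
Editor's note: the new `group_protocol` setting documented above is opt-in. A minimal input sketch, assuming a plaintext broker on localhost and an illustrative topic and group (not taken from this diff); per the docs, `partition_assignment_strategy`, `heartbeat_interval_ms`, and `session_timeout_ms` must be left unset when `consumer` is used:

    input {
      kafka {
        bootstrap_servers => "localhost:9092"
        topics => ["example-topic"]
        group_id => "example-group"
        # Opt in to the Kafka 4 incremental rebalance protocol; the default remains "classic".
        group_protocol => "consumer"
      }
    }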
data/docs/output-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
 :plugin: kafka
 :type: output
 :default_codec: plain
-:kafka_client: 3.9.1
-:kafka_client_doc: 39
+:kafka_client: 4.1.0
+:kafka_client_doc: 41
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
@@ -285,7 +285,7 @@ Serializer class for the key of the message
 ===== `linger_ms`
 
 * Value type is <<number,number>>
-* Default value is `0`
+* Default value is `5`
 
 The producer groups together any records that arrive in between request
 transmissions into a single batched request. Normally this occurs only under
@@ -351,13 +351,9 @@ The max time in milliseconds before a metadata refresh is forced.
 
 By not setting this value, the plugin uses the built-in partitioning strategy provided by the Kafka client. Read more about the "partitioner.class" on the Kafka documentation.
 
-Available options are as follows:
+Available option is as follows:
 
-* `default` hashes the `message_key` of an event to get the partition. When no message key is present, the plugin picks a partition in a round-robin fashion. Please note that this is a different strategy than the one used when `partitioner` is left unset.
 * `round_robin` distributes writes to all partitions equally, regardless of `message_key`
-* `uniform_sticky` hashes the `message_key` of an event to get the partition. When no message key is present, the plugin sticks to a partition for the duration of a batch than randomly picks a new one.
-
-NOTE: `default` and `uniform_sticky` are deprecated and will be removed in `12.0.0`.
 
 [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
 ===== `receive_buffer_bytes`
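
Editor's note: since the `linger_ms` default moves from `0` to `5`, pipelines that depended on the previous immediate-send behavior can pin the old value explicitly. A minimal sketch, assuming an illustrative broker and topic:

    output {
      kafka {
        bootstrap_servers => "localhost:9092"
        topic_id => "example-topic"
        # Restore the pre-12.0.0 behavior of sending records without an artificial batching delay.
        linger_ms => 0
      }
    }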
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -129,6 +129,12 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   # consumer crated by each thread an artificial suffix is appended to the user provided `group_instance_id`
   # to avoid clashing.
   config :group_instance_id, :validate => :string
+  # `classic` is the "stop-the-world" rebalances.
+  # Any consumer restart or failure triggers a full-group rebalance, pausing processing for all consumers.
+  # `consumer` is an incremental rebalance protocol that avoids global sync barriers,
+  # pausing only the partitions that are reassigned.
+  # It cannot set along with `partition_assignment_strategy`, `heartbeat_interval_ms` and `session_timeout_ms`
+  config :group_protocol, :validate => ["classic", "consumer"], :default => "classic"
   # The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
   # that the consumer's session stays active and to facilitate rebalancing when new
   # consumers join or leave the group. The value must be set lower than
@@ -293,6 +299,8 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     reassign_dns_lookup
     @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
     check_schema_registry_parameters
+
+    set_group_protocol!
   end
 
   METADATA_NONE = Set[].freeze
@@ -450,6 +458,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
       props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes.to_s) unless fetch_min_bytes.nil?
       props.put(kafka::GROUP_ID_CONFIG, group_id)
      props.put(kafka::GROUP_INSTANCE_ID_CONFIG, group_instance_id) unless group_instance_id.nil?
+      props.put(kafka::GROUP_PROTOCOL_CONFIG, group_protocol)
       props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms.to_s) unless heartbeat_interval_ms.nil?
       props.put(kafka::ISOLATION_LEVEL_CONFIG, isolation_level)
       props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
@@ -471,7 +480,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
       props.put("security.protocol", security_protocol) unless security_protocol.nil?
       if schema_registry_url
         props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroDeserializer.java_class)
-        serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+        serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
         props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.uri.to_s)
         if schema_registry_proxy && !schema_registry_proxy.empty?
           props.put(serdes_config::PROXY_HOST, @schema_registry_proxy_host)
@@ -513,6 +522,25 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     end
   end
 
+  # In order to use group_protocol => consumer, heartbeat_interval_ms, session_timeout_ms and partition_assignment_strategy need to be unset
+  # If any of these are not using the default value of the plugin, we raise a configuration error
+  def set_group_protocol!
+    return unless group_protocol == "consumer"
+
+    heartbeat_overridden = heartbeat_interval_ms != self.class.get_config.dig("heartbeat_interval_ms", :default)
+    session_overridden = session_timeout_ms != self.class.get_config.dig("session_timeout_ms", :default)
+    strategy_defined = !partition_assignment_strategy.nil?
+
+    if strategy_defined || heartbeat_overridden || session_overridden
+      raise LogStash::ConfigurationError, "group_protocol cannot be set to 'consumer' "\
+        "when any of partition_assignment_strategy, heartbeat_interval_ms or session_timeout_ms is set"
+    end
+
+    @heartbeat_interval_ms = nil
+    @session_timeout_ms = nil
+    logger.debug("Settings 'heartbeat_interval_ms' and 'session_timeout_ms' have been reset since 'group_protocol' is configured as 'consumer'")
+  end
+
   def partition_assignment_strategy_class
     case partition_assignment_strategy
     when 'range'
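
Editor's note: the register-time check added above (`set_group_protocol!`) rejects any pipeline that combines `group_protocol => consumer` with one of the three conflicting settings. A sketch of a configuration that would now fail at startup; the broker, topic, and timeout value are illustrative, and the quoted error text comes from the diff:

    input {
      kafka {
        bootstrap_servers => "localhost:9092"
        topics => ["example-topic"]
        group_protocol => "consumer"
        # Setting this (or partition_assignment_strategy / heartbeat_interval_ms) makes register
        # raise LogStash::ConfigurationError: "group_protocol cannot be set to 'consumer' when any of
        # partition_assignment_strategy, heartbeat_interval_ms or session_timeout_ms is set"
        session_timeout_ms => 6000
      }
    }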
data/lib/logstash/outputs/kafka.rb CHANGED
@@ -101,7 +101,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # This setting accomplishes this by adding a small amount of artificial delay—that is,
   # rather than immediately sending out a record the producer will wait for up to the given delay
   # to allow other records to be sent so that the sends can be batched together.
-  config :linger_ms, :validate => :number, :default => 0 # Kafka default
+  config :linger_ms, :validate => :number, :default => 5 # Kafka default
   # The maximum size of a request
   config :max_request_size, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
   # The key for the message
@@ -110,7 +110,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   config :message_headers, :validate => :hash, :default => {}
   # the timeout setting for initial metadata request to fetch topic metadata.
   config :metadata_fetch_timeout_ms, :validate => :number, :default => 60_000
-  # Partitioner to use - can be `default`, `uniform_sticky`, `round_robin` or a fully qualified class name of a custom partitioner.
+  # Partitioner to use - can be `round_robin` or a fully qualified class name of a custom partitioner.
   config :partitioner, :validate => :string
   # The size of the TCP receive buffer to use when reading data
   config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
@@ -369,8 +369,8 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     props.put(kafka::LINGER_MS_CONFIG, linger_ms.to_s)
     props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
     props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
-    unless partitioner.nil?
-      props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner = partitioner_class)
+    partitioner_class&.tap do |partitioner|
+      props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner)
       logger.debug('producer configured using partitioner', :partitioner_class => partitioner)
     end
     props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
@@ -405,27 +405,12 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   end
 
   def partitioner_class
-    case partitioner
-    when 'round_robin'
-      'org.apache.kafka.clients.producer.RoundRobinPartitioner'
-    when 'uniform_sticky'
-      log_partitioner_warning(partitioner, 'UniformStickyPartitioner')
-      'org.apache.kafka.clients.producer.UniformStickyPartitioner'
-    when 'default'
-      log_partitioner_warning(partitioner, 'DefaultPartitioner')
-      'org.apache.kafka.clients.producer.internals.DefaultPartitioner'
-    else
-      unless partitioner.index('.')
-        raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}"
-      end
-      partitioner # assume a fully qualified class-name
-    end
-  end
+    return nil if partitioner.nil?
+    return 'org.apache.kafka.clients.producer.RoundRobinPartitioner' if partitioner == 'round_robin'
+
+    raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}" unless partitioner.include?('.')
 
-  def log_partitioner_warning(partitioner, class_name)
-    deprecation_logger.deprecated("Producer `partitioner` is configured with the deprecated option `#{partitioner}`. " \
-      "#{class_name} is removed in kafka-client 4.0 and the `#{partitioner}` option will be removed in the plugin 12.0.0. "\
-      'Please update your configuration to use `round_robin` or unset the option to use the build-in partitioning strategy. ')
+    partitioner
   end
 
 end #class LogStash::Outputs::Kafka
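
Editor's note: the reworked `partitioner_class` still passes through any value containing a dot as a fully qualified class name, which is also how the updated integration spec below exercises it. A sketch using the class name taken from that spec change (broker and topic are illustrative):

    output {
      kafka {
        bootstrap_servers => "localhost:9092"
        topic_id => "example-topic"
        # Contains a '.', so it is passed to the Kafka client verbatim as partitioner.class.
        partitioner => "org.apache.kafka.clients.producer.RoundRobinPartitioner"
      }
    }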
data/lib/logstash-integration-kafka_jars.rb CHANGED
@@ -1,12 +1,13 @@
 # AUTOGENERATED BY THE GRADLE SCRIPT. DO NOT EDIT.
 
 require 'jar_dependencies'
-require_jar('io.confluent', 'kafka-avro-serializer', '7.9.1')
-require_jar('io.confluent', 'kafka-schema-serializer', '7.9.1')
+require_jar('io.confluent', 'kafka-avro-serializer', '8.0.0')
+require_jar('io.confluent', 'kafka-schema-serializer', '8.0.0')
 require_jar('org.apache.avro', 'avro', '1.11.4')
-require_jar('io.confluent', 'kafka-schema-registry-client', '7.9.1')
-require_jar('org.apache.kafka', 'kafka-clients', '3.9.1')
+require_jar('io.confluent', 'kafka-schema-registry-client', '8.0.0')
+require_jar('com.fasterxml.jackson.datatype', 'jackson-datatype-jdk8', '2.16.0')
+require_jar('org.apache.kafka', 'kafka-clients', '4.1.0')
 require_jar('org.slf4j', 'slf4j-api', '1.7.36')
-require_jar('com.github.luben', 'zstd-jni', '1.5.6-8')
+require_jar('com.github.luben', 'zstd-jni', '1.5.6-10')
 require_jar('org.lz4', 'lz4-java', '1.8.0')
 require_jar('org.xerial.snappy', 'snappy-java', '1.1.10.7')
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-integration-kafka'
-  s.version = '11.8.0'
+  s.version = '12.0.0'
   s.licenses = ['Apache-2.0']
   s.summary = "Integration with Kafka - input and output plugins"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/fixtures/jaas.config CHANGED
@@ -1,5 +1,5 @@
 SchemaRegistry-Props {
-  org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+  org.eclipse.jetty.security.jaas.spi.PropertyFileLoginModule required
   file="build/confluent_platform/etc/schema-registry/pwd"
   debug="true";
 };
data/spec/fixtures/jaas3.config ADDED
@@ -0,0 +1,5 @@
+SchemaRegistry-Props {
+  org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+  file="build/confluent_platform/etc/schema-registry/pwd"
+  debug="true";
+};
data/spec/integration/inputs/kafka_spec.rb CHANGED
@@ -187,6 +187,83 @@ describe "inputs/kafka", :integration => true do
     end
   end
 
+  context 'setting group_protocol' do
+    let(:test_topic) { 'logstash_integration_partitioner_topic' }
+    let(:consumer_config) do
+      plain_config.merge(
+        "topics" => [test_topic],
+        'group_protocol' => group_protocol,
+        "partition_assignment_strategy" => partition_assignment_strategy,
+        "heartbeat_interval_ms" => heartbeat_interval_ms,
+        "session_timeout_ms" => session_timeout_ms
+      )
+    end
+    let(:group_protocol) { nil }
+    let(:partition_assignment_strategy) { nil }
+    let(:heartbeat_interval_ms) { LogStash::Inputs::Kafka.get_config().dig("heartbeat_interval_ms", :default) }
+    let(:session_timeout_ms) { LogStash::Inputs::Kafka.get_config().dig("session_timeout_ms", :default) }
+
+    describe "group_protocol = classic" do
+      let(:group_protocol) { 'classic' }
+
+      it 'passes register check' do
+        kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+        expect {
+          kafka_input.register
+        }.to_not raise_error
+
+        expect( kafka_input.instance_variable_get(:@heartbeat_interval_ms)).eql?(heartbeat_interval_ms)
+        expect( kafka_input.instance_variable_get(:@session_timeout_ms)).eql?(session_timeout_ms)
+      end
+    end
+
+    describe "group_protocol = consumer" do
+      let(:group_protocol) { 'consumer' }
+
+      describe "passes register check with supported config" do
+        it 'reset unsupported config to nil' do
+          kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+          expect {
+            kafka_input.register
+          }.to_not raise_error
+
+          expect( kafka_input.instance_variable_get(:@heartbeat_interval_ms)).to be_nil
+          expect( kafka_input.instance_variable_get(:@session_timeout_ms)).to be_nil
+        end
+      end
+
+      {
+        partition_assignment_strategy: 'range',
+        heartbeat_interval_ms: 2000,
+        session_timeout_ms: 6000
+      }.each do |config_key, config_value|
+        context "with unsupported config #{config_key}" do
+          let(config_key) { config_value }
+
+          it 'raises LogStash::ConfigurationError' do
+            kafka_input = LogStash::Inputs::Kafka.new(consumer_config)
+            expect {
+              kafka_input.register
+            }.to raise_error(LogStash::ConfigurationError, /group_protocol cannot be set to.*consumer.*/)
+          end
+        end
+      end
+
+      context "with valid config" do
+        let(:test_topic) { 'logstash_integration_topic_plain' }
+        let(:manual_commit_config) do
+          consumer_config.merge(
+            'enable_auto_commit' => 'false'
+          )
+        end
+        it 'consume data' do
+          queue = consume_messages(manual_commit_config, timeout: timeout_seconds, event_count: num_events)
+          expect(queue.length).to eq(num_events)
+        end
+      end
+    end
+  end
+
   context "static membership 'group.instance.id' setting" do
     let(:base_config) do
       {
@@ -486,7 +563,7 @@ describe "Deserializing with the schema registry", :integration => true do
   def delete_topic_if_exists(topic_name, user = nil, password = nil)
     props = java.util.Properties.new
     props.put(Java::org.apache.kafka.clients.admin.AdminClientConfig::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
-    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
     unless user.nil?
       props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
       props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
@@ -495,7 +572,7 @@ describe "Deserializing with the schema registry", :integration => true do
     topics_list = admin_client.listTopics().names().get()
     if topics_list.contains(topic_name)
       result = admin_client.deleteTopics([topic_name])
-      result.values.get(topic_name).get()
+      result.topicNameValues().get(topic_name).get()
     end
   end
 
@@ -503,7 +580,7 @@ describe "Deserializing with the schema registry", :integration => true do
     props = java.util.Properties.new
     config = org.apache.kafka.clients.producer.ProducerConfig
 
-    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
+    serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig
     props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081")
 
     props.put(config::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
data/spec/integration/outputs/kafka_spec.rb CHANGED
@@ -164,7 +164,7 @@ describe "outputs/kafka", :integration => true do
     let(:test_topic) { 'logstash_integration_topic3' }
 
     before :each do
-      config = base_config.merge("topic_id" => test_topic, "partitioner" => 'org.apache.kafka.clients.producer.UniformStickyPartitioner')
+      config = base_config.merge("topic_id" => test_topic, "partitioner" => 'org.apache.kafka.clients.producer.RoundRobinPartitioner')
       load_kafka_data(config) do # let's have a bit more (diverse) dataset
         num_events.times.collect do
           LogStash::Event.new.tap do |e|
@@ -212,7 +212,7 @@ describe "outputs/kafka", :integration => true do
 
   context 'setting partitioner' do
     let(:test_topic) { 'logstash_integration_partitioner_topic' }
-    let(:partitioner) { nil }
+    let(:partitioner) { 'round_robin' }
 
     before :each do
       @messages_offset = fetch_messages_from_all_partitions
@@ -221,13 +221,8 @@ describe "outputs/kafka", :integration => true do
       load_kafka_data(config)
    end
 
-    [ 'default', 'round_robin', 'uniform_sticky' ].each do |partitioner|
-      describe partitioner do
-        let(:partitioner) { partitioner }
-        it 'loads data' do
-          expect(fetch_messages_from_all_partitions - @messages_offset).to eql num_events
-        end
-      end
+    it 'loads data' do
+      expect(fetch_messages_from_all_partitions - @messages_offset).to eql num_events
     end
 
     def fetch_messages_from_all_partitions
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: logstash-integration-kafka
 version: !ruby/object:Gem::Version
-  version: 11.8.0
+  version: 12.0.0
 platform: java
 authors:
 - Elastic
@@ -235,6 +235,7 @@ files:
 - logstash-integration-kafka.gemspec
 - spec/check_docs_spec.rb
 - spec/fixtures/jaas.config
+- spec/fixtures/jaas3.config
 - spec/fixtures/pwd
 - spec/fixtures/trust-store_stub.jks
 - spec/integration/inputs/kafka_spec.rb
@@ -242,12 +243,13 @@ files:
 - spec/unit/inputs/avro_schema_fixture_payment.asvc
 - spec/unit/inputs/kafka_spec.rb
 - spec/unit/outputs/kafka_spec.rb
-- vendor/jar-dependencies/com/github/luben/zstd-jni/1.5.6-8/zstd-jni-1.5.6-8.jar
-- vendor/jar-dependencies/io/confluent/kafka-avro-serializer/7.9.1/kafka-avro-serializer-7.9.1.jar
-- vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/7.9.1/kafka-schema-registry-client-7.9.1.jar
-- vendor/jar-dependencies/io/confluent/kafka-schema-serializer/7.9.1/kafka-schema-serializer-7.9.1.jar
+- vendor/jar-dependencies/com/fasterxml/jackson/datatype/jackson-datatype-jdk8/2.16.0/jackson-datatype-jdk8-2.16.0.jar
+- vendor/jar-dependencies/com/github/luben/zstd-jni/1.5.6-10/zstd-jni-1.5.6-10.jar
+- vendor/jar-dependencies/io/confluent/kafka-avro-serializer/8.0.0/kafka-avro-serializer-8.0.0.jar
+- vendor/jar-dependencies/io/confluent/kafka-schema-registry-client/8.0.0/kafka-schema-registry-client-8.0.0.jar
+- vendor/jar-dependencies/io/confluent/kafka-schema-serializer/8.0.0/kafka-schema-serializer-8.0.0.jar
 - vendor/jar-dependencies/org/apache/avro/avro/1.11.4/avro-1.11.4.jar
-- vendor/jar-dependencies/org/apache/kafka/kafka-clients/3.9.1/kafka-clients-3.9.1.jar
+- vendor/jar-dependencies/org/apache/kafka/kafka-clients/4.1.0/kafka-clients-4.1.0.jar
 - vendor/jar-dependencies/org/lz4/lz4-java/1.8.0/lz4-java-1.8.0.jar
 - vendor/jar-dependencies/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar
 - vendor/jar-dependencies/org/xerial/snappy/snappy-java/1.1.10.7/snappy-java-1.1.10.7.jar
@@ -279,6 +281,7 @@ summary: Integration with Kafka - input and output plugins
 test_files:
 - spec/check_docs_spec.rb
 - spec/fixtures/jaas.config
+- spec/fixtures/jaas3.config
 - spec/fixtures/pwd
 - spec/fixtures/trust-store_stub.jks
 - spec/integration/inputs/kafka_spec.rb