logstash-integration-kafka 11.6.4-java → 11.8.0-java

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 24a6a245c978681eeadad520b621e5f7a85bdbc21ba73917f3e70cf7a043c74f
- data.tar.gz: 3ab1acf046db153809a1fee3ecc12129bf19545a2519e868bb2d90e7f394acaa
+ metadata.gz: 101a2c1fc70011cadd866f4aeca79850045a51272eac7d704e766a2e08353c52
+ data.tar.gz: e51c20830a9d22d4e049288a3ab1c152db33121a87a73d08d493f681df707fec
  SHA512:
- metadata.gz: 48acaae432ac06a78ecae3010c0b89285ae13315c1e2e08641c8b3d983bd1e4374f9563b0ec92b09763316c56cccbe6b80cf2d5aca6b0f95f60b9e35c4c1514e
- data.tar.gz: 1f35ff308f3d30afde4215b4ccf68683ba71218ab78860d0addc37fe82c74b480dc5c2be423d0abbc89e8ad43264b7e8c29d7b331fdf3c51a17466eb5abff3fa
+ metadata.gz: f17d5f0e28dff57741485e1d3aed9bca31cbf4a6b7a1b1950220cda6d2e8b23f0afbb29a9a3fa6797469941c8e116ab0fbff8bc6a6573f6b9b7a91d472a53323
+ data.tar.gz: 293e08920a1560bd5405479ea8313336fed652abd51f64038b2e7628a5978da486e7eb00fa2aa577b0f54b93bff90943051bb76509f597f18e2417a7a2fbb3d9
data/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+ ## 11.8.0
+ - Deprecate partitioner `default` and `uniform_sticky` options [#206](https://github.com/logstash-plugins/logstash-integration-kafka/pull/206)
+   Both options are deprecated in Kafka client 3 and will be removed in plugin version 12.0.0.
+
+ ## 11.7.0
+ - Add `reconnect_backoff_max_ms` option for configuring the Kafka client [#204](https://github.com/logstash-plugins/logstash-integration-kafka/pull/204)
+
  ## 11.6.4
  - Display the exception chain coming from the Kafka client [#200](https://github.com/logstash-plugins/logstash-integration-kafka/pull/200)
 
@@ -145,6 +145,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-poll_timeout_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-reconnect_backoff_max_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-sasl_client_callback_handler_class>> |<<string,string>>|No
@@ -561,6 +562,15 @@ The amount of time to wait before attempting to reconnect to a given host.
  This avoids repeatedly connecting to a host in a tight loop.
  This backoff applies to all requests sent by the consumer to the broker.
 
+ [id="plugins-{type}s-{plugin}-reconnect_backoff_max_ms"]
+ ===== `reconnect_backoff_max_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `1000` milliseconds.
+
+ The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+
  [id="plugins-{type}s-{plugin}-request_timeout_ms"]
  ===== `request_timeout_ms`
 
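For readers applying this change, a minimal, hypothetical input configuration (broker address and topic are placeholders) that uses the new option could look like the sketch below. The exponential back-off itself is performed by the Kafka client: with `reconnect_backoff_ms => 50` and `reconnect_backoff_max_ms => 1500`, consecutive failed connection attempts to the same broker would wait roughly 50, 100, 200, 400, 800, then 1500 ms (the client also adds random jitter).

```
input {
  kafka {
    bootstrap_servers        => "localhost:9092"   # placeholder broker address
    topics                   => ["example-topic"]  # placeholder topic
    reconnect_backoff_ms     => 50                 # initial back-off (Kafka default)
    reconnect_backoff_max_ms => 1500               # added in 11.7.0: upper bound for the exponential back-off
  }
}
```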
@@ -115,6 +115,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-partitioner>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-reconnect_backoff_max_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
@@ -348,14 +349,15 @@ The max time in milliseconds before a metadata refresh is forced.
  * Value type is <<string,string>>
  * There is no default value for this setting.
 
- The default behavior is to hash the `message_key` of an event to get the partition.
- When no message key is present, the plugin picks a partition in a round-robin fashion.
+ When this value is not set, the plugin uses the built-in partitioning strategy provided by the Kafka client. Read more about `partitioner.class` in the Kafka documentation.
 
- Available options for choosing a partitioning strategy are as follows:
+ Available options are as follows:
 
- * `default` use the default partitioner as described above
+ * `default` hashes the `message_key` of an event to get the partition. When no message key is present, the plugin picks a partition in a round-robin fashion. Note that this is a different strategy from the one used when `partitioner` is left unset.
  * `round_robin` distributes writes to all partitions equally, regardless of `message_key`
- * `uniform_sticky` sticks to a partition for the duration of a batch than randomly picks a new one
+ * `uniform_sticky` hashes the `message_key` of an event to get the partition. When no message key is present, the plugin sticks to a partition for the duration of a batch and then randomly picks a new one.
+
+ NOTE: `default` and `uniform_sticky` are deprecated and will be removed in `12.0.0`.
 
  [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
  ===== `receive_buffer_bytes`
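To make the migration concrete, a hedged output configuration sketch follows (broker and topic are placeholders): either leave `partitioner` unset so the Kafka client's built-in strategy is used, or set it to `round_robin`, the only named value that is not deprecated. A fully qualified partitioner class name appears to be accepted as well, per the `else` branch in the producer code later in this diff.

```
output {
  kafka {
    bootstrap_servers => "localhost:9092"   # placeholder broker address
    topic_id          => "example-topic"    # placeholder topic
    # Preferred: omit `partitioner` entirely and let the Kafka client choose partitions.
    # If an explicit strategy is required, `round_robin` is the only non-deprecated named value:
    partitioner       => "round_robin"
  }
}
```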
@@ -373,6 +375,15 @@ The size of the TCP receive buffer to use when reading data
 
  The amount of time to wait before attempting to reconnect to a given host when a connection fails.
 
+ [id="plugins-{type}s-{plugin}-reconnect_backoff_max_ms"]
+ ===== `reconnect_backoff_max_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`.
+
+ The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+
  [id="plugins-{type}s-{plugin}-request_timeout_ms"]
  ===== `request_timeout_ms`
 
@@ -167,6 +167,9 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  # This avoids repeatedly connecting to a host in a tight loop.
  # This backoff applies to all connection attempts by the client to a broker.
  config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+ # The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ # If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+ config :reconnect_backoff_max_ms, :validate => :number, :default => 1000 # Kafka default
  # The amount of time to wait before attempting to retry a failed fetch request
  # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
  config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
@@ -457,6 +460,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy_class) unless partition_assignment_strategy.nil?
  props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
  props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+ props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?
  props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
  props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s) unless retry_backoff_ms.nil?
  props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s) unless send_buffer_bytes.nil?
@@ -116,6 +116,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
  # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
  config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+ # The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+ # If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+ config :reconnect_backoff_max_ms, :validate => :number, :default => 1000 # Kafka default
  # The default retry behavior is to retry until successful. To prevent data loss,
  # the use of this setting is discouraged.
  #
@@ -372,6 +375,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  end
  props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
  props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+ props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?
  props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
  props.put(kafka::RETRIES_CONFIG, retries.to_s) unless retries.nil?
  props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
@@ -405,8 +409,10 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  when 'round_robin'
  'org.apache.kafka.clients.producer.RoundRobinPartitioner'
  when 'uniform_sticky'
+ log_partitioner_warning(partitioner, 'UniformStickyPartitioner')
  'org.apache.kafka.clients.producer.UniformStickyPartitioner'
  when 'default'
+ log_partitioner_warning(partitioner, 'DefaultPartitioner')
  'org.apache.kafka.clients.producer.internals.DefaultPartitioner'
  else
  unless partitioner.index('.')
@@ -416,4 +422,10 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  end
  end
 
+ def log_partitioner_warning(partitioner, class_name)
+   deprecation_logger.deprecated("Producer `partitioner` is configured with the deprecated option `#{partitioner}`. " \
+     "#{class_name} is removed in kafka-client 4.0 and the `#{partitioner}` option will be removed in the plugin 12.0.0. " \
+     'Please update your configuration to use `round_robin` or unset the option to use the built-in partitioning strategy.')
+ end
+
  end #class LogStash::Outputs::Kafka
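For context, here is a hypothetical configuration that hits the new warning path, together with the message text interpolated from `log_partitioner_warning` above (connection details are placeholders; the exact log formatting depends on the Logstash deprecation logger):

```
output {
  kafka {
    bootstrap_servers => "localhost:9092"   # placeholder broker address
    topic_id          => "example-topic"    # placeholder topic
    partitioner       => "default"          # deprecated value; logs the warning below when the producer is created
  }
}
# Approximate deprecation message, interpolated from the source above:
#   Producer `partitioner` is configured with the deprecated option `default`.
#   DefaultPartitioner is removed in kafka-client 4.0 and the `default` option will be
#   removed in the plugin 12.0.0. Please update your configuration to use `round_robin`
#   or unset the option to use the built-in partitioning strategy.
```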
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-integration-kafka'
- s.version = '11.6.4'
+ s.version = '11.8.0'
  s.licenses = ['Apache-2.0']
  s.summary = "Integration with Kafka - input and output plugins"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
@@ -378,11 +378,15 @@ describe LogStash::Inputs::Kafka do
  end
 
  context 'string integer config' do
- let(:config) { super().merge('session_timeout_ms' => '25000', 'max_poll_interval_ms' => '345000') }
+ let(:config) { super().merge('session_timeout_ms' => '25000',
+                              'max_poll_interval_ms' => '345000',
+                              'reconnect_backoff_max_ms' => '1500') }
 
  it "sets integer values" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
- to receive(:new).with(hash_including('session.timeout.ms' => '25000', 'max.poll.interval.ms' => '345000')).
+ to receive(:new).with(hash_including('session.timeout.ms' => '25000',
+                                      'max.poll.interval.ms' => '345000',
+                                      'reconnect.backoff.max.ms' => '1500')).
  and_return kafka_client = double('kafka-consumer')
 
  expect( subject.send(:create_consumer, 'sample_client-1', 'group_instance_id') ).to be kafka_client
@@ -390,11 +394,15 @@ describe LogStash::Inputs::Kafka do
  end
 
  context 'integer config' do
- let(:config) { super().merge('session_timeout_ms' => 25200, 'max_poll_interval_ms' => 123_000) }
+ let(:config) { super().merge('session_timeout_ms' => 25200,
+                              'max_poll_interval_ms' => 123_000,
+                              'reconnect_backoff_max_ms' => 1500) }
 
  it "sets integer values" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
- to receive(:new).with(hash_including('session.timeout.ms' => '25200', 'max.poll.interval.ms' => '123000')).
+ to receive(:new).with(hash_including('session.timeout.ms' => '25200',
+                                      'max.poll.interval.ms' => '123000',
+                                      'reconnect.backoff.max.ms' => '1500')).
  and_return kafka_client = double('kafka-consumer')
 
  expect( subject.send(:create_consumer, 'sample_client-2', 'group_instance_id') ).to be kafka_client
@@ -114,7 +114,7 @@ describe "outputs/kafka" do
  end
 
  context "when KafkaProducer#send() raises a non-retriable exception" do
- let(:failcount) { (rand * 10).to_i }
+ let(:failcount) { 3 }
 
  let(:exception_classes) { [
  org.apache.kafka.common.errors.SerializationException,
metadata CHANGED
@@ -1,13 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: logstash-integration-kafka
  version: !ruby/object:Gem::Version
- version: 11.6.4
+ version: 11.8.0
  platform: java
  authors:
  - Elastic
  bindir: bin
  cert_chain: []
- date: 2025-08-28 00:00:00.000000000 Z
+ date: 2025-10-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: logstash-core-plugin-api