logstash-integration-kafka 11.6.4-java → 11.7.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 24a6a245c978681eeadad520b621e5f7a85bdbc21ba73917f3e70cf7a043c74f
-  data.tar.gz: 3ab1acf046db153809a1fee3ecc12129bf19545a2519e868bb2d90e7f394acaa
+  metadata.gz: 73e5319cf0e521282ced1615e3cb864ae58fd4195b39a67e42ff995cb22d5004
+  data.tar.gz: 380f7e07b99231ae047fc8545e7be56132f92b7d8179ae97a1f7f95c49a01033
 SHA512:
-  metadata.gz: 48acaae432ac06a78ecae3010c0b89285ae13315c1e2e08641c8b3d983bd1e4374f9563b0ec92b09763316c56cccbe6b80cf2d5aca6b0f95f60b9e35c4c1514e
-  data.tar.gz: 1f35ff308f3d30afde4215b4ccf68683ba71218ab78860d0addc37fe82c74b480dc5c2be423d0abbc89e8ad43264b7e8c29d7b331fdf3c51a17466eb5abff3fa
+  metadata.gz: 88d9dcbab467d2dc1af0e79e3b76af0cfdb3e2398644f1d4337414965c67813ee6d6829478c3bce8d082bbde269e34c134867a0831d0c4235cf9855c388597cf
+  data.tar.gz: 840eb2502efc99a1517790f315bb4a181697345eb422d36e64f7cf4d577999415acc43c32174ce905e31d0cd5cc00bd8f1b16962e26c78617db8fef3397280dd
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
+## 11.7.0
+  - Add `reconnect_backoff_max_ms` option for configuring kafka client [#204](https://github.com/logstash-plugins/logstash-integration-kafka/pull/204)
+
 ## 11.6.4
   - Display exception chain comes from kafka client [#200](https://github.com/logstash-plugins/logstash-integration-kafka/pull/200)
 
data/docs/input-kafka.asciidoc CHANGED
@@ -145,6 +145,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more details.
 | <<plugins-{type}s-{plugin}-poll_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-reconnect_backoff_max_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-sasl_client_callback_handler_class>> |<<string,string>>|No
@@ -561,6 +562,15 @@ The amount of time to wait before attempting to reconnect to a given host.
 This avoids repeatedly connecting to a host in a tight loop.
 This backoff applies to all requests sent by the consumer to the broker.
 
+[id="plugins-{type}s-{plugin}-reconnect_backoff_max_ms"]
+===== `reconnect_backoff_max_ms`
+
+* Value type is <<number,number>>
+* Default value is `1000` milliseconds.
+
+The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+
 [id="plugins-{type}s-{plugin}-request_timeout_ms"]
 ===== `request_timeout_ms`
 
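The option description above is easiest to read as a schedule: starting from `reconnect.backoff.ms`, the per-host wait grows exponentially after each consecutive failure until it reaches `reconnect.backoff.max.ms` (the real Kafka client also adds random jitter to each wait to avoid connection storms). A minimal Ruby sketch of the deterministic core, using the plugin defaults:

```ruby
# Illustrative only: jitter omitted.
base_ms = 50     # reconnect_backoff_ms default
max_ms  = 1_000  # reconnect_backoff_max_ms default

schedule = (0...8).map { |failures| [base_ms * (2**failures), max_ms].min }
p schedule
# => [50, 100, 200, 400, 800, 1000, 1000, 1000]
```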
data/docs/output-kafka.asciidoc CHANGED
@@ -115,6 +115,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more details.
 | <<plugins-{type}s-{plugin}-partitioner>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-reconnect_backoff_max_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
@@ -373,6 +374,15 @@ The size of the TCP receive buffer to use when reading data
 
 The amount of time to wait before attempting to reconnect to a given host when a connection fails.
 
+[id="plugins-{type}s-{plugin}-reconnect_backoff_max_ms"]
+===== `reconnect_backoff_max_ms`
+
+* Value type is <<number,number>>
+* Default value is `1000`.
+
+The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+
 [id="plugins-{type}s-{plugin}-request_timeout_ms"]
 ===== `request_timeout_ms`
 
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -167,6 +167,9 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   # This avoids repeatedly connecting to a host in a tight loop.
   # This backoff applies to all connection attempts by the client to a broker.
   config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+  # The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+  # If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+  config :reconnect_backoff_max_ms, :validate => :number, :default => 1000 # Kafka default
   # The amount of time to wait before attempting to retry a failed fetch request
   # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
   config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
@@ -457,6 +460,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy_class) unless partition_assignment_strategy.nil?
     props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
     props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+    props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?
     props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
     props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s) unless retry_backoff_ms.nil?
     props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s) unless send_buffer_bytes.nil?
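In this hunk the local `kafka` refers to the Kafka client's `ConsumerConfig` class, whose `*_CONFIG` constants are the dotted property names, and the `unless …nil?` guards skip unset options so the client keeps its built-in defaults. A minimal JRuby sketch of that pattern, assuming a session with the Kafka client jars (bundled inside Logstash) on the classpath:

```ruby
# Minimal sketch, not plugin code.
require 'java'

kafka = org.apache.kafka.clients.consumer.ConsumerConfig
props = java.util.Properties.new

reconnect_backoff_max_ms = 1_500
# Values are stringified; a nil option would be skipped entirely, leaving
# the Kafka client's own default in force.
props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?

puts props.get('reconnect.backoff.max.ms') # => "1500"
```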
data/lib/logstash/outputs/kafka.rb CHANGED
@@ -116,6 +116,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
   # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
   config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+  # The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect.
+  # If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum.
+  config :reconnect_backoff_max_ms, :validate => :number, :default => 1000 # Kafka default
   # The default retry behavior is to retry until successful. To prevent data loss,
   # the use of this setting is discouraged.
   #
@@ -372,6 +375,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     end
     props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
     props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+    props.put(kafka::RECONNECT_BACKOFF_MAX_MS_CONFIG, reconnect_backoff_max_ms.to_s) unless reconnect_backoff_max_ms.nil?
     props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
     props.put(kafka::RETRIES_CONFIG, retries.to_s) unless retries.nil?
     props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
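Both plugins follow the convention the specs below verify: underscored Logstash option names correspond to dotted Kafka property names, and numeric values are coerced to strings. A hypothetical Ruby illustration of just that correspondence (the real code uses the explicit `ConsumerConfig`/`ProducerConfig` constants shown above, not string rewriting):

```ruby
# Hypothetical mapping sketch; not how the plugin builds its Properties.
params = { 'reconnect_backoff_ms' => 50, 'reconnect_backoff_max_ms' => 2_000 }

props = params.each_with_object({}) do |(name, value), acc|
  acc[name.tr('_', '.')] = value.to_s unless value.nil?
end

p props # => {"reconnect.backoff.ms"=>"50", "reconnect.backoff.max.ms"=>"2000"}
```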
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-integration-kafka'
-  s.version = '11.6.4'
+  s.version = '11.7.0'
   s.licenses = ['Apache-2.0']
   s.summary = "Integration with Kafka - input and output plugins"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/unit/inputs/kafka_spec.rb CHANGED
@@ -378,11 +378,15 @@ describe LogStash::Inputs::Kafka do
   end
 
   context 'string integer config' do
-    let(:config) { super().merge('session_timeout_ms' => '25000', 'max_poll_interval_ms' => '345000') }
+    let(:config) { super().merge('session_timeout_ms' => '25000',
+                                 'max_poll_interval_ms' => '345000',
+                                 'reconnect_backoff_max_ms' => '1500') }
 
     it "sets integer values" do
       expect(org.apache.kafka.clients.consumer.KafkaConsumer).
-          to receive(:new).with(hash_including('session.timeout.ms' => '25000', 'max.poll.interval.ms' => '345000')).
+          to receive(:new).with(hash_including('session.timeout.ms' => '25000',
+                                               'max.poll.interval.ms' => '345000',
+                                               'reconnect.backoff.max.ms' => '1500')).
               and_return kafka_client = double('kafka-consumer')
 
       expect( subject.send(:create_consumer, 'sample_client-1', 'group_instance_id') ).to be kafka_client
@@ -390,11 +394,15 @@ describe LogStash::Inputs::Kafka do
   end
 
   context 'integer config' do
-    let(:config) { super().merge('session_timeout_ms' => 25200, 'max_poll_interval_ms' => 123_000) }
+    let(:config) { super().merge('session_timeout_ms' => 25200,
+                                 'max_poll_interval_ms' => 123_000,
+                                 'reconnect_backoff_max_ms' => 1500) }
 
     it "sets integer values" do
       expect(org.apache.kafka.clients.consumer.KafkaConsumer).
-          to receive(:new).with(hash_including('session.timeout.ms' => '25200', 'max.poll.interval.ms' => '123000')).
+          to receive(:new).with(hash_including('session.timeout.ms' => '25200',
+                                               'max.poll.interval.ms' => '123000',
+                                               'reconnect.backoff.max.ms' => '1500')).
               and_return kafka_client = double('kafka-consumer')
 
       expect( subject.send(:create_consumer, 'sample_client-2', 'group_instance_id') ).to be kafka_client
data/spec/unit/outputs/kafka_spec.rb CHANGED
@@ -114,7 +114,7 @@ describe "outputs/kafka" do
   end
 
   context "when KafkaProducer#send() raises a non-retriable exception" do
-    let(:failcount) { (rand * 10).to_i }
+    let(:failcount) { 3 }
 
     let(:exception_classes) { [
       org.apache.kafka.common.errors.SerializationException,
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: logstash-integration-kafka
 version: !ruby/object:Gem::Version
-  version: 11.6.4
+  version: 11.7.0
 platform: java
 authors:
 - Elastic
 bindir: bin
 cert_chain: []
-date: 2025-08-28 00:00:00.000000000 Z
+date: 2025-10-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: logstash-core-plugin-api