logstash-input-kafka 6.1.0 → 6.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: ca18508e563e481b0e21452577fd2ab764126df5
- data.tar.gz: f5d951ba0229244db1a08eae618c99caea32b9e9
+ metadata.gz: b03d2e1ba09c2b8d379222908f610340a86d495c
+ data.tar.gz: 31dce488f638fc6b32ef72c222979dfef845c033
  SHA512:
- metadata.gz: 92df3686bcac5a2056acaf6ae59fe6ee185174568250d298e1158678708ff9da47700b85600b301f31435c550411c6495d838499009fbc088da826aa62ff33ea
- data.tar.gz: 5675c2530563e407aec9bd3a15554b9644d4d9f393b6c5d9ac8e1b49985c27439415a4468704ff204890bef124fb92d88b09553a6e87ea569f0ef9cc5a47b64f
+ metadata.gz: 45bb9dba8b2a3d168c3466c47263cd084962e3e518872a33580856ee7dfd71ec7110693190ac5ef15328632dae9c7df28aa8d36985ccfc45498f3fece58295ae
+ data.tar.gz: 3fa72f812e3084d04c1f311d271f63a89637777b553f13b5563b53486fec728d1a975ed95a3a41b1bd191912335ff05fd5529113fe8cf2dc30b6c4261aafde1e
CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 6.2.0
+ - Expose config `max_poll_interval_ms` to allow consumer to send heartbeats from a background thread
+ - Expose config `fetch_max_bytes` to control client's fetch response size limit
+
  ## 6.1.0
  - Add Kerberos authentication support.
lib/logstash/inputs/kafka.rb CHANGED
@@ -14,10 +14,10 @@ require 'logstash-input-kafka_jars.rb'
  # |==========================================================
  # |Kafka Client Version |Logstash Version |Plugin Version |Security Features |Why?
  # |0.8 |2.0.0 - 2.x.x |<3.0.0 | |Legacy, 0.8 is still popular
- # |0.9 |2.0.0 - 2.3.x | 3.x.x |Basic Auth, SSL |Works with the old Ruby Event API (`event['product']['price'] = 10`)
- # |0.9 |2.4.0 - 5.0.x | 4.x.x |Basic Auth, SSL |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
- # |0.10.0.x |2.4.0 - 5.0.x | 5.x.x |Basic Auth, SSL |Not compatible with the <= 0.9 broker
- # |0.10.1.x |2.4.0 - 5.0.x | 6.x.x |Basic Auth, SSL |Not compatible with <= 0.10.0.x broker
+ # |0.9 |2.0.0 - 2.3.x | 3.x.x |SSL |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+ # |0.9 |2.4.0 - 5.0.x | 4.x.x |SSL |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+ # |0.10.0.x |2.4.0 - 5.0.x | 5.x.x |SSL |Not compatible with the <= 0.9 broker
+ # |0.10.1.x |2.4.0 - 5.0.x | 6.x.x |SSL |Not compatible with <= 0.10.0.x broker
  # |==========================================================
  #
  # NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should
@@ -83,6 +83,10 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  # Whether records from internal topics (such as offsets) should be exposed to the consumer.
  # If set to true the only way to receive records from an internal topic is subscribing to it.
  config :exclude_internal_topics, :validate => :string
+ # The maximum amount of data the server should return for a fetch request. This is not an
+ # absolute maximum, if the first message in the first non-empty partition of the fetch is larger
+ # than this value, the message will still be returned to ensure that the consumer can make progress.
+ config :fetch_max_bytes, :validate => :string
  # The maximum amount of time the server will block before answering the fetch request if
  # there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
  # should be less than or equal to the timeout used in `poll_timeout_ms`
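A sketch of how the new `fetch_max_bytes` option could be set from a pipeline once 6.2.0 is installed; the broker address, topic, and the ~50 MB value are illustrative assumptions, not recommendations (the plugin validates the value as a string and forwards it to the Kafka consumer):

    input {
      kafka {
        bootstrap_servers => "localhost:9092"   # illustrative broker address
        topics            => ["example-topic"]  # illustrative topic name
        fetch_max_bytes   => "52428800"         # cap a single fetch response at ~50 MB (new in 6.2.0)
      }
    }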
@@ -103,6 +107,12 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  config :heartbeat_interval_ms, :validate => :string
  # Java Class used to deserialize the record's key
  config :key_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
+ # The maximum delay between invocations of poll() when using consumer group management. This places
+ # an upper bound on the amount of time that the consumer can be idle before fetching more records.
+ # If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+ # the group will rebalance in order to reassign the partitions to another member.
+ # The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms
+ config :max_poll_interval_ms, :validate => :string
  # The maximum amount of data per-partition the server will return. The maximum total memory used for a
  # request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least
  # as large as the maximum message size the server allows or else it is possible for the producer to
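A minimal sketch of the constraint stated in the comment above: if `max_poll_interval_ms` is raised, `request_timeout_ms` (an existing option of this plugin) has to stay larger than it. The values below are illustrative only:

    input {
      kafka {
        bootstrap_servers    => "localhost:9092"   # illustrative broker address
        topics               => ["example-topic"]  # illustrative topic name
        max_poll_interval_ms => "600000"           # allow up to 10 minutes between poll() calls (new in 6.2.0)
        request_timeout_ms   => "610000"           # must remain larger than max_poll_interval_ms
      }
    }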
@@ -270,6 +280,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms) unless connections_max_idle_ms.nil?
  props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit)
  props.put(kafka::EXCLUDE_INTERNAL_TOPICS_CONFIG, exclude_internal_topics) unless exclude_internal_topics.nil?
+ props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes) unless fetch_max_bytes.nil?
  props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms) unless fetch_max_wait_ms.nil?
  props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes) unless fetch_min_bytes.nil?
  props.put(kafka::GROUP_ID_CONFIG, group_id)
@@ -277,6 +288,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
  props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes) unless max_partition_fetch_bytes.nil?
  props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records) unless max_poll_records.nil?
+ props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms) unless max_poll_interval_ms.nil?
  props.put(kafka::METADATA_MAX_AGE_MS_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
  props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy) unless partition_assignment_strategy.nil?
  props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes) unless receive_buffer_bytes.nil?
logstash-input-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-input-kafka'
- s.version = '6.1.0'
+ s.version = '6.2.0'
  s.licenses = ['Apache License (2.0)']
  s.summary = 'This input will read events from a Kafka topic. It uses the high level consumer API provided by Kafka to read messages from the broker'
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-input-kafka
  version: !ruby/object:Gem::Version
- version: 6.1.0
+ version: 6.2.0
  platform: ruby
  authors:
  - Elasticsearch
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-11-17 00:00:00.000000000 Z
+ date: 2016-12-01 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement