logstash-integration-kafka 10.9.0-java → 10.10.0-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: df9c89cdfcc2db6702409ec45ffb8d9f8f2b9274304889df1527e0697ccfcf95
- data.tar.gz: e907ad2e277d27c8cdbe98ebd203af86a3505a85574d64ace4402607c075a69e
+ metadata.gz: 1033b1bc88694b441cc6b117c431792780093b27ead742684f4e903048ed54a5
+ data.tar.gz: e2a74687db7bba3ccc192a544142226dccb3b144bb11e5cfecd84dd4c26cbdf0
  SHA512:
- metadata.gz: bac93eb957af9028a6efc6e31a66c94818ae61333fa738daa6606abdd325dbea206fea2cd905d891f2b341a7bc983f8eaf5a5471015ac9548bc902f941b4a0d9
- data.tar.gz: 456739a2409ef5a42f007a23c8d0dbfceb3518e8e65b528c0f48266f2a219c2415a83a507fdab3ba028cbc5493d645c080ce191a0d39d7c1787557abded9a0e1
+ metadata.gz: b1e206f1bfbd4acbf6ca66d11f974c2116faf357da1212dfa740675dbce47ca1dea661fb0c185df687798d2f6a053dabb781d63d687cd981769d38de938c148a
+ data.tar.gz: ca5f79ea95cd3901b1f47b06e9465a4962c4aa585cb1441254a916fe38a5603c98b87178eb85883caac5c28215b0e42de39c1d810e742d79d33bb8c3df82501b
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 10.10.0
+
+ - Added config setting to enable 'zstd' compression in the Kafka output [#112](https://github.com/logstash-plugins/logstash-integration-kafka/pull/112)
+
  ## 10.9.0
  - Refactor: leverage codec when using schema registry [#106](https://github.com/logstash-plugins/logstash-integration-kafka/pull/106)
 
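The headline change is the new `zstd` value accepted by the output's `compression_type` setting. A minimal usage sketch in the Logstash config DSL (the broker address and topic name are hypothetical placeholders; the `bootstrap_servers` value shown is also the plugin default):

    output {
      kafka {
        bootstrap_servers => "localhost:9092"   # hypothetical broker; matches the plugin default
        topic_id => "logs"                      # hypothetical topic
        compression_type => "zstd"              # new in 10.10.0
      }
    }

Note that zstd-compressed batches require brokers running Kafka 2.1.0 or newer (KIP-110); older brokers reject records produced with this setting.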
data/DEVELOPER.md CHANGED
@@ -62,7 +62,7 @@ See http://kafka.apache.org/documentation.html#producerconfigs for details about
  kafka {
  topic_id => ... # string (required), The topic to produce the messages to
  broker_list => ... # string (optional), default: "localhost:9092", This is for bootstrapping and the producer will only use it for getting metadata
- compression_codec => ... # string (optional), one of ["none", "gzip", "snappy"], default: "none"
+ compression_codec => ... # string (optional), one of ["none", "gzip", "snappy", "lz4", "zstd"], default: "none"
  compressed_topics => ... # string (optional), default: "", This parameter allows you to set whether compression should be turned on for particular
  request_required_acks => ... # number (optional), one of [-1, 0, 1], default: 0, This value controls when a produce request is considered completed
  serializer_class => ... # string, (optional) default: "kafka.serializer.StringEncoder", The serializer class for messages. The default encoder takes a byte[] and returns the same byte[]
data/docs/output-kafka.asciidoc CHANGED
@@ -84,7 +84,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-buffer_memory>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-client_dns_lookup>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
- | <<plugins-{type}s-{plugin}-compression_type>> |<<string,string>>, one of `["none", "gzip", "snappy", "lz4"]`|No
+ | <<plugins-{type}s-{plugin}-compression_type>> |<<string,string>>, one of `["none", "gzip", "snappy", "lz4", "zstd"]`|No
  | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
  | <<plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
  | <<plugins-{type}s-{plugin}-key_serializer>> |<<string,string>>|No
@@ -193,11 +193,11 @@ ip/port by allowing a logical application name to be included with the request
  [id="plugins-{type}s-{plugin}-compression_type"]
  ===== `compression_type`
 
- * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
+ * Value can be any of: `none`, `gzip`, `snappy`, `lz4`, `zstd`
  * Default value is `"none"`
 
  The compression type for all data generated by the producer.
- The default is none (i.e. no compression). Valid values are none, gzip, snappy, or lz4.
+ The default is none (meaning no compression). Valid values are none, gzip, snappy, lz4, or zstd.
 
  [id="plugins-{type}s-{plugin}-jaas_path"]
  ===== `jaas_path`
data/lib/logstash/outputs/kafka.rb CHANGED
@@ -80,8 +80,8 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  # The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
  config :buffer_memory, :validate => :number, :default => 33_554_432 # (32M) Kafka default
  # The compression type for all data generated by the producer.
- # The default is none (i.e. no compression). Valid values are none, gzip, or snappy.
- config :compression_type, :validate => ["none", "gzip", "snappy", "lz4"], :default => "none"
+ # The default is none (i.e. no compression). Valid values are none, gzip, snappy, lz4 or zstd.
+ config :compression_type, :validate => ["none", "gzip", "snappy", "lz4", "zstd"], :default => "none"
  # How DNS lookups should be done. If set to `use_all_dns_ips`, when the lookup returns multiple
  # IP addresses for a hostname, they will all be attempted to connect to before failing the
  # connection. If the value is `resolve_canonical_bootstrap_servers_only` each entry will be
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-integration-kafka'
- s.version = '10.9.0'
+ s.version = '10.10.0'
  s.licenses = ['Apache-2.0']
  s.summary = "Integration with Kafka - input and output plugins"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/integration/outputs/kafka_spec.rb CHANGED
@@ -139,6 +139,25 @@ describe "outputs/kafka", :integration => true do
  # end
  end
 
+ context 'when using zstd compression' do
+ let(:test_topic) { 'logstash_integration_zstd_topic' }
+
+ before :each do
+ config = base_config.merge({"topic_id" => test_topic, "compression_type" => "zstd"})
+ load_kafka_data(config)
+ end
+
+ # NOTE: depends on zstd-ruby gem which is using a C-extension
+ # it 'should have data integrity' do
+ # messages = fetch_messages(test_topic)
+ #
+ # expect(messages.size).to eq(num_events)
+ # messages.each do |m|
+ # expect(m.value).to eq(event.to_s)
+ # end
+ # end
+ end
+
  context 'when using multi partition topic' do
  let(:num_events) { 100 } # ~ more than (batch.size) 16,384 bytes
  let(:test_topic) { 'logstash_integration_topic3' }
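The integrity assertions in the zstd context ship commented out because reading zstd data back in the test consumer depends on the zstd-ruby gem, a C extension that cannot be assumed installable in every environment. A hypothetical way to keep the check without breaking such environments is to gate the example on the gem loading, using RSpec's `:if` metadata to skip it otherwise; `fetch_messages`, `num_events`, and `event` are the spec's existing helpers:

    # Hypothetical variant of the commented-out example above: run the
    # data-integrity check only when zstd-ruby could actually be loaded.
    zstd_available = begin
      require 'zstd-ruby'
      true
    rescue LoadError
      false
    end

    it 'should have data integrity', :if => zstd_available do
      messages = fetch_messages(test_topic)
      expect(messages.size).to eq(num_events)
      messages.each do |m|
        expect(m.value).to eq(event.to_s)
      end
    end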
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-integration-kafka
  version: !ruby/object:Gem::Version
- version: 10.9.0
+ version: 10.10.0
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-12-14 00:00:00.000000000 Z
+ date: 2022-02-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement