fluent-plugin-kafka 0.11.1 → 0.11.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: fa8bd187d82171d2d909caee040959b99380852c82c94e7ffb12d9c9b4ab824e
-  data.tar.gz: ef617b72fd2908a3119fd086c330f01da867c16314d85dc78151d7489e557332
+  metadata.gz: a32cfbf1850b74e02b8b698db2b3676f89239e647debf62ae7d71d0b3afc229c
+  data.tar.gz: c076287c2ea443fd645193432e93ab6ed0c1263043dac7c171943ac4a268da73
 SHA512:
-  metadata.gz: fda25bea7239416714baae9ba577aad680617f1482f48aae8243b570d5e3fd090cab86fa6da0efb7ea8b4542745e4ad13cb131bcaacf9a5e0c653df044ced319
-  data.tar.gz: 53a84fed9f9bb11fdf7b4306af0e1900c216afc4546dc012c532db459d69f95047205c1e020884410cd30ebef915ed8e424f0734b97afc59169a2fe3aa459f5a
+  metadata.gz: 5bb0d0d1628ad5c3193c1fd9c47af0531330dd6f6359b2cdd6d6e13821b2c023fee9d82b8758a022e6502bf09dc7c594583da2815d87d11dde1e7c0601517bf9
+  data.tar.gz: 05e1f5c8e66ef96a938ee79eacbdd95ecccea3f7fdce76e4888fc38c9b06c5166b57b9df5e7fdc4aeb09bae4099f1fd7907251a6d99cc3a384cb0568939739f6
data/ChangeLog CHANGED
@@ -1,3 +1,7 @@
+Release 0.11.2 - 2019/09/27
+
+  * out_rdkafka2: Add share_producer parameter
+
 Release 0.11.1 - 2019/08/22
 
   * out_kafka2: Fix mutating issue of headers
data/README.md CHANGED
@@ -276,7 +276,7 @@ The configuration format is jsonpath. It is descibed in https://docs.fluentd.org
 
 ### Buffered output plugin
 
-This plugin uses ruby-kafka producer for writing data. This plugin works with recent kafka versions. This plugin is for v0.12. If you use v1, see `kafka2`.
+This plugin uses ruby-kafka producer for writing data. This plugin is for v0.12. If you use v1, see `kafka2`.
 
     <match app.**>
      @type kafka_buffered
@@ -373,8 +373,10 @@ You need to install rdkafka gem.
     default_message_key (string) :default => nil
     exclude_topic_key (bool) :default => false
     exclude_partition_key (bool) :default => false
-    headers (hash) :default => {}
-    headers_from_record (hash) :default => {}
+
+    # same with kafka2
+    headers (hash) :default => {}
+    headers_from_record (hash) :default => {}
 
     <format>
      @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
@@ -396,6 +398,15 @@ You need to install rdkafka gem.
     rdkafka_options {
      "log_level" : 7
     }
+
+    # rdkafka2 specific parameters
+
+    # share kafka producer between flush threads. This is mainly for reducing kafka operations like kerberos
+    share_producer (bool) :default => false
+    # Timeout for polling message wait. If 0, no wait.
+    rdkafka_delivery_handle_poll_timeout (integer) :default => 30
+    # If the record size is larger than this value, such records are ignored. Default is no limit
+    max_send_limit_bytes (integer) :default => nil
    </match>
 
 If you use v0.12, use `rdkafka` instead.
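For reference, a minimal sketch of how the new rdkafka2 parameters documented above might be combined in practice. The broker address, topic name, and size limit below are illustrative placeholders, not values taken from this release:

    <match app.**>
      @type rdkafka2
      brokers localhost:9092
      default_topic app-events

      # New in 0.11.2: reuse a single librdkafka producer across flush threads,
      # e.g. to cut down repeated operations such as Kerberos handshakes.
      share_producer true
      # Wait up to 30s per delivery handle; 0 skips waiting entirely.
      rdkafka_delivery_handle_poll_timeout 30
      # Drop records larger than ~1 MB instead of sending them.
      max_send_limit_bytes 1000000

      <format>
        @type json
      </format>
    </match>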
data/fluent-plugin-kafka.gemspec CHANGED
@@ -13,7 +13,7 @@ Gem::Specification.new do |gem|
   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
   gem.name = "fluent-plugin-kafka"
   gem.require_paths = ["lib"]
-  gem.version = '0.11.1'
+  gem.version = '0.11.2'
   gem.required_ruby_version = ">= 2.1.0"
 
   gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
data/lib/fluent/plugin/out_rdkafka2.rb CHANGED
@@ -77,6 +77,7 @@ DESC
   config_param :rdkafka_message_max_num, :integer, :default => nil, :desc => 'Used for batch.num.messages'
   config_param :rdkafka_delivery_handle_poll_timeout, :integer, :default => 30, :desc => 'Timeout for polling message wait'
   config_param :rdkafka_options, :hash, :default => {}, :desc => 'Set any rdkafka configuration. See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md'
+  config_param :share_producer, :bool, :default => false, :desc => 'share kafka producer between flush threads'
 
   config_param :max_enqueue_retries, :integer, :default => 3
   config_param :enqueue_retry_backoff, :integer, :default => 3
@@ -97,8 +98,10 @@ DESC
 
   def initialize
     super
-    @producers = {}
-    @producers_mutex = Mutex.new
+
+    @producers = nil
+    @producers_mutex = nil
+    @shared_producer = nil
   end
 
   def configure(conf)
@@ -187,6 +190,13 @@ DESC
   end
 
   def start
+    if @share_producer
+      @shared_producer = @rdkafka.producer
+    else
+      @producers = {}
+      @producers_mutex = Mutex.new
+    end
+
     super
   end
 
@@ -200,30 +210,43 @@ DESC
   end
 
   def shutdown_producers
-    @producers_mutex.synchronize {
-      shutdown_threads = @producers.map { |key, producer|
-        th = Thread.new {
-          unless producer.close(10)
-            log.warn("Queue is forcefully closed after 10 seconds wait")
-          end
+    if @share_producer
+      close_producer(@shared_producer)
+      @shared_producer = nil
+    else
+      @producers_mutex.synchronize {
+        shutdown_threads = @producers.map { |key, producer|
+          th = Thread.new {
+            close_producer(producer)
+          }
+          th.abort_on_exception = true
+          th
         }
-        th.abort_on_exception = true
-        th
+        shutdown_threads.each { |th| th.join }
+        @producers = {}
       }
-      shutdown_threads.each { |th| th.join }
-      @producers = {}
-    }
+    end
+  end
+
+  def close_producer(producer)
+    unless producer.close(10)
+      log.warn("Queue is forcefully closed after 10 seconds wait")
+    end
   end
 
   def get_producer
-    @producers_mutex.synchronize {
-      producer = @producers[Thread.current.object_id]
-      unless producer
-        producer = @rdkafka.producer
-        @producers[Thread.current.object_id] = producer
-      end
-      producer
-    }
+    if @share_producer
+      @shared_producer
+    else
+      @producers_mutex.synchronize {
+        producer = @producers[Thread.current.object_id]
+        unless producer
+          producer = @rdkafka.producer
+          @producers[Thread.current.object_id] = producer
+        end
+        producer
+      }
+    end
   end
 
   def setup_formatter(conf)
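To make the two producer strategies above easier to follow outside the diff, here is a minimal runnable Ruby sketch of the same pattern. FakeProducer and ProducerPool are hypothetical stand-ins for @rdkafka.producer and the plugin internals, not plugin API:

    class FakeProducer; end  # stand-in for an rdkafka producer instance

    class ProducerPool
      def initialize(shared:)
        @shared = shared
        if shared
          @shared_producer = FakeProducer.new  # one producer for all threads
        else
          @producers = {}
          @producers_mutex = Mutex.new
        end
      end

      # Mirrors get_producer above: return the shared instance, or lazily
      # cache one producer per flush thread keyed by Thread.current.object_id.
      def get
        return @shared_producer if @shared
        @producers_mutex.synchronize do
          @producers[Thread.current.object_id] ||= FakeProducer.new
        end
      end
    end

    pool = ProducerPool.new(shared: false)
    producers = 4.times.map { Thread.new { pool.get }.value }
    puts producers.uniq.size  # => 4 distinct producers; with shared: true, 1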
@@ -272,10 +295,13 @@ DESC
           next
         end
 
-        handlers << enqueue_with_retry(producer, topic, record_buf, message_key, partition, headers)
+        handler = enqueue_with_retry(producer, topic, record_buf, message_key, partition, headers)
+        if @rdkafka_delivery_handle_poll_timeout != 0
+          handlers << handler
+        end
       }
       handlers.each { |handler|
-        handler.wait(@rdkafka_delivery_handle_poll_timeout) if @rdkafka_delivery_handle_poll_timeout != 0
+        handler.wait(@rdkafka_delivery_handle_poll_timeout)
       }
     end
   rescue Exception => e
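The hunk above moves the poll-timeout check from the wait loop to the collection step: when rdkafka_delivery_handle_poll_timeout is 0, delivery handles are never accumulated, so the flush becomes fire-and-forget instead of building a list it would skip anyway. A small runnable sketch of that control flow, where FakeHandle is a hypothetical stand-in for an rdkafka delivery handle:

    FakeHandle = Struct.new(:id) do
      def wait(timeout)
        puts "message #{id}: waited up to #{timeout}s for delivery"
      end
    end

    poll_timeout = 30  # set to 0 for fire-and-forget
    handlers = []
    3.times do |i|
      handle = FakeHandle.new(i)               # one handle per enqueued message
      handlers << handle if poll_timeout != 0  # 0: never collect handles
    end
    handlers.each { |h| h.wait(poll_timeout) } # empty loop when poll_timeout == 0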
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-kafka
 version: !ruby/object:Gem::Version
-  version: 0.11.1
+  version: 0.11.2
 platform: ruby
 authors:
 - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-08-22 00:00:00.000000000 Z
+date: 2019-09-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fluentd