fluent-plugin-kafka 0.17.3 → 0.18.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 380a588c3128af581568815d5fdec9edea856c702832de8be87a9e83a012a8b7
- data.tar.gz: afd5d2e7ba2b89a6948637cbb325b3b0683e0c23ac37a591f717902a78a5ba1b
+ metadata.gz: 0a8590f6d34bbdb2faa991bba6e32a1424623c7ddd9609dc1d2ffcbeabaa20e1
+ data.tar.gz: a653d9dba00fa82f18071304be9093cd6d620bf4bda29d5e00a75113d05e8e3e
  SHA512:
- metadata.gz: fe4f49213a66d2e58bb4177f888ab3c0f3cf0fefe3ca2fa5972bc6394ee46c34c7f054088efd035c10e3beb4d277a5368f0f17ff9aa81954d0340e0962e31902
- data.tar.gz: 1c59d58e36828d61eb8d42b9c60578850dd8a439b21faadc0261d7f6b873a7d3874797923b341b9fc6fc6ffce59e1bff4ac90433e8b329580df2031b41d4005a
+ metadata.gz: 69a784e7d69e2229a036110997b2691143008d4ca786e18415e9a806a6d577b38465a4a3931166418a5b7e022dddbc62d014c0c784277ccaaecc9b6570822564
+ data.tar.gz: 9f14d8d10a45bf390c801c93d79b893f4248ded7e37dc87f8c4453b04158754e1006aa7603874986e8c93812fe899197219abccb3bdb0ae4daacef6d516a0af3
@@ -1,7 +1,9 @@
  name: linux
  on:
- - push
- - pull_request
+ push:
+ branches: [master]
+ pull_request:
+ branches: [master]
  jobs:
  build:
  runs-on: ${{ matrix.os }}
@@ -10,7 +12,7 @@ jobs:
  strategy:
  fail-fast: false
  matrix:
- ruby: [ '2.4', '2.5', '2.6', '2.7', '3.0' ]
+ ruby: [ '3.1', '3.0', '2.7', '2.6' ]
  os:
  - ubuntu-latest
  name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
data/ChangeLog CHANGED
@@ -1,3 +1,14 @@
+ Release 0.18.0 - 2022/07/21
+ * out_kafka2: Keep alive Kafka connections between flushes
+ * out_rdkafka2: Enable to set SASL credentials via `username` and `password` parameters
+ * out_kafka2/out_rdkafka2: Add `record_key` parameter
+
+ Release 0.17.5 - 2022/03/18
+ * out_kafka2: Add `resolve_seed_brokers` parameter
+
+ Release 0.17.4 - 2022/01/25
+ * in_kafka_group: Add `refresh_topic_interval` parameter
+
  Release 0.17.3 - 2021/11/26
  * output: Suppress large warning logs for events skipped by `max_send_limit_bytes`
 
data/README.md CHANGED
@@ -135,6 +135,7 @@ Consume events by kafka consumer group features..
  offset_commit_interval (integer) :default => nil (Use default of ruby-kafka)
  offset_commit_threshold (integer) :default => nil (Use default of ruby-kafka)
  fetcher_max_queue_size (integer) :default => nil (Use default of ruby-kafka)
+ refresh_topic_interval (integer) :default => nil (Use default of ruby-kafka)
  start_from_beginning (bool) :default => true
  </source>
 
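For reference, a minimal `kafka_group` source using the new `refresh_topic_interval` parameter might look like the sketch below; the broker address, consumer group, and topic name are placeholders, and the interval is in seconds:

   <source>
     @type kafka_group
     brokers localhost:9092
     consumer_group fluentd
     topics my_topic
     format json
     refresh_topic_interval 60
   </source>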
@@ -192,6 +193,7 @@ If `ruby-kafka` doesn't fit your kafka environment, check `rdkafka2` plugin inst
  message_key_key (string) :default => 'message_key'
  default_topic (string) :default => nil
  default_partition_key (string) :default => nil
+ record_key (string) :default => nil
  default_message_key (string) :default => nil
  exclude_topic_key (bool) :default => false
  exclude_partition_key (bool) :default => false
@@ -204,6 +206,7 @@ If `ruby-kafka` doesn't fit your kafka environment, check `rdkafka2` plugin inst
  use_default_for_unknown_topic (bool) :default => false
  discard_kafka_delivery_failed (bool) :default => false (No discard)
  partitioner_hash_function (enum) (crc32|murmur2) :default => 'crc32'
+ share_producer (bool) :default => false
 
  <format>
  @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
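For reference, enabling the new `share_producer` flag in a `kafka2` output is a single line; a minimal sketch, assuming placeholder broker and topic values:

   <match app.**>
     @type kafka2
     brokers localhost:9092
     default_topic my_topic
     share_producer true
     <format>
       @type json
     </format>
   </match>

With `share_producer true`, all flush threads reuse one producer; by default each flush thread keeps its own producer, which this release now keeps alive across flushes instead of recreating it per chunk.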
@@ -334,6 +337,40 @@ For example, `$.source.ip` can be extracted with config `headers_from_record` an
 
  > Using this config to remove unused fields is discouraged. A [filter plugin](https://docs.fluentd.org/v/0.12/filter) can be used for this purpose.
 
+ #### Send only a sub field as a message payload
+
+ If `record_key` is provided, the plugin sends only a sub field given by that key.
+ The configuration format is jsonpath.
+
+ e.g. When the following configuration and the incoming record are given:
+
+ configuration:
+
+ <match **>
+ @type kafka2
+ [...]
+ record_key '$.data'
+ </match>
+
+ record:
+
+ {
+ "specversion" : "1.0",
+ "type" : "com.example.someevent",
+ "id" : "C234-1234-1234",
+ "time" : "2018-04-05T17:31:00Z",
+ "datacontenttype" : "application/json",
+ "data" : {
+ "appinfoA" : "abc",
+ "appinfoB" : 123,
+ "appinfoC" : true
+ },
+ ...
+ }
+
+ only the `data` field will be serialized by the formatter and sent to Kafka.
+ The toplevel `data` key will be removed.
+
  ### Buffered output plugin
 
  This plugin uses ruby-kafka producer for writing data. This plugin is for v0.12. If you use v1, see `kafka2`.
@@ -459,6 +496,7 @@ You need to install rdkafka gem.
  # same with kafka2
  headers (hash) :default => {}
  headers_from_record (hash) :default => {}
+ record_key (string) :default => nil
 
  <format>
  @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
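As with `kafka2`, `record_key` on the `rdkafka2` output takes a jsonpath; a minimal sketch mirroring the README example above (broker and topic are placeholders):

   <match app.**>
     @type rdkafka2
     brokers localhost:9092
     default_topic my_topic
     record_key '$.data'
     <format>
       @type json
     </format>
   </match>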
@@ -13,7 +13,7 @@ Gem::Specification.new do |gem|
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "fluent-plugin-kafka"
  gem.require_paths = ["lib"]
- gem.version = '0.17.3'
+ gem.version = '0.18.0'
  gem.required_ruby_version = ">= 2.1.0"
 
  gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
@@ -67,6 +67,8 @@ class Fluent::KafkaGroupInput < Fluent::Input
  :desc => "The number of messages that can be processed before their offsets are committed"
  config_param :fetcher_max_queue_size, :integer, :default => nil,
  :desc => "The number of fetched messages per partition that are queued in fetcher queue"
+ config_param :refresh_topic_interval, :integer, :default => nil,
+ :desc => "The interval of refreshing the topic list in seconds. Zero or unset disables this"
  config_param :start_from_beginning, :bool, :default => true,
  :desc => "Whether to start from the beginning of the topic or just subscribe to new messages being produced"
 
@@ -128,6 +130,7 @@ class Fluent::KafkaGroupInput < Fluent::Input
  @consumer_opts[:offset_commit_interval] = @offset_commit_interval if @offset_commit_interval
  @consumer_opts[:offset_commit_threshold] = @offset_commit_threshold if @offset_commit_threshold
  @consumer_opts[:fetcher_max_queue_size] = @fetcher_max_queue_size if @fetcher_max_queue_size
+ @consumer_opts[:refresh_topic_interval] = @refresh_topic_interval if @refresh_topic_interval
 
  @fetch_opts = {}
  @fetch_opts[:max_wait_time] = @max_wait_time if @max_wait_time
@@ -27,6 +27,11 @@ DESC
  config_param :partitioner_hash_function, :enum, list: [:crc32, :murmur2], :default => :crc32,
  :desc => "Specify kafka patrtitioner hash algorithm"
  config_param :default_partition, :integer, :default => nil
+ config_param :record_key, :string, :default => nil,
+ :desc => <<-DESC
+ A jsonpath to a record value pointing to the field which will be passed to the formatter and sent as the Kafka message payload.
+ If defined, only this field in the record will be sent to Kafka as the message payload.
+ DESC
  config_param :use_default_for_unknown_topic, :bool, :default => false, :desc => "If true, default_topic is used when topic not found"
  config_param :client_id, :string, :default => 'fluentd'
  config_param :idempotent, :bool, :default => false, :desc => 'Enable idempotent producer'
@@ -49,6 +54,8 @@ DESC
  :desc => 'Kafka message headers'
  config_param :headers_from_record, :hash, default: {}, symbolize_keys: true, value_type: :string,
  :desc => 'Kafka message headers where the header value is a jsonpath to a record value'
+ config_param :resolve_seed_brokers, :bool, :default => false,
+ :desc => "support brokers' hostname with multiple addresses"
 
  config_param :get_kafka_client_log, :bool, :default => false
 
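For reference, a minimal `kafka2` sketch turning on the new `resolve_seed_brokers` flag, intended for seed-broker hostnames that resolve to multiple addresses (hostname and topic are placeholders):

   <match app.**>
     @type kafka2
     brokers kafka-bootstrap.example.com:9092
     default_topic my_topic
     resolve_seed_brokers true
     <format>
       @type json
     </format>
   </match>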
@@ -79,6 +86,7 @@ DESC
  Add a regular expression to capture ActiveSupport notifications from the Kafka client
  requires activesupport gem - records will be generated under fluent_kafka_stats.**
  DESC
+ config_param :share_producer, :bool, :default => false, :desc => 'share kafka producer between flush threads'
 
  config_section :buffer do
  config_set_default :chunk_keys, ["topic"]
@@ -94,6 +102,12 @@ DESC
  super
 
  @kafka = nil
+ @producers = nil
+ @producers_mutex = nil
+ @shared_producer = nil
+
+ @writing_threads_mutex = Mutex.new
+ @writing_threads = Set.new
  end
 
  def refresh_client(raise_error = true)
@@ -103,19 +117,19 @@ DESC
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
  ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_scram_username: @username, sasl_scram_password: @password,
- sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname,
+ sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
  partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
  elsif @username != nil && @password != nil
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
  ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl,
- ssl_verify_hostname: @ssl_verify_hostname,
+ ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
  partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
  else
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
  ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl,
- ssl_verify_hostname: @ssl_verify_hostname,
+ ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
  partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
  end
  log.info "initialized kafka producer: #{@client_id}"
@@ -183,15 +197,29 @@ DESC
  @exclude_field_accessors = @exclude_fields.map do |field|
  record_accessor_create(field)
  end
+
+ @record_field_accessor = nil
+ @record_field_accessor = record_accessor_create(@record_key) unless @record_key.nil?
  end
 
  def multi_workers_ready?
  true
  end
 
+ def create_producer
+ @kafka.producer(**@producer_opts)
+ end
+
  def start
  super
  refresh_client
+
+ if @share_producer
+ @shared_producer = create_producer
+ else
+ @producers = {}
+ @producers_mutex = Mutex.new
+ end
  end
 
  def close
@@ -204,6 +232,56 @@ DESC
  @kafka = nil
  end
 
+ def wait_writing_threads
+ done = false
+ until done do
+ @writing_threads_mutex.synchronize do
+ done = true if @writing_threads.empty?
+ end
+ sleep(1) unless done
+ end
+ end
+
+ def shutdown
+ super
+ wait_writing_threads
+ shutdown_producers
+ end
+
+ def shutdown_producers
+ if @share_producer
+ @shared_producer.shutdown
+ @shared_producer = nil
+ else
+ @producers_mutex.synchronize {
+ shutdown_threads = @producers.map { |key, producer|
+ th = Thread.new {
+ producer.shutdown
+ }
+ th.abort_on_exception = true
+ th
+ }
+ shutdown_threads.each { |th| th.join }
+ @producers = {}
+ }
+ end
+ end
+
+ def get_producer
+ if @share_producer
+ @shared_producer
+ else
+ @producers_mutex.synchronize {
+ producer = @producers[Thread.current.object_id]
+ unless producer
+ producer = create_producer
+ @producers[Thread.current.object_id] = producer
+ end
+ producer
+ }
+ end
+ end
+
  def setup_formatter(conf)
  type = conf['@type']
  case type
@@ -227,6 +305,8 @@ DESC
 
  # TODO: optimize write performance
  def write(chunk)
+ @writing_threads_mutex.synchronize { @writing_threads.add(Thread.current) }
+
  tag = chunk.metadata.tag
  topic = if @topic
  extract_placeholders(@topic, chunk)
@@ -235,13 +315,12 @@ DESC
  end
 
  messages = 0
- record_buf = nil
 
  base_headers = @headers
  mutate_headers = !@headers_from_record_accessors.empty?
 
  begin
- producer = @kafka.topic_producer(topic, **@producer_opts)
+ producer = get_producer
  chunk.msgpack_each { |time, record|
  begin
  record = inject_values_to_record(tag, time, record)
@@ -265,6 +344,7 @@ DESC
  end
  end
 
+ record = @record_field_accessor.call(record) unless @record_field_accessor.nil?
  record_buf = @formatter_proc.call(tag, time, record)
  record_buf_bytes = record_buf.bytesize
  if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
@@ -281,7 +361,7 @@ DESC
  messages += 1
 
  producer.produce(record_buf, key: message_key, partition_key: partition_key, partition: partition, headers: headers,
- create_time: @use_event_time ? Time.at(time) : Time.now)
+ create_time: @use_event_time ? Time.at(time) : Time.now, topic: topic)
  }
 
  if messages > 0
@@ -299,7 +379,6 @@ DESC
  end
  rescue Kafka::UnknownTopicOrPartition
  if @use_default_for_unknown_topic && topic != @default_topic
- producer.shutdown if producer
  log.warn "'#{topic}' topic not found. Retry with '#{default_topic}' topic"
  topic = @default_topic
  retry
@@ -319,7 +398,7 @@ DESC
  # Raise exception to retry sendind messages
  raise e unless ignore
  ensure
- producer.shutdown if producer
+ @writing_threads_mutex.synchronize { @writing_threads.delete(Thread.current) }
  end
  end
  end
@@ -73,6 +73,11 @@ DESC
  :desc => <<-DESC
  The codec the producer uses to compress messages. Used for compression.codec
  Supported codecs: (gzip|snappy)
+ DESC
+ config_param :record_key, :string, :default => nil,
+ :desc => <<-DESC
+ A jsonpath to a record value pointing to the field which will be passed to the formatter and sent as the Kafka message payload.
+ If defined, only this field in the record will be sent to Kafka as the message payload.
  DESC
  config_param :use_event_time, :bool, :default => false, :desc => 'Use fluentd event time for rdkafka timestamp'
  config_param :max_send_limit_bytes, :size, :default => nil
@@ -230,6 +235,9 @@ DESC
  end
 
  @enqueue_rate = EnqueueRate.new(@max_enqueue_bytes_per_second) unless @max_enqueue_bytes_per_second.nil?
+
+ @record_field_accessor = nil
+ @record_field_accessor = record_accessor_create(@record_key) unless @record_key.nil?
  end
 
  def build_config
@@ -270,6 +278,8 @@ DESC
  config[:"queue.buffering.max.messages"] = @rdkafka_buffering_max_messages if @rdkafka_buffering_max_messages
  config[:"message.max.bytes"] = @rdkafka_message_max_bytes if @rdkafka_message_max_bytes
  config[:"batch.num.messages"] = @rdkafka_message_max_num if @rdkafka_message_max_num
+ config[:"sasl.username"] = @username if @username
+ config[:"sasl.password"] = @password if @password
 
  @rdkafka_options.each { |k, v|
  config[k.to_sym] = v
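The two added lines map the plugin's `username`/`password` parameters onto librdkafka's `sasl.username`/`sasl.password` properties. A minimal `rdkafka2` sketch, assuming placeholder credentials and broker, and assuming a SASL_SSL listener with the PLAIN mechanism supplied through `rdkafka_options`:

   <match app.**>
     @type rdkafka2
     brokers broker1.example.com:9093
     default_topic my_topic
     username my-sasl-user
     password my-sasl-password
     rdkafka_options {"security.protocol":"sasl_ssl","sasl.mechanisms":"PLAIN"}
     <format>
       @type json
     </format>
   </match>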
@@ -371,8 +381,6 @@ DESC
  end
 
  handlers = []
- record_buf = nil
- record_buf_bytes = nil
 
  headers = @headers.clone
 
@@ -395,6 +403,7 @@ DESC
  end
  end
 
+ record = @record_field_accessor.call(record) unless @record_field_accessor.nil?
  record_buf = @formatter_proc.call(tag, time, record)
  record_buf_bytes = record_buf.bytesize
  if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
@@ -14,6 +14,7 @@ class KafkaGroupInputTest < Test::Unit::TestCase
  brokers localhost:9092
  consumer_group fluentd
  format text
+ refresh_topic_interval 0
  @label @kafka
  topics #{TOPIC_NAME}
  ]
@@ -52,6 +53,7 @@ class KafkaGroupInputTest < Test::Unit::TestCase
  brokers localhost:9092
  format text
  @label @kafka
+ refresh_topic_interval 0
  topics #{TOPIC_NAME}
  ]
  d = create_driver
@@ -60,6 +60,13 @@ class Kafka2OutputTest < Test::Unit::TestCase
  assert_equal true, d.instance.multi_workers_ready?
  end
 
+ def test_resolve_seed_brokers
+ d = create_driver(config + config_element('ROOT', '', {"resolve_seed_brokers" => true}))
+ assert_nothing_raised do
+ d.instance.refresh_client
+ end
+ end
+
  class WriteTest < self
  TOPIC_NAME = "kafka-output-#{SecureRandom.uuid}"
 
@@ -98,6 +105,21 @@ class Kafka2OutputTest < Test::Unit::TestCase
  assert_equal([expected_message], actual_messages)
  end
 
+ def test_record_key
+ conf = config(default_topic: TOPIC_NAME) +
+ config_element('ROOT', '', {"record_key" => "$.data"}, [])
+ target_driver = create_target_driver
+ target_driver.run(expect_records: 1, timeout: 5) do
+ sleep 2
+ d = create_driver(conf)
+ d.run do
+ d.feed('test', event_time, {'data' => {'a' => 'b', 'foo' => 'bar', 'message' => 'test'}, 'message_key' => '123456'})
+ end
+ end
+ actual_messages = target_driver.events.collect { |event| event[2] }
+ assert_equal([{'a' => 'b', 'foo' => 'bar', 'message' => 'test'}], actual_messages)
+ end
+
  def test_exclude_fields
  conf = config(default_topic: TOPIC_NAME) +
  config_element('ROOT', '', {"exclude_fields" => "$.foo"}, [])
@@ -163,5 +163,20 @@ class Rdkafka2OutputTest < Test::Unit::TestCase
  actual_messages = target_driver.events.collect { |event| event[2] }
  assert_equal(expected_messages, actual_messages)
  end
+
+ def test_record_key
+ conf = config(default_topic: TOPIC_NAME) +
+ config_element('ROOT', '', {"record_key" => "$.data"}, [])
+ target_driver = create_target_driver
+ target_driver.run(expect_records: 1, timeout: 5) do
+ sleep 2
+ d = create_driver(conf)
+ d.run do
+ d.feed('test', event_time, {'data' => {'a' => 'b', 'foo' => 'bar', 'message' => 'test'}, 'message_key' => '123456'})
+ end
+ end
+ actual_messages = target_driver.events.collect { |event| event[2] }
+ assert_equal([{'a' => 'b', 'foo' => 'bar', 'message' => 'test'}], actual_messages)
+ end
  end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-kafka
  version: !ruby/object:Gem::Version
- version: 0.17.3
+ version: 0.18.0
  platform: ruby
  authors:
  - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-11-26 00:00:00.000000000 Z
+ date: 2022-07-21 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: fluentd
@@ -193,7 +193,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.2.5
+ rubygems_version: 3.3.5
  signing_key:
  specification_version: 4
  summary: Fluentd plugin for Apache Kafka > 0.8