fluent-plugin-kafka 0.17.4 → 0.18.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5cc122034295e37318cd7510ef3347eeda14cc43b8c0132053cb944d68141feb
- data.tar.gz: e725b07eaa95f639b2122f1a4c8342101314f2f721e3625c73889dc8caf9aead
+ metadata.gz: 8b47d0dafccd1d84a2adcef4e9a0830b26998fd21c4095a974887ffa201fc64c
+ data.tar.gz: 744716215237149802687884a5c306e4684f1d2e89959d13d04b64f3116a65f2
  SHA512:
- metadata.gz: 2e432e7f2670132022b18fa9460b8eda69a18a4dd3a35aa775619c6a45ff8cb6ea5bad869ebc5cefe804b9bb4261ab12150cb77ad10af62dc2e54fd6de435aec
- data.tar.gz: edbebd57c325292d197d342ff8f5151aa1fcfbd47128fc09d1b71e2bf4d7ccf196d54b48df106f1b9f655fe334ab1f9fe907fce8b4f1b1d20edd9c8254c6c8cd
+ metadata.gz: 0215e2229d272a24abef39f8179457a4585d582a388f8b03bc70a1d4a066d27422878c91277d5b7412b3ddd930716b337ff56a75e4fb8de00366063b7c47c52a
+ data.tar.gz: 6f47fa553a9cf2cf2d55b725d5b8f562e6aef1c3ef92aee597bbf9f84aac7e0fb7fac5d67864d935c02b1dc5a890563a9b530b75ae47d3bac1763fbc021f20bc
@@ -1,7 +1,9 @@
  name: linux
  on:
-   - push
-   - pull_request
+   push:
+     branches: [master]
+   pull_request:
+     branches: [master]
  jobs:
    build:
      runs-on: ${{ matrix.os }}
@@ -10,7 +12,7 @@ jobs:
      strategy:
        fail-fast: false
        matrix:
-         ruby: [ '2.4', '2.5', '2.6', '2.7', '3.0' ]
+         ruby: [ '3.1', '3.0', '2.7', '2.6' ]
          os:
            - ubuntu-latest
      name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
data/ChangeLog CHANGED
@@ -1,3 +1,14 @@
+ Release 0.18.1 - 2022/08/17
+ * out_kafka2: Fix a bug where `chunk_limit_records` and `chunk_limit_size` were not respected
+
+ Release 0.18.0 - 2022/07/21
+ * out_kafka2: Keep Kafka connections alive between flushes
+ * out_rdkafka2: Allow setting SASL credentials via the `username` and `password` parameters
+ * out_kafka2/out_rdkafka2: Add `record_key` parameter
+
+ Release 0.17.5 - 2022/03/18
+ * out_kafka2: Add `resolve_seed_brokers` parameter
+
  Release 0.17.4 - 2022/01/25
  * in_kafka_group: Add `refresh_topic_interval` parameter
 
data/README.md CHANGED
@@ -193,6 +193,7 @@ If `ruby-kafka` doesn't fit your kafka environment, check `rdkafka2` plugin inst
      message_key_key (string) :default => 'message_key'
      default_topic (string) :default => nil
      default_partition_key (string) :default => nil
+     record_key (string) :default => nil
      default_message_key (string) :default => nil
      exclude_topic_key (bool) :default => false
      exclude_partition_key (bool) :default => false
@@ -205,6 +206,7 @@ If `ruby-kafka` doesn't fit your kafka environment, check `rdkafka2` plugin inst
      use_default_for_unknown_topic (bool) :default => false
      discard_kafka_delivery_failed (bool) :default => false (No discard)
      partitioner_hash_function (enum) (crc32|murmur2) :default => 'crc32'
+     share_producer (bool) :default => false
 
      <format>
        @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
@@ -335,6 +337,40 @@ For example, `$.source.ip` can be extracted with config `headers_from_record` an
 
  > Using this config to remove unused fields is discouraged. A [filter plugin](https://docs.fluentd.org/v/0.12/filter) can be used for this purpose.
 
+ #### Send only a sub field as a message payload
+
+ If `record_key` is provided, the plugin sends only the sub field pointed to by that key.
+ The key is specified in jsonpath format.
+
+ For example, given the following configuration and incoming record:
+
+ configuration:
+
+     <match **>
+       @type kafka2
+       [...]
+       record_key '$.data'
+     </match>
+
+ record:
+
+     {
+       "specversion" : "1.0",
+       "type" : "com.example.someevent",
+       "id" : "C234-1234-1234",
+       "time" : "2018-04-05T17:31:00Z",
+       "datacontenttype" : "application/json",
+       "data" : {
+         "appinfoA" : "abc",
+         "appinfoB" : 123,
+         "appinfoC" : true
+       },
+       ...
+     }
+
+ only the `data` field will be serialized by the formatter and sent to Kafka.
+ The top-level `data` key will be removed.
+
  ### Buffered output plugin
 
  This plugin uses ruby-kafka producer for writing data. This plugin is for v0.12. If you use v1, see `kafka2`.
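The extraction behind `record_key` relies on Fluentd's record accessor (the out_kafka2 hunks below wire it up with `record_accessor_create(@record_key)`). The following is a minimal standalone sketch of what a `$.data` accessor yields, assuming a stock fluentd gem; it is an illustration, not the plugin's exact code path:

    # Sketch: resolve '$.data' against a record the way the plugin does
    # before handing the result to the <format> section.
    require 'fluent/plugin_helper/record_accessor'

    accessor = Fluent::PluginHelper::RecordAccessor::Accessor.new('$.data')
    record = {
      'specversion' => '1.0',
      'type'        => 'com.example.someevent',
      'data'        => { 'appinfoA' => 'abc', 'appinfoB' => 123, 'appinfoC' => true }
    }
    payload = accessor.call(record)
    # => {"appinfoA"=>"abc", "appinfoB"=>123, "appinfoC"=>true}
    # Only this hash (not the full record) is serialized and sent to Kafka.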
@@ -460,6 +496,7 @@ You need to install rdkafka gem.
  # same with kafka2
  headers (hash) :default => {}
  headers_from_record (hash) :default => {}
+ record_key (string) :default => nil
 
  <format>
    @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
@@ -13,7 +13,7 @@ Gem::Specification.new do |gem|
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "fluent-plugin-kafka"
  gem.require_paths = ["lib"]
- gem.version = '0.17.4'
+ gem.version = '0.18.1'
  gem.required_ruby_version = ">= 2.1.0"
 
  gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
@@ -38,9 +38,15 @@ module Kafka
  end
 
  # for out_kafka2
+ # Most (if not all) of this code is lifted from https://github.com/zendesk/ruby-kafka/blob/master/lib/kafka/producer.rb
+ # with the main difference being that the checks on max_buffer_bytesize and max_buffer_size have been removed.
+ # The reason for doing this is to provide a better UX for our users: they only need to set those bounds in
+ # the Buffer section using `chunk_limit_size` and `chunk_limit_records`.
+ #
+ # We should reconsider this in the future in case the `ruby-kafka` library drastically changes its internals.
  module Kafka
    class Client
-     def topic_producer(topic, compression_codec: nil, compression_threshold: 1, ack_timeout: 5, required_acks: :all, max_retries: 2, retry_backoff: 1, max_buffer_size: 1000, max_buffer_bytesize: 10_000_000, idempotent: false, transactional: false, transactional_id: nil, transactional_timeout: 60)
+     def custom_producer(compression_codec: nil, compression_threshold: 1, ack_timeout: 5, required_acks: :all, max_retries: 2, retry_backoff: 1, max_buffer_size: 1000, max_buffer_bytesize: 10_000_000, idempotent: false, transactional: false, transactional_id: nil, transactional_timeout: 60)
        cluster = initialize_cluster
        compressor = Compressor.new(
          codec_name: compression_codec,
@@ -57,8 +63,7 @@ module Kafka
          transactional_timeout: transactional_timeout,
        )
 
-       TopicProducer.new(topic,
-         cluster: cluster,
+       CustomProducer.new(cluster: cluster,
          transaction_manager: transaction_manager,
          logger: @logger,
          instrumenter: @instrumenter,
@@ -74,8 +79,8 @@ module Kafka
      end
    end
 
-   class TopicProducer
-     def initialize(topic, cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:, partitioner:)
+   class CustomProducer
+     def initialize(cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:, partitioner:)
        @cluster = cluster
        @transaction_manager = transaction_manager
        @logger = logger
@@ -88,10 +93,6 @@ module Kafka
        @max_buffer_bytesize = max_buffer_bytesize
        @compressor = compressor
        @partitioner = partitioner
-
-       @topic = topic
-       @cluster.add_target_topics(Set.new([topic]))
-
        # A buffer organized by topic/partition.
        @buffer = MessageBuffer.new
 
@@ -99,12 +100,12 @@ module Kafka
        @pending_message_queue = PendingMessageQueue.new
      end
 
-     def produce(value, key: nil, partition: nil, partition_key: nil, headers: EMPTY_HEADER, create_time: Time.now)
+     def produce(value, key: nil, partition: nil, partition_key: nil, headers: EMPTY_HEADER, create_time: Time.now, topic: nil)
        message = PendingMessage.new(
          value: value,
          key: key,
          headers: headers,
-         topic: @topic,
+         topic: topic,
          partition: partition,
          partition_key: partition_key,
          create_time: create_time
@@ -245,12 +246,13 @@ module Kafka
 
      def assign_partitions!
        failed_messages = []
-       partition_count = @cluster.partitions_for(@topic).count
 
        @pending_message_queue.each do |message|
          partition = message.partition
 
          begin
+           partition_count = @cluster.partitions_for(message.topic).count
+
            if partition.nil?
              partition = @partitioner.call(partition_count, message)
            end
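As the comment at the top of the `CustomProducer` code notes, buffering bounds are no longer enforced by the producer itself but by the Fluentd buffer. The following is a hedged sketch of the corresponding `<buffer>` settings; the broker address and limit values are illustrative, not taken from the source:

    <match **>
      @type kafka2
      brokers broker1.example.com:9092   # illustrative broker address
      default_topic events
      <buffer topic>
        chunk_limit_size 1MB         # upper bound on bytes handed to one producer flush
        chunk_limit_records 1000     # upper bound on records per flushed chunk
      </buffer>
      <format>
        @type json
      </format>
    </match>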
@@ -27,6 +27,11 @@ DESC
  config_param :partitioner_hash_function, :enum, list: [:crc32, :murmur2], :default => :crc32,
               :desc => "Specify kafka patrtitioner hash algorithm"
  config_param :default_partition, :integer, :default => nil
+ config_param :record_key, :string, :default => nil,
+              :desc => <<-DESC
+ A jsonpath to a record value pointing to the field which will be passed to the formatter and sent as the Kafka message payload.
+ If defined, only this field in the record will be sent to Kafka as the message payload.
+ DESC
  config_param :use_default_for_unknown_topic, :bool, :default => false, :desc => "If true, default_topic is used when topic not found"
  config_param :client_id, :string, :default => 'fluentd'
  config_param :idempotent, :bool, :default => false, :desc => 'Enable idempotent producer'
@@ -49,6 +54,8 @@ DESC
               :desc => 'Kafka message headers'
  config_param :headers_from_record, :hash, default: {}, symbolize_keys: true, value_type: :string,
               :desc => 'Kafka message headers where the header value is a jsonpath to a record value'
+ config_param :resolve_seed_brokers, :bool, :default => false,
+              :desc => "support brokers' hostname with multiple addresses"
 
  config_param :get_kafka_client_log, :bool, :default => false
 
@@ -79,6 +86,7 @@ DESC
  Add a regular expression to capture ActiveSupport notifications from the Kafka client
  requires activesupport gem - records will be generated under fluent_kafka_stats.**
  DESC
+ config_param :share_producer, :bool, :default => false, :desc => 'share kafka producer between flush threads'
 
  config_section :buffer do
    config_set_default :chunk_keys, ["topic"]
@@ -94,6 +102,12 @@ DESC
    super
 
    @kafka = nil
+   @producers = nil
+   @producers_mutex = nil
+   @shared_producer = nil
+
+   @writing_threads_mutex = Mutex.new
+   @writing_threads = Set.new
  end
 
  def refresh_client(raise_error = true)
@@ -103,19 +117,19 @@ DESC
      @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                         ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                         ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_scram_username: @username, sasl_scram_password: @password,
-                        sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname,
+                        sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                         partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
    elsif @username != nil && @password != nil
      @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                         ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                         ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl,
-                        ssl_verify_hostname: @ssl_verify_hostname,
+                        ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                         partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
    else
      @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                         ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                         ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl,
-                        ssl_verify_hostname: @ssl_verify_hostname,
+                        ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                         partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
    end
    log.info "initialized kafka producer: #{@client_id}"
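The hunk above passes `resolve_seed_brokers` through to every `Kafka.new` call, so ruby-kafka can expand a single seed hostname to all of its resolved addresses. A hedged usage sketch; the hostname is illustrative only:

    <match **>
      @type kafka2
      # one DNS name that resolves to several broker addresses (illustrative)
      brokers kafka-bootstrap.example.internal:9092
      resolve_seed_brokers true
      default_topic events
      <format>
        @type json
      </format>
    </match>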
@@ -183,15 +197,29 @@ DESC
    @exclude_field_accessors = @exclude_fields.map do |field|
      record_accessor_create(field)
    end
+
+   @record_field_accessor = nil
+   @record_field_accessor = record_accessor_create(@record_key) unless @record_key.nil?
  end
 
  def multi_workers_ready?
    true
  end
 
+ def create_producer
+   @kafka.custom_producer(**@producer_opts)
+ end
+
  def start
    super
    refresh_client
+
+   if @share_producer
+     @shared_producer = create_producer
+   else
+     @producers = {}
+     @producers_mutex = Mutex.new
+   end
  end
 
  def close
@@ -204,6 +232,56 @@ DESC
    @kafka = nil
  end
 
+ def wait_writing_threads
+   done = false
+   until done do
+     @writing_threads_mutex.synchronize do
+       done = true if @writing_threads.empty?
+     end
+     sleep(1) unless done
+   end
+ end
+
+ def shutdown
+   super
+   wait_writing_threads
+   shutdown_producers
+ end
+
+ def shutdown_producers
+   if @share_producer
+     @shared_producer.shutdown
+     @shared_producer = nil
+   else
+     @producers_mutex.synchronize {
+       shutdown_threads = @producers.map { |key, producer|
+         th = Thread.new {
+           producer.shutdown
+         }
+         th.abort_on_exception = true
+         th
+       }
+       shutdown_threads.each { |th| th.join }
+       @producers = {}
+     }
+   end
+ end
+
+ def get_producer
+   if @share_producer
+     @shared_producer
+   else
+     @producers_mutex.synchronize {
+       producer = @producers[Thread.current.object_id]
+       unless producer
+         producer = create_producer
+         @producers[Thread.current.object_id] = producer
+       end
+       producer
+     }
+   end
+ end
+
  def setup_formatter(conf)
    type = conf['@type']
    case type
@@ -227,6 +305,8 @@ DESC
 
  # TODO: optimize write performance
  def write(chunk)
+   @writing_threads_mutex.synchronize { @writing_threads.add(Thread.current) }
+
    tag = chunk.metadata.tag
    topic = if @topic
              extract_placeholders(@topic, chunk)
@@ -235,13 +315,12 @@ DESC
            end
 
    messages = 0
-   record_buf = nil
 
    base_headers = @headers
    mutate_headers = !@headers_from_record_accessors.empty?
 
    begin
-     producer = @kafka.topic_producer(topic, **@producer_opts)
+     producer = get_producer
      chunk.msgpack_each { |time, record|
        begin
          record = inject_values_to_record(tag, time, record)
@@ -265,6 +344,7 @@ DESC
            end
          end
 
+         record = @record_field_accessor.call(record) unless @record_field_accessor.nil?
          record_buf = @formatter_proc.call(tag, time, record)
          record_buf_bytes = record_buf.bytesize
          if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
@@ -281,7 +361,7 @@ DESC
          messages += 1
 
          producer.produce(record_buf, key: message_key, partition_key: partition_key, partition: partition, headers: headers,
-                          create_time: @use_event_time ? Time.at(time) : Time.now)
+                          create_time: @use_event_time ? Time.at(time) : Time.now, topic: topic)
      }
 
      if messages > 0
@@ -299,7 +379,6 @@ DESC
      end
    rescue Kafka::UnknownTopicOrPartition
      if @use_default_for_unknown_topic && topic != @default_topic
-       producer.shutdown if producer
        log.warn "'#{topic}' topic not found. Retry with '#{default_topic}' topic"
        topic = @default_topic
        retry
@@ -319,7 +398,7 @@ DESC
      # Raise exception to retry sendind messages
      raise e unless ignore
    ensure
-     producer.shutdown if producer
+     @writing_threads_mutex.synchronize { @writing_threads.delete(Thread.current) }
    end
  end
  end
@@ -73,6 +73,11 @@ DESC
               :desc => <<-DESC
  The codec the producer uses to compress messages. Used for compression.codec
  Supported codecs: (gzip|snappy)
+ DESC
+ config_param :record_key, :string, :default => nil,
+              :desc => <<-DESC
+ A jsonpath to a record value pointing to the field which will be passed to the formatter and sent as the Kafka message payload.
+ If defined, only this field in the record will be sent to Kafka as the message payload.
  DESC
  config_param :use_event_time, :bool, :default => false, :desc => 'Use fluentd event time for rdkafka timestamp'
  config_param :max_send_limit_bytes, :size, :default => nil
@@ -230,6 +235,9 @@ DESC
    end
 
    @enqueue_rate = EnqueueRate.new(@max_enqueue_bytes_per_second) unless @max_enqueue_bytes_per_second.nil?
+
+   @record_field_accessor = nil
+   @record_field_accessor = record_accessor_create(@record_key) unless @record_key.nil?
  end
 
  def build_config
@@ -270,6 +278,8 @@ DESC
    config[:"queue.buffering.max.messages"] = @rdkafka_buffering_max_messages if @rdkafka_buffering_max_messages
    config[:"message.max.bytes"] = @rdkafka_message_max_bytes if @rdkafka_message_max_bytes
    config[:"batch.num.messages"] = @rdkafka_message_max_num if @rdkafka_message_max_num
+   config[:"sasl.username"] = @username if @username
+   config[:"sasl.password"] = @password if @password
 
    @rdkafka_options.each { |k, v|
      config[k.to_sym] = v
@@ -371,8 +381,6 @@ DESC
    end
 
    handlers = []
-   record_buf = nil
-   record_buf_bytes = nil
 
    headers = @headers.clone
 
@@ -395,6 +403,7 @@ DESC
        end
      end
 
+     record = @record_field_accessor.call(record) unless @record_field_accessor.nil?
      record_buf = @formatter_proc.call(tag, time, record)
      record_buf_bytes = record_buf.bytesize
      if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
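With the hunks above, out_rdkafka2 forwards its `username` and `password` parameters to librdkafka as `sasl.username` and `sasl.password`. A hedged configuration sketch; the broker, credentials, and the extra options passed through `rdkafka_options` are illustrative assumptions, not taken from the source:

    <match **>
      @type rdkafka2
      brokers broker1.example.com:9092        # illustrative
      username my-sasl-user                   # hypothetical credentials
      password my-sasl-password
      # mechanism/protocol still go through librdkafka options (illustrative values)
      rdkafka_options {"security.protocol":"sasl_ssl","sasl.mechanisms":"PLAIN"}
      default_topic events
      <format>
        @type json
      </format>
    </match>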
@@ -60,6 +60,13 @@ class Kafka2OutputTest < Test::Unit::TestCase
    assert_equal true, d.instance.multi_workers_ready?
  end
 
+ def test_resolve_seed_brokers
+   d = create_driver(config + config_element('ROOT', '', {"resolve_seed_brokers" => true}))
+   assert_nothing_raised do
+     d.instance.refresh_client
+   end
+ end
+
  class WriteTest < self
    TOPIC_NAME = "kafka-output-#{SecureRandom.uuid}"
 
@@ -98,6 +105,21 @@ class Kafka2OutputTest < Test::Unit::TestCase
      assert_equal([expected_message], actual_messages)
    end
 
+   def test_record_key
+     conf = config(default_topic: TOPIC_NAME) +
+       config_element('ROOT', '', {"record_key" => "$.data"}, [])
+     target_driver = create_target_driver
+     target_driver.run(expect_records: 1, timeout: 5) do
+       sleep 2
+       d = create_driver(conf)
+       d.run do
+         d.feed('test', event_time, {'data' => {'a' => 'b', 'foo' => 'bar', 'message' => 'test'}, 'message_key' => '123456'})
+       end
+     end
+     actual_messages = target_driver.events.collect { |event| event[2] }
+     assert_equal([{'a' => 'b', 'foo' => 'bar', 'message' => 'test'}], actual_messages)
+   end
+
    def test_exclude_fields
      conf = config(default_topic: TOPIC_NAME) +
        config_element('ROOT', '', {"exclude_fields" => "$.foo"}, [])
@@ -163,5 +163,20 @@ class Rdkafka2OutputTest < Test::Unit::TestCase
      actual_messages = target_driver.events.collect { |event| event[2] }
      assert_equal(expected_messages, actual_messages)
    end
+
+   def test_record_key
+     conf = config(default_topic: TOPIC_NAME) +
+       config_element('ROOT', '', {"record_key" => "$.data"}, [])
+     target_driver = create_target_driver
+     target_driver.run(expect_records: 1, timeout: 5) do
+       sleep 2
+       d = create_driver(conf)
+       d.run do
+         d.feed('test', event_time, {'data' => {'a' => 'b', 'foo' => 'bar', 'message' => 'test'}, 'message_key' => '123456'})
+       end
+     end
+     actual_messages = target_driver.events.collect { |event| event[2] }
+     assert_equal([{'a' => 'b', 'foo' => 'bar', 'message' => 'test'}], actual_messages)
+   end
  end
end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-kafka
  version: !ruby/object:Gem::Version
-   version: 0.17.4
+   version: 0.18.1
  platform: ruby
  authors:
  - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-01-25 00:00:00.000000000 Z
+ date: 2022-08-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: fluentd
@@ -193,7 +193,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: '0'
  requirements: []
- rubygems_version: 3.2.5
+ rubygems_version: 3.3.5
  signing_key:
  specification_version: 4
  summary: Fluentd plugin for Apache Kafka > 0.8