fluent-plugin-kafka 0.17.2 → 0.17.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 16968c0e56c22f64225e8e41e905294defc7240df6054813151d3904a79a4107
-  data.tar.gz: 71b13953b11048f201c8a8a275350e983a51377b45e84440196b701063e317c4
+  metadata.gz: eb7f693666ff7fd4bdb43b249d3e60e882ebf99e80402de2a5310b61668ef9e7
+  data.tar.gz: d4b141409cf83402ad1e28b9579317c672b20c9760b8f58b8d54800e8c46313b
 SHA512:
-  metadata.gz: aedae36f4b7a29408bc96838da3e158a98ddac136987408dcd4bc347068e3d27839e844d1d3d4a239e4f175e023f19d9ecd82e14d4ab089fd43625f60dcca17c
-  data.tar.gz: 4a72ea62a754b689944d3f2de81cc495c9ef92df97c127b80b46ace6824aca00df5b4cc499eb3993d0edbd3f156f140c6c8ba718fb82f65b5328170adf26f622
+  metadata.gz: 8f86c7c82fbf5db63ef0c92737ba4935a3a60534d434dfb9552c2ad624ec1e28fea9876c689a29ca6b16c16abc3818fdfb24bd7540495f980daa8a5509b29ca0
+  data.tar.gz: 8b1dfba11d40e12f9b0b1fbf4ff947678fb5ef2ab3fbd3969e363935c8e895c29f497273ddb86c80c4e1ebc9d254045275dc7e08b07788d4573799c97ce7a393
data/.github/workflows/linux.yml CHANGED
@@ -1,7 +1,9 @@
 name: linux
 on:
-  - push
-  - pull_request
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
 jobs:
   build:
     runs-on: ${{ matrix.os }}
@@ -10,7 +12,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        ruby: [ '2.4', '2.5', '2.6', '2.7', '3.0' ]
+        ruby: [ '3.1', '3.0', '2.7', '2.6' ]
         os:
           - ubuntu-latest
     name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
data/ChangeLog CHANGED
@@ -1,3 +1,12 @@
+Release 0.17.5 - 2022/03/18
+	* out_kafka2: Add `resolve_seed_brokers` parameter
+
+Release 0.17.4 - 2022/01/25
+	* in_kafka_group: Add `refresh_topic_interval` parameter
+
+Release 0.17.3 - 2021/11/26
+	* output: Suppress large warning logs for events skipped by `max_send_limit_bytes`
+
 Release 0.17.2 - 2021/10/14
 	* out_rdkafka2: Add `max_enqueue_bytes_per_second` parameter
 	* out_rdkafka2: Support `use_event_time` parameter
data/README.md CHANGED
@@ -119,7 +119,7 @@ Consume events by kafka consumer group features..
   topics <listening topics(separate with comma',')>
   format <input text type (text|json|ltsv|msgpack)> :default => json
   message_key <key (Optional, for text format only, default is message)>
-  kafka_mesasge_key <key (Optional, If specified, set kafka's message key to this key)>
+  kafka_message_key <key (Optional, If specified, set kafka's message key to this key)>
   add_headers <If true, add kafka's message headers to record>
   add_prefix <tag prefix (Optional)>
   add_suffix <tag suffix (Optional)>
@@ -135,6 +135,7 @@ Consume events by kafka consumer group features..
   offset_commit_interval (integer) :default => nil (Use default of ruby-kafka)
   offset_commit_threshold (integer) :default => nil (Use default of ruby-kafka)
   fetcher_max_queue_size (integer) :default => nil (Use default of ruby-kafka)
+  refresh_topic_interval (integer) :default => nil (Use default of ruby-kafka)
   start_from_beginning (bool) :default => true
 </source>
 
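(For illustration only: a minimal `kafka_group` source using the new `refresh_topic_interval` parameter might look like the sketch below; the broker address, group name, topics, and the 30-second interval are placeholder values, not taken from this package.)

    <source>
      @type kafka_group
      brokers localhost:9092
      consumer_group my-fluentd-group
      topics topic-a,topic-b
      format json
      refresh_topic_interval 30
    </source>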
@@ -155,7 +156,7 @@ With the introduction of the rdkafka-ruby based input plugin we hope to support
   topics <listening topics(separate with comma',')>
   format <input text type (text|json|ltsv|msgpack)> :default => json
   message_key <key (Optional, for text format only, default is message)>
-  kafka_mesasge_key <key (Optional, If specified, set kafka's message key to this key)>
+  kafka_message_key <key (Optional, If specified, set kafka's message key to this key)>
   add_headers <If true, add kafka's message headers to record>
   add_prefix <tag prefix (Optional)>
   add_suffix <tag suffix (Optional)>
data/fluent-plugin-kafka.gemspec CHANGED
@@ -13,7 +13,7 @@ Gem::Specification.new do |gem|
   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
   gem.name = "fluent-plugin-kafka"
   gem.require_paths = ["lib"]
-  gem.version = '0.17.2'
+  gem.version = '0.17.5'
   gem.required_ruby_version = ">= 2.1.0"
 
   gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
data/lib/fluent/plugin/in_kafka_group.rb CHANGED
@@ -67,6 +67,8 @@ class Fluent::KafkaGroupInput < Fluent::Input
                :desc => "The number of messages that can be processed before their offsets are committed"
   config_param :fetcher_max_queue_size, :integer, :default => nil,
                :desc => "The number of fetched messages per partition that are queued in fetcher queue"
+  config_param :refresh_topic_interval, :integer, :default => nil,
+               :desc => "The interval of refreshing the topic list in seconds. Zero or unset disables this"
   config_param :start_from_beginning, :bool, :default => true,
                :desc => "Whether to start from the beginning of the topic or just subscribe to new messages being produced"
 
@@ -128,6 +130,7 @@ class Fluent::KafkaGroupInput < Fluent::Input
     @consumer_opts[:offset_commit_interval] = @offset_commit_interval if @offset_commit_interval
     @consumer_opts[:offset_commit_threshold] = @offset_commit_threshold if @offset_commit_threshold
     @consumer_opts[:fetcher_max_queue_size] = @fetcher_max_queue_size if @fetcher_max_queue_size
+    @consumer_opts[:refresh_topic_interval] = @refresh_topic_interval if @refresh_topic_interval
 
     @fetch_opts = {}
     @fetch_opts[:max_wait_time] = @max_wait_time if @max_wait_time
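(Note: the option is forwarded to the ruby-kafka consumer only when set, so omitting `refresh_topic_interval` keeps ruby-kafka's default. Per the parameter description, an explicit zero disables the periodic refresh, which is what the test configurations later in this diff do; a hedged fragment:)

      # 0 disables the periodic topic-list refresh
      refresh_topic_interval 0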
@@ -240,7 +240,8 @@ DESC
         record_buf = @formatter_proc.call(tag, time, record)
         record_buf_bytes = record_buf.bytesize
         if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
-          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record_size => record_buf_bytes
+          log.debug "Skipped event:", :record => record
           next
         end
         log.trace { "message will send to #{topic} with partition_key: #{partition_key}, partition: #{partition}, message_key: #{message_key} and value: #{record_buf}." }
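(This hunk, repeated below for each output plugin, logs only the oversized record's size at warning level and demotes the record body to debug level, so a burst of oversized events no longer floods the log. To inspect the skipped records themselves, one could raise the plugin's verbosity with Fluentd's standard `@log_level` parameter; a hypothetical `kafka2` match section with placeholder broker, topic, and limit:)

    <match app.**>
      @type kafka2
      brokers localhost:9092
      default_topic my-topic
      max_send_limit_bytes 1000000
      @log_level debug
    </match>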
data/lib/fluent/plugin/out_kafka2.rb CHANGED
@@ -49,6 +49,8 @@ DESC
                :desc => 'Kafka message headers'
   config_param :headers_from_record, :hash, default: {}, symbolize_keys: true, value_type: :string,
                :desc => 'Kafka message headers where the header value is a jsonpath to a record value'
+  config_param :resolve_seed_brokers, :bool, :default => false,
+               :desc => "support brokers' hostname with multiple addresses"
 
   config_param :get_kafka_client_log, :bool, :default => false
 
@@ -103,19 +105,19 @@ DESC
       @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                          ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                          ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_scram_username: @username, sasl_scram_password: @password,
-                         sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname,
+                         sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                          partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
     elsif @username != nil && @password != nil
       @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                          ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                          ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl,
-                         ssl_verify_hostname: @ssl_verify_hostname,
+                         ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                          partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
     else
       @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                          ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                          ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl,
-                         ssl_verify_hostname: @ssl_verify_hostname,
+                         ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                          partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
     end
     log.info "initialized kafka producer: #{@client_id}"
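(As the parameter description says, `resolve_seed_brokers` makes the client resolve each seed broker's hostname in DNS and try the returned addresses, which helps when a single name, such as a round-robin DNS entry or a headless Kubernetes service, fronts several brokers. A hedged configuration sketch; the hostname is a placeholder:)

    <match app.**>
      @type kafka2
      # one DNS name that resolves to multiple broker addresses
      brokers kafka.example.internal:9092
      default_topic my-topic
      resolve_seed_brokers true
    </match>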
@@ -268,7 +270,8 @@ DESC
         record_buf = @formatter_proc.call(tag, time, record)
         record_buf_bytes = record_buf.bytesize
         if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
-          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record_size => record_buf_bytes
+          log.debug "Skipped event:", :record => record
           next
         end
       rescue StandardError => e
@@ -332,7 +332,8 @@ DESC
         record_buf = @formatter_proc.call(tag, time, record)
         record_buf_bytes = record_buf.bytesize
         if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
-          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record_size => record_buf_bytes
+          log.debug "Skipped event:", :record => record
           next
         end
       rescue StandardError => e
@@ -278,7 +278,8 @@ DESC
         record_buf = @formatter_proc.call(tag, time, record)
         record_buf_bytes = record_buf.bytesize
         if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
-          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record_size => record_buf_bytes
+          log.debug "Skipped event:", :record => record
           next
         end
       rescue StandardError => e
@@ -398,7 +398,8 @@ DESC
         record_buf = @formatter_proc.call(tag, time, record)
         record_buf_bytes = record_buf.bytesize
         if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
-          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record_size => record_buf_bytes
+          log.debug "Skipped event:", :record => record
           next
         end
       rescue StandardError => e
data/test/plugin/test_in_kafka_group.rb CHANGED
@@ -14,6 +14,7 @@ class KafkaGroupInputTest < Test::Unit::TestCase
     brokers localhost:9092
     consumer_group fluentd
     format text
+    refresh_topic_interval 0
     @label @kafka
     topics #{TOPIC_NAME}
   ]
@@ -52,6 +53,7 @@ class KafkaGroupInputTest < Test::Unit::TestCase
     brokers localhost:9092
     format text
     @label @kafka
+    refresh_topic_interval 0
     topics #{TOPIC_NAME}
   ]
   d = create_driver
data/test/plugin/test_out_kafka2.rb CHANGED
@@ -60,6 +60,13 @@ class Kafka2OutputTest < Test::Unit::TestCase
     assert_equal true, d.instance.multi_workers_ready?
   end
 
+  def test_resolve_seed_brokers
+    d = create_driver(config + config_element('ROOT', '', {"resolve_seed_brokers" => true}))
+    assert_nothing_raised do
+      d.instance.refresh_client
+    end
+  end
+
   class WriteTest < self
     TOPIC_NAME = "kafka-output-#{SecureRandom.uuid}"
 
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-kafka
 version: !ruby/object:Gem::Version
-  version: 0.17.2
+  version: 0.17.5
 platform: ruby
 authors:
 - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-10-14 00:00:00.000000000 Z
+date: 2022-03-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fluentd