fluent-plugin-kafka 0.9.6 → 0.10.0
- checksums.yaml +4 -4
- data/ChangeLog +7 -0
- data/README.md +69 -56
- data/fluent-plugin-kafka.gemspec +2 -2
- data/lib/fluent/plugin/kafka_plugin_util.rb +2 -0
- data/lib/fluent/plugin/out_kafka.rb +20 -15
- data/lib/fluent/plugin/out_kafka2.rb +12 -5
- data/lib/fluent/plugin/out_kafka_buffered.rb +5 -4
- data/lib/fluent/plugin/out_rdkafka.rb +2 -2
- data/lib/fluent/plugin/out_rdkafka2.rb +2 -2
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-metadata.gz:
-data.tar.gz:
+metadata.gz: e2c8a3efaae4c3daecbc360c8092aa17a46a0bbcd2e533243f6f7ed375eb3aa4
+data.tar.gz: e2ae435c6923d71ea46e4fe3a5bfdd599cfb08aeea309da461ab15196cebdc07
 SHA512:
-metadata.gz:
-data.tar.gz:
+metadata.gz: 233159921bb7c2cd699b2e10939d26bd90e3280bfae56b9e5146dc7ac75619e3d91256e1f404c5ab412726bcf74e980f61b0e52600290b0b927579969aa21937
+data.tar.gz: f20ce22e379eb1785667d19ff95089b70c9cd33bbe0517f52e4c67b733cebc8128e6096b7376468fe15c1aea4aea3c9347f3413ad1c8535be58579f16f409583
data/ChangeLog
CHANGED
@@ -1,3 +1,10 @@
+Release 0.10.0 - 2019/07/03
+
+* output: Fixed max_send_limit_bytes parameter to be in all kafka outputs
+* output: Add ssl_verify_hostname parameter
+* rdkafka output: Fix exception handling
+* Update ruby-kafka version to v0.7.8 or later
+
 Release 0.9.6 - 2019/05/28
 
 * out_kafka2: Add metadata nil check
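The two output-side additions in this release are `ssl_verify_hostname`, which is handed straight to ruby-kafka's client, and `max_send_limit_bytes`, which is now honored by every kafka output. A minimal ruby-kafka sketch of what the new SSL option controls; the broker address and CA path are placeholders, and this is illustrative code, not part of the gem:

    require "kafka"   # ruby-kafka >= 0.7.8, the floor set by this release

    # ssl_verify_hostname goes straight through to Kafka.new, exactly as the
    # out_kafka* hunks below show. The default (true) keeps hostname
    # verification on; setting it to false is only for cases such as
    # self-signed test certificates whose CN does not match the broker address.
    kafka = Kafka.new(
      seed_brokers: ["localhost:9092"],           # placeholder broker
      client_id: "fluentd",
      ssl_ca_cert: File.read("/path/to/ca.pem"),  # placeholder CA certificate
      ssl_verify_hostname: true
    )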
data/README.md
CHANGED
@@ -140,18 +140,14 @@ See also [ruby-kafka README](https://github.com/zendesk/ruby-kafka#consuming-mes
 
 Consuming topic name is used for event tag. So when the target topic name is `app_event`, the tag is `app_event`. If you want to modify tag, use `add_prefix` or `add_suffix` parameter. With `add_prefix kafka`, the tag is `kafka.app_event`.
 
-###
+### Output plugin
 
-This plugin
+This plugin is for fluentd v1.0 or later. This will be `out_kafka` plugin in the future.
 
 <match app.**>
-@type
-
-# Brokers: you can choose either brokers or zookeeper. If you are not familiar with zookeeper, use brokers parameters.
-brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
-zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
-zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
+@type kafka2
 
+brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
 topic_key (string) :default => 'topic'
 partition_key (string) :default => 'partition'
 partition_key_key (string) :default => 'partition_key'
@@ -159,28 +155,37 @@ This plugin uses ruby-kafka producer for writing data. This plugin works with re
 default_topic (string) :default => nil
 default_partition_key (string) :default => nil
 default_message_key (string) :default => nil
-
-
-
-
-
-
+exclude_topic_key (bool) :default => false
+exclude_partition_key (bool) :default => false
+get_kafka_client_log (bool) :default => false
+use_default_for_unknown_topic (bool) :default => false
+
+<format>
+@type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
+</format>
 
-# See
+# Optional. See https://docs.fluentd.org/v/1.0/configuration/inject-section
+<inject>
+tag_key tag
+time_key time
+</inject>
+
+# See fluentd document for buffer related parameters: https://docs.fluentd.org/v/1.0/configuration/buffer-section
+# Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
+<buffer topic>
+flush_interval 10s
+</buffer>
 
 # ruby-kafka producer options
-
-
-
-
-
-
-max_send_limit_bytes (integer) :default => nil (No drop)
-discard_kafka_delivery_failed (bool) :default => false (No discard)
-monitoring_list (array) :default => []
+idempotent (bool) :default => false
+sasl_over_ssl (bool) :default => false
+max_send_retries (integer) :default => 1
+required_acks (integer) :default => -1
+ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+compression_codec (string) :default => nil (No compression. Depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression)
 </match>
 
-`<formatter name>`
+The `<formatter name>` in `<format>` uses fluentd's formatter plugins. See [formatter article](https://docs.fluentd.org/v/1.0/formatter).
 
 ruby-kafka sometimes returns `Kafka::DeliveryFailed` error without good information.
 In this case, `get_kafka_client_log` is useful for identifying the error cause.
@@ -216,6 +221,16 @@ On CentOS 7 installation is also necessary.
 
 $ sudo yum install gcc autoconf automake libtool snappy-devel
 
+This plugin supports compression codec "lz4" also.
+Install extlz4 module before you use lz4 compression.
+
+$ gem install extlz4 --no-document
+
+This plugin supports compression codec "zstd" also.
+Install zstd-ruby module before you use zstd compression.
+
+$ gem install zstd-ruby --no-document
+
 #### Load balancing
 
 Messages will be assigned a partition at random as default by ruby-kafka, but messages with the same partition key will always be assigned to the same partition by setting `default_partition_key` in config file.
@@ -230,14 +245,17 @@ If key name `partition_key_key` exists in a message, this plugin set the value o
 
 If key name `message_key_key` exists in a message, this plugin publishes the value of message_key_key to kafka and can be read by consumers. Same message key will be assigned to all messages by setting `default_message_key` in config file. If message_key_key exists and if partition_key_key is not set explicitly, message_key_key will be used for partitioning.
 
-###
+### Buffered output plugin
 
-This plugin
+This plugin uses ruby-kafka producer for writing data. This plugin works with recent kafka versions. This plugin is for v0.12. If you use v1, see `kafka2`.
 
 <match app.**>
-@type
+@type kafka_buffered
 
+# Brokers: you can choose either brokers or zookeeper. If you are not familiar with zookeeper, use brokers parameters.
 brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
+zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
+zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
 
 topic_key (string) :default => 'topic'
 partition_key (string) :default => 'partition'
@@ -246,32 +264,27 @@ This plugin is for fluentd v1.0 or later. This will be `out_kafka` plugin in the
 default_topic (string) :default => nil
 default_partition_key (string) :default => nil
 default_message_key (string) :default => nil
+output_data_type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
+output_include_tag (bool) :default => false
+output_include_time (bool) :default => false
 exclude_topic_key (bool) :default => false
 exclude_partition_key (bool) :default => false
 get_kafka_client_log (bool) :default => false
-use_default_for_unknown_topic (bool) :default => false
-
-<format>
-@type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
-</format>
 
-#
-<inject>
-tag_key tag
-time_key time
-</inject>
-
-# See fluentd document for buffer related parameters: http://docs.fluentd.org/articles/buffer-plugin-overview
-# Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
-<buffer topic>
-flush_interval 10s
-</buffer>
+# See fluentd document for buffer related parameters: https://docs.fluentd.org/v/0.12/buffer
 
 # ruby-kafka producer options
-
-
-
-
+idempotent (bool) :default => false
+sasl_over_ssl (bool) :default => false
+max_send_retries (integer) :default => 1
+required_acks (integer) :default => -1
+ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+compression_codec (string) :default => nil (No compression. Depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression)
+kafka_agg_max_bytes (integer) :default => 4096
+kafka_agg_max_messages (integer) :default => nil (No limit)
+max_send_limit_bytes (integer) :default => nil (No drop)
+discard_kafka_delivery_failed (bool) :default => false (No discard)
+monitoring_list (array) :default => []
 </match>
 
 ### Non-buffered output plugin
@@ -296,12 +309,12 @@ This plugin uses ruby-kafka producer for writing data. For performance and relia
 exclude_partition_key (bool) :default => false
 
 # ruby-kafka producer options
-max_send_retries (integer)
-required_acks (integer)
-ack_timeout (integer)
-compression_codec (
-max_buffer_size (integer)
-max_buffer_bytesize (integer)
+max_send_retries (integer) :default => 1
+required_acks (integer) :default => -1
+ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+compression_codec (string) :default => nil (No compression. Depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression)
+max_buffer_size (integer) :default => nil (Use default of ruby-kafka)
+max_buffer_bytesize (integer) :default => nil (Use default of ruby-kafka)
 </match>
 
 This plugin also supports ruby-kafka related parameters. See Buffered output plugin section.
@@ -336,13 +349,13 @@ You need to install rdkafka gem.
 @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
 </format>
 
-# Optional. See https://docs.fluentd.org/
+# Optional. See https://docs.fluentd.org/v/1.0/configuration/inject-section
 <inject>
 tag_key tag
 time_key time
 </inject>
 
-# See fluentd document for buffer section parameters: https://docs.fluentd.org/
+# See fluentd document for buffer section parameters: https://docs.fluentd.org/v/1.0/configuration/buffer-section
 # Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
 <buffer topic>
 flush_interval 10s
data/fluent-plugin-kafka.gemspec
CHANGED
@@ -13,12 +13,12 @@ Gem::Specification.new do |gem|
 gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
 gem.name = "fluent-plugin-kafka"
 gem.require_paths = ["lib"]
-gem.version = '0.9.6'
+gem.version = '0.10.0'
 gem.required_ruby_version = ">= 2.1.0"
 
 gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
 gem.add_dependency 'ltsv'
-gem.add_dependency 'ruby-kafka', '>= 0.7.
+gem.add_dependency 'ruby-kafka', '>= 0.7.8', '< 0.8.0'
 gem.add_development_dependency "rake", ">= 0.9.2"
 gem.add_development_dependency "test-unit", ">= 3.0.8"
 end
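For completeness, a hypothetical Gemfile that pulls in this release; the ruby-kafka constraint above (>= 0.7.8, < 0.8.0) is resolved transitively, so it does not need to be repeated. td-agent users would typically run `td-agent-gem install fluent-plugin-kafka -v 0.10.0` instead.

    # Gemfile (illustrative)
    source "https://rubygems.org"

    gem "fluentd", "~> 1.0"              # any version satisfying ">= 0.10.58, < 2" works
    gem "fluent-plugin-kafka", "0.10.0"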
data/lib/fluent/plugin/kafka_plugin_util.rb
CHANGED
@@ -14,6 +14,8 @@ module Fluent
 :desc => "an extra PEM encoded cert to use with and SSL connection."
 config_param :ssl_ca_certs_from_system, :bool, :default => false,
 :desc => "this configures the store to look up CA certificates from the system default certificate store on an as needed basis. The location of the store can usually be determined by: OpenSSL::X509::DEFAULT_CERT_FILE."
+config_param :ssl_verify_hostname, :bool, :default => true,
+:desc => "this configures whether hostname of certificate should be verified or not."
 }
 end
 
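These two lines are enough to expose the option everywhere because `SSLSettings` is a mixin: its `included` hook declares the `config_param` on whichever plugin class includes it, so out_kafka, out_kafka2 and out_kafka_buffered only have to forward the value (as the hunks below do). A hedged sketch of that pattern with a made-up plugin class:

    # illustrative mixin pattern; MyKafkaOutput is made up for this sketch
    require "fluent/plugin/output"

    module SSLSettingsSketch
      def self.included(klass)
        klass.instance_eval {
          config_param :ssl_verify_hostname, :bool, :default => true
        }
      end
    end

    class MyKafkaOutput < Fluent::Plugin::Output
      include SSLSettingsSketch   # ssl_verify_hostname is now a config option here too
    end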
data/lib/fluent/plugin/out_kafka.rb
CHANGED
@@ -55,7 +55,7 @@ DESC
 :desc => "How long the producer waits for acks."
 config_param :compression_codec, :string, :default => nil,
 :desc => "The codec the producer uses to compress messages."
-
+config_param :max_send_limit_bytes, :size, :default => nil
 config_param :time_format, :string, :default => nil
 
 config_param :max_buffer_size, :integer, :default => nil,
@@ -108,15 +108,16 @@ DESC
 if @scram_mechanism != nil && @username != nil && @password != nil
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
-sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl)
+sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl,
+ssl_verify_hostname: @ssl_verify_hostname)
 elsif @username != nil && @password != nil
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
-sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl)
+sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
 else
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
-sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl)
+sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
 end
 log.info "initialized kafka producer: #{@client_id}"
 else
@@ -228,17 +229,21 @@ DESC
 partition = (@exclude_partition ? record.delete('partition'.freeze) : record['partition'.freeze]) || @default_partition
 message_key = (@exclude_message_key ? record.delete('message_key') : record['message_key']) || @default_message_key
 
-
-
-
-
-
-
-
-
-
-
-
+record_buf = @formatter_proc.call(tag, time, record)
+record_buf_bytes = record_buf.bytesize
+if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+next
+end
+log.trace { "message will send to #{topic} with partition_key: #{partition_key}, partition: #{partition}, message_key: #{message_key} and value: #{record_buf}." }
+begin
+producer.produce(record_buf, topic: topic, key: message_key, partition: partition, partition_key: partition_key)
+rescue Kafka::BufferOverflow => e
+log.warn "BufferOverflow occurred: #{e}"
+log.info "Trying to deliver the messages to prevent the buffer from overflowing again."
+producer.deliver_messages
+log.info "Recovered from BufferOverflow successfully`"
+end
 end
 
 producer.deliver_messages
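Taken together, the hunk above gives out_kafka the same two safeguards as the other outputs: events whose formatted size exceeds `max_send_limit_bytes` are skipped before produce, and a local `Kafka::BufferOverflow` triggers a flush-and-continue rather than aborting the batch. A condensed sketch of that control flow, assuming `producer` is a ruby-kafka producer and `events` is an array of already-formatted strings (names are illustrative, not the plugin's API):

    # condensed, illustrative version of the loop added above
    max_send_limit_bytes = 1_000_000

    events.each do |record_buf|
      # drop oversized events instead of letting the broker reject the batch
      next if max_send_limit_bytes && record_buf.bytesize > max_send_limit_bytes

      begin
        producer.produce(record_buf, topic: "app_event")
      rescue Kafka::BufferOverflow
        producer.deliver_messages   # flush the local buffer, then keep going
      end
    end
    producer.deliver_messages       # final flush, as the plugin does after its loop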
data/lib/fluent/plugin/out_kafka2.rb
CHANGED
@@ -55,9 +55,9 @@ DESC
 config_param :compression_codec, :string, :default => nil,
 :desc => <<-DESC
 The codec the producer uses to compress messages.
-Supported codecs:
+Supported codecs depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression
 DESC
-
+config_param :max_send_limit_bytes, :size, :default => nil
 config_param :active_support_notification_regex, :string, :default => nil,
 :desc => <<-DESC
 Add a regular expression to capture ActiveSupport notifications from the Kafka client
@@ -87,15 +87,17 @@ DESC
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
 ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_scram_username: @username, sasl_scram_password: @password,
-sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl)
+sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
 elsif @username != nil && @password != nil
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
-ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl)
+ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl,
+ssl_verify_hostname: @ssl_verify_hostname)
 else
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
-ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl)
+ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl,
+ssl_verify_hostname: @ssl_verify_hostname)
 end
 log.info "initialized kafka producer: #{@client_id}"
 rescue Exception => e
@@ -214,6 +216,11 @@ DESC
 message_key = (@exclude_message_key ? record.delete(@message_key_key) : record[@message_key_key]) || @default_message_key
 
 record_buf = @formatter_proc.call(tag, time, record)
+record_buf_bytes = record_buf.bytesize
+if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+next
+end
 rescue StandardError => e
 log.warn "unexpected error during format record. Skip broken event:", :error => e.to_s, :error_class => e.class.to_s, :time => time, :record => record
 next
data/lib/fluent/plugin/out_kafka_buffered.rb
CHANGED
@@ -69,7 +69,7 @@ DESC
 config_param :compression_codec, :string, :default => nil,
 :desc => <<-DESC
 The codec the producer uses to compress messages.
-Supported codecs:
+Supported codecs depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression
 DESC
 config_param :max_send_limit_bytes, :size, :default => nil
 config_param :discard_kafka_delivery_failed, :bool, :default => false
@@ -131,15 +131,16 @@ DESC
 if @scram_mechanism != nil && @username != nil && @password != nil
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
-sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl)
+sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl,
+ssl_verify_hostname: @ssl_verify_hostname)
 elsif @username != nil && @password != nil
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
-sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl)
+sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
 else
 @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
 ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
-sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl)
+sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
 end
 log.info "initialized kafka producer: #{@client_id}"
 else
data/lib/fluent/plugin/out_rdkafka.rb
CHANGED
@@ -64,7 +64,7 @@ DESC
 The codec the producer uses to compress messages.
 Supported codecs: (gzip|snappy)
 DESC
-
+config_param :max_send_limit_bytes, :size, :default => nil
 config_param :rdkafka_buffering_max_ms, :integer, :default => nil
 config_param :rdkafka_buffering_max_messages, :integer, :default => nil
 config_param :rdkafka_message_max_bytes, :integer, :default => nil
@@ -284,7 +284,7 @@ DESC
 handler = producer.produce(topic: topic, payload: record_buf, key: message_key, partition: partition)
 return handler
 rescue Exception => e
-if e.code == :queue_full
+if e.respond_to?(:code) && e.code == :queue_full
 if attempt <= @max_enqueue_retries
 log.warn "Failed to enqueue message; attempting retry #{attempt} of #{@max_enqueue_retries} after #{@enqueue_retry_backoff}s"
 sleep @enqueue_retry_backoff
data/lib/fluent/plugin/out_rdkafka2.rb
CHANGED
@@ -65,7 +65,7 @@ DESC
 The codec the producer uses to compress messages. Used for compression.codec
 Supported codecs: (gzip|snappy)
 DESC
-
+config_param :max_send_limit_bytes, :size, :default => nil
 config_param :rdkafka_buffering_max_ms, :integer, :default => nil, :desc => 'Used for queue.buffering.max.ms'
 config_param :rdkafka_buffering_max_messages, :integer, :default => nil, :desc => 'Used for queue.buffering.max.messages'
 config_param :rdkafka_message_max_bytes, :integer, :default => nil, :desc => 'Used for message.max.bytes'
@@ -274,7 +274,7 @@ DESC
 begin
 return producer.produce(topic: topic, payload: record_buf, key: message_key, partition: partition)
 rescue Exception => e
-if e.code == :queue_full
+if e.respond_to?(:code) && e.code == :queue_full
 if attempt <= @max_enqueue_retries
 log.warn "Failed to enqueue message; attempting retry #{attempt} of #{@max_enqueue_retries} after #{@enqueue_retry_backoff}s"
 sleep @enqueue_retry_backoff
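The one-line change in both rdkafka outputs is easy to miss but matters: the `rescue Exception` block used to call `e.code` on every error, so any exception that was not an rdkafka error turned into a `NoMethodError` that hid the original failure. A hedged sketch of the guarded retry, assuming an rdkafka `producer`; the method name and retry settings are illustrative:

    # illustrative sketch of the guarded enqueue retry (not the plugin's exact code)
    def enqueue_with_retry(producer, topic, payload, max_enqueue_retries: 3, backoff: 1)
      attempt = 0
      begin
        attempt += 1
        producer.produce(topic: topic, payload: payload)
      rescue Exception => e
        # only errors coming from librdkafka respond to #code; anything else is re-raised as-is
        if e.respond_to?(:code) && e.code == :queue_full && attempt <= max_enqueue_retries
          sleep backoff   # the local queue is full: wait for librdkafka to drain, then retry
          retry
        else
          raise e
        end
      end
    end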
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-kafka
 version: !ruby/object:Gem::Version
-version: 0.9.6
+version: 0.10.0
 platform: ruby
 authors:
 - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-
+date: 2019-07-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: fluentd
@@ -51,7 +51,7 @@ dependencies:
 requirements:
 - - ">="
 - !ruby/object:Gem::Version
-version: 0.7.
+version: 0.7.8
 - - "<"
 - !ruby/object:Gem::Version
 version: 0.8.0
@@ -61,7 +61,7 @@ dependencies:
 requirements:
 - - ">="
 - !ruby/object:Gem::Version
-version: 0.7.
+version: 0.7.8
 - - "<"
 - !ruby/object:Gem::Version
 version: 0.8.0