fluent-plugin-kafka 0.9.6 → 0.10.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 283d567ceb1e9bbca30164b40f335f015466abafddb0a0ca45f9d12023270d5d
- data.tar.gz: 417f0d18dc4043e04c133ae98daace0a7d368df81cf7b4ef26f8702ffac452ec
+ metadata.gz: e2c8a3efaae4c3daecbc360c8092aa17a46a0bbcd2e533243f6f7ed375eb3aa4
+ data.tar.gz: e2ae435c6923d71ea46e4fe3a5bfdd599cfb08aeea309da461ab15196cebdc07
  SHA512:
- metadata.gz: 7665925660938965d03f649a660a15d5c5abb420c7897fa62a28c3818b13e34cc453f663bbb59be8cba5cac481acbb85618e7ae323ea7d4e765562079adf4fad
- data.tar.gz: 77fa999ff6ce9fb1efe18d5d1d47e8b9daaa1e06d8aad9dafd627d97834adb5aba834e0832e5782161547b42bb55786e420a1a4423fd063af2b8614af51a65b7
+ metadata.gz: 233159921bb7c2cd699b2e10939d26bd90e3280bfae56b9e5146dc7ac75619e3d91256e1f404c5ab412726bcf74e980f61b0e52600290b0b927579969aa21937
+ data.tar.gz: f20ce22e379eb1785667d19ff95089b70c9cd33bbe0517f52e4c67b733cebc8128e6096b7376468fe15c1aea4aea3c9347f3413ad1c8535be58579f16f409583
data/ChangeLog CHANGED
@@ -1,3 +1,10 @@
+ Release 0.10.0 - 2019/07/03
+
+ * output: Fixed max_send_limit_bytes parameter to be in all kafka outputs
+ * output: Add ssl_verify_hostname parameter
+ * rdkafka output: Fix exception handling
+ * Update ruby-kafka version to v0.7.8 or later
+
  Release 0.9.6 - 2019/05/28
 
  * out_kafka2: Add metadata nil check
data/README.md CHANGED
@@ -140,18 +140,14 @@ See also [ruby-kafka README](https://github.com/zendesk/ruby-kafka#consuming-mes
 
  Consuming topic name is used for event tag. So when the target topic name is `app_event`, the tag is `app_event`. If you want to modify tag, use `add_prefix` or `add_suffix` parameter. With `add_prefix kafka`, the tag is `kafka.app_event`.
 
- ### Buffered output plugin
+ ### Output plugin
 
- This plugin uses ruby-kafka producer for writing data. This plugin works with recent kafka versions. This plugin is for v0.12. If you use v1, see `kafka2`.
+ This plugin is for fluentd v1.0 or later. This will be `out_kafka` plugin in the future.
 
  <match app.**>
- @type kafka_buffered
-
- # Brokers: you can choose either brokers or zookeeper. If you are not familiar with zookeeper, use brokers parameters.
- brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
- zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
- zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
+ @type kafka2
 
+ brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
 
  topic_key (string) :default => 'topic'
  partition_key (string) :default => 'partition'
  partition_key_key (string) :default => 'partition_key'
@@ -159,28 +155,37 @@ This plugin uses ruby-kafka producer for writing data. This plugin works with re
  default_topic (string) :default => nil
  default_partition_key (string) :default => nil
  default_message_key (string) :default => nil
- output_data_type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
- output_include_tag (bool) :default => false
- output_include_time (bool) :default => false
- exclude_topic_key (bool) :default => false
- exclude_partition_key (bool) :default => false
- get_kafka_client_log (bool) :default => false
+ exclude_topic_key (bool) :default => false
+ exclude_partition_key (bool) :default => false
+ get_kafka_client_log (bool) :default => false
+ use_default_for_unknown_topic (bool) :default => false
+
+ <format>
+ @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
+ </format>
 
- # See fluentd document for buffer related parameters: http://docs.fluentd.org/articles/buffer-plugin-overview
+ # Optional. See https://docs.fluentd.org/v/1.0/configuration/inject-section
+ <inject>
+ tag_key tag
+ time_key time
+ </inject>
+
+ # See fluentd document for buffer related parameters: https://docs.fluentd.org/v/1.0/configuration/buffer-section
+ # Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
+ <buffer topic>
+ flush_interval 10s
+ </buffer>
 
  # ruby-kafka producer options
- max_send_retries (integer) :default => 1
- required_acks (integer) :default => -1
- ack_timeout (integer) :default => nil (Use default of ruby-kafka)
- compression_codec (gzip|snappy) :default => nil (No compression)
- kafka_agg_max_bytes (integer) :default => 4096
- kafka_agg_max_messages (integer) :default => nil (No limit)
- max_send_limit_bytes (integer) :default => nil (No drop)
- discard_kafka_delivery_failed (bool) :default => false (No discard)
- monitoring_list (array) :default => []
+ idempotent (bool) :default => false
+ sasl_over_ssl (bool) :default => false
+ max_send_retries (integer) :default => 1
+ required_acks (integer) :default => -1
+ ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+ compression_codec (string) :default => nil (No compression. Depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression)
  </match>
 
- `<formatter name>` of `output_data_type` uses fluentd's formatter plugins. See [formatter article](http://docs.fluentd.org/articles/formatter-plugin-overview).
+ The `<formatter name>` in `<format>` uses fluentd's formatter plugins. See [formatter article](https://docs.fluentd.org/v/1.0/formatter).
 
  ruby-kafka sometimes returns `Kafka::DeliveryFailed` error without good information.
  In this case, `get_kafka_client_log` is useful for identifying the error cause.
@@ -216,6 +221,16 @@ On CentOS 7 installation is also necessary.
 
  $ sudo yum install gcc autoconf automake libtool snappy-devel
 
+ This plugin supports compression codec "lz4" also.
+ Install extlz4 module before you use lz4 compression.
+
+ $ gem install extlz4 --no-document
+
+ This plugin supports compression codec "zstd" also.
+ Install zstd-ruby module before you use zstd compression.
+
+ $ gem install zstd-ruby --no-document
+
  #### Load balancing
 
  Messages will be assigned a partition at random as default by ruby-kafka, but messages with the same partition key will always be assigned to the same partition by setting `default_partition_key` in config file.
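For context (this is not part of the diff), `compression_codec` is handed straight to the ruby-kafka producer. A minimal sketch with a placeholder broker, client_id and topic; for `:lz4` the extlz4 gem must be installed and for `:zstd` the zstd-ruby gem, since ruby-kafka loads the codec library only when it first compresses a batch:

    require "kafka"

    # Placeholder broker, client_id and topic.
    kafka = Kafka.new(["localhost:9092"], client_id: "fluent-plugin-kafka-example")
    producer = kafka.producer(compression_codec: :lz4)   # or :zstd, :gzip, :snappy
    producer.produce('{"message":"hello"}', topic: "app_event")
    producer.deliver_messages
    producer.shutdown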
@@ -230,14 +245,17 @@ If key name `partition_key_key` exists in a message, this plugin set the value o
 
  If key name `message_key_key` exists in a message, this plugin publishes the value of message_key_key to kafka and can be read by consumers. Same message key will be assigned to all messages by setting `default_message_key` in config file. If message_key_key exists and if partition_key_key is not set explicitly, message_key_key will be used for partitioning.
 
- ### Output plugin
+ ### Buffered output plugin
 
- This plugin is for fluentd v1.0 or later. This will be `out_kafka` plugin in the future.
+ This plugin uses ruby-kafka producer for writing data. This plugin works with recent kafka versions. This plugin is for v0.12. If you use v1, see `kafka2`.
 
  <match app.**>
- @type kafka2
+ @type kafka_buffered
 
+ # Brokers: you can choose either brokers or zookeeper. If you are not familiar with zookeeper, use brokers parameters.
  brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
+ zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
+ zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
 
  topic_key (string) :default => 'topic'
  partition_key (string) :default => 'partition'
@@ -246,32 +264,27 @@ This plugin is for fluentd v1.0 or later. This will be `out_kafka` plugin in the
  default_topic (string) :default => nil
  default_partition_key (string) :default => nil
  default_message_key (string) :default => nil
+ output_data_type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
+ output_include_tag (bool) :default => false
+ output_include_time (bool) :default => false
  exclude_topic_key (bool) :default => false
  exclude_partition_key (bool) :default => false
  get_kafka_client_log (bool) :default => false
- use_default_for_unknown_topic (bool) :default => false
-
- <format>
- @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
- </format>
 
- # Optional. See https://docs.fluentd.org/v1.0/articles/inject-section
- <inject>
- tag_key tag
- time_key time
- </inject>
-
- # See fluentd document for buffer related parameters: http://docs.fluentd.org/articles/buffer-plugin-overview
- # Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
- <buffer topic>
- flush_interval 10s
- </buffer>
+ # See fluentd document for buffer related parameters: https://docs.fluentd.org/v/0.12/buffer
 
  # ruby-kafka producer options
- max_send_retries (integer) :default => 1
- required_acks (integer) :default => -1
- ack_timeout (integer) :default => nil (Use default of ruby-kafka)
- compression_codec (gzip|snappy) :default => nil (No compression)
+ idempotent (bool) :default => false
+ sasl_over_ssl (bool) :default => false
+ max_send_retries (integer) :default => 1
+ required_acks (integer) :default => -1
+ ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+ compression_codec (string) :default => nil (No compression. Depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression)
+ kafka_agg_max_bytes (integer) :default => 4096
+ kafka_agg_max_messages (integer) :default => nil (No limit)
+ max_send_limit_bytes (integer) :default => nil (No drop)
+ discard_kafka_delivery_failed (bool) :default => false (No discard)
+ monitoring_list (array) :default => []
  </match>
 
  ### Non-buffered output plugin
@@ -296,12 +309,12 @@ This plugin uses ruby-kafka producer for writing data. For performance and relia
  exclude_partition_key (bool) :default => false
 
  # ruby-kafka producer options
- max_send_retries (integer) :default => 1
- required_acks (integer) :default => -1
- ack_timeout (integer) :default => nil (Use default of ruby-kafka)
- compression_codec (gzip|snappy) :default => nil
- max_buffer_size (integer) :default => nil (Use default of ruby-kafka)
- max_buffer_bytesize (integer) :default => nil (Use default of ruby-kafka)
+ max_send_retries (integer) :default => 1
+ required_acks (integer) :default => -1
+ ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+ compression_codec (string) :default => nil (No compression. Depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression)
+ max_buffer_size (integer) :default => nil (Use default of ruby-kafka)
+ max_buffer_bytesize (integer) :default => nil (Use default of ruby-kafka)
  </match>
 
  This plugin also supports ruby-kafka related parameters. See Buffered output plugin section.
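For orientation (again not part of the diff), the ruby-kafka producer options listed above correspond to keyword arguments of ruby-kafka's `producer` method. A minimal sketch with placeholder broker, topic and values:

    require "kafka"

    # Placeholder values; each keyword mirrors a parameter from the list above.
    kafka = Kafka.new(["localhost:9092"], client_id: "fluent-plugin-kafka-example")
    producer = kafka.producer(
      max_retries: 1,                  # max_send_retries
      required_acks: :all,             # the plugin's -1 default likewise means "all in-sync replicas"
      ack_timeout: 5,                  # seconds
      compression_codec: :gzip,
      max_buffer_size: 1000,           # messages
      max_buffer_bytesize: 10_000_000  # bytes
    )
    producer.produce('{"message":"hello"}', topic: "app_event")
    producer.deliver_messages
    producer.shutdown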
@@ -336,13 +349,13 @@ You need to install rdkafka gem.
  @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
  </format>
 
- # Optional. See https://docs.fluentd.org/v1.0/articles/inject-section
+ # Optional. See https://docs.fluentd.org/v/1.0/configuration/inject-section
  <inject>
  tag_key tag
  time_key time
  </inject>
 
- # See fluentd document for buffer section parameters: https://docs.fluentd.org/v1.0/articles/buffer-section
+ # See fluentd document for buffer section parameters: https://docs.fluentd.org/v/1.0/configuration/buffer-section
  # Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
  <buffer topic>
  flush_interval 10s
@@ -13,12 +13,12 @@ Gem::Specification.new do |gem|
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "fluent-plugin-kafka"
  gem.require_paths = ["lib"]
- gem.version = '0.9.6'
+ gem.version = '0.10.0'
  gem.required_ruby_version = ">= 2.1.0"
 
  gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
  gem.add_dependency 'ltsv'
- gem.add_dependency 'ruby-kafka', '>= 0.7.1', '< 0.8.0'
+ gem.add_dependency 'ruby-kafka', '>= 0.7.8', '< 0.8.0'
  gem.add_development_dependency "rake", ">= 0.9.2"
  gem.add_development_dependency "test-unit", ">= 3.0.8"
  end
@@ -14,6 +14,8 @@ module Fluent
  :desc => "an extra PEM encoded cert to use with and SSL connection."
  config_param :ssl_ca_certs_from_system, :bool, :default => false,
  :desc => "this configures the store to look up CA certificates from the system default certificate store on an as needed basis. The location of the store can usually be determined by: OpenSSL::X509::DEFAULT_CERT_FILE."
+ config_param :ssl_verify_hostname, :bool, :default => true,
+ :desc => "this configures whether hostname of certificate should be verified or not."
  }
  end
 
@@ -55,7 +55,7 @@ DESC
  :desc => "How long the producer waits for acks."
  config_param :compression_codec, :string, :default => nil,
  :desc => "The codec the producer uses to compress messages."
-
+ config_param :max_send_limit_bytes, :size, :default => nil
  config_param :time_format, :string, :default => nil
 
  config_param :max_buffer_size, :integer, :default => nil,
@@ -108,15 +108,16 @@ DESC
  if @scram_mechanism != nil && @username != nil && @password != nil
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
- sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl)
+ sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl,
+ ssl_verify_hostname: @ssl_verify_hostname)
  elsif @username != nil && @password != nil
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
- sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl)
+ sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
  else
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
- sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl)
+ sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
  end
  log.info "initialized kafka producer: #{@client_id}"
  else
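This is where the new flag reaches ruby-kafka: `@ssl_verify_hostname` is forwarded to `Kafka.new`, which appears to be why the release bumps the ruby-kafka dependency to >= 0.7.8. For reference, a minimal standalone sketch with placeholder broker and CA path; `ssl_verify_hostname false` in the fluentd `<match>` section corresponds to the keyword below:

    require "kafka"

    # Placeholder values. Hostname verification stays on by default; disabling it still
    # validates the certificate chain, it only skips matching the broker hostname.
    kafka = Kafka.new(
      ["broker1:9093"],
      client_id: "fluent-plugin-kafka-example",
      ssl_ca_cert: File.read("/path/to/ca.crt"),
      ssl_verify_hostname: false
    )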
@@ -228,17 +229,21 @@ DESC
  partition = (@exclude_partition ? record.delete('partition'.freeze) : record['partition'.freeze]) || @default_partition
  message_key = (@exclude_message_key ? record.delete('message_key') : record['message_key']) || @default_message_key
 
- value = @formatter_proc.call(tag, time, record)
-
- log.trace { "message will send to #{topic} with partition_key: #{partition_key}, partition: #{partition}, message_key: #{message_key} and value: #{value}." }
- begin
- producer.produce(value, topic: topic, key: message_key, partition: partition, partition_key: partition_key)
- rescue Kafka::BufferOverflow => e
- log.warn "BufferOverflow occurred: #{e}"
- log.info "Trying to deliver the messages to prevent the buffer from overflowing again."
- producer.deliver_messages
- log.info "Recovered from BufferOverflow successfully`"
- end
+ record_buf = @formatter_proc.call(tag, time, record)
+ record_buf_bytes = record_buf.bytesize
+ if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+ log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+ next
+ end
+ log.trace { "message will send to #{topic} with partition_key: #{partition_key}, partition: #{partition}, message_key: #{message_key} and value: #{record_buf}." }
+ begin
+ producer.produce(record_buf, topic: topic, key: message_key, partition: partition, partition_key: partition_key)
+ rescue Kafka::BufferOverflow => e
+ log.warn "BufferOverflow occurred: #{e}"
+ log.info "Trying to deliver the messages to prevent the buffer from overflowing again."
+ producer.deliver_messages
+ log.info "Recovered from BufferOverflow successfully`"
+ end
  end
 
  producer.deliver_messages
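To make the new guard concrete, a small illustrative sketch (made-up limit and records, not plugin code) of which formatted events the size check drops:

    # Illustrative only; mirrors the guard above with a made-up 1 MB limit.
    max_send_limit_bytes = 1_000_000   # e.g. "max_send_limit_bytes 1000000" in the <match> section

    small = '{"message":"ok"}'
    large = '{"message":"' + ("x" * 2_000_000) + '"}'

    [small, large].each do |record_buf|
      if max_send_limit_bytes && record_buf.bytesize > max_send_limit_bytes
        puts "skipped: #{record_buf.bytesize} bytes exceeds the limit"   # the plugin warns and drops the event
      else
        puts "sent: #{record_buf.bytesize} bytes"                        # the plugin hands the record to the producer
      end
    end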
@@ -55,9 +55,9 @@ DESC
  config_param :compression_codec, :string, :default => nil,
  :desc => <<-DESC
  The codec the producer uses to compress messages.
- Supported codecs: (gzip|snappy)
+ Supported codecs depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression
  DESC
-
+ config_param :max_send_limit_bytes, :size, :default => nil
  config_param :active_support_notification_regex, :string, :default => nil,
  :desc => <<-DESC
  Add a regular expression to capture ActiveSupport notifications from the Kafka client
@@ -87,15 +87,17 @@ DESC
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
  ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_scram_username: @username, sasl_scram_password: @password,
- sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl)
+ sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
  elsif @username != nil && @password != nil
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
- ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl)
+ ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl,
+ ssl_verify_hostname: @ssl_verify_hostname)
  else
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
- ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl)
+ ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl,
+ ssl_verify_hostname: @ssl_verify_hostname)
  end
  log.info "initialized kafka producer: #{@client_id}"
  rescue Exception => e
@@ -214,6 +216,11 @@ DESC
  message_key = (@exclude_message_key ? record.delete(@message_key_key) : record[@message_key_key]) || @default_message_key
 
  record_buf = @formatter_proc.call(tag, time, record)
+ record_buf_bytes = record_buf.bytesize
+ if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+ log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+ next
+ end
  rescue StandardError => e
  log.warn "unexpected error during format record. Skip broken event:", :error => e.to_s, :error_class => e.class.to_s, :time => time, :record => record
  next
@@ -69,7 +69,7 @@ DESC
  config_param :compression_codec, :string, :default => nil,
  :desc => <<-DESC
  The codec the producer uses to compress messages.
- Supported codecs: (gzip|snappy)
+ Supported codecs depends on ruby-kafka: https://github.com/zendesk/ruby-kafka#compression
  DESC
  config_param :max_send_limit_bytes, :size, :default => nil
  config_param :discard_kafka_delivery_failed, :bool, :default => false
@@ -131,15 +131,16 @@ DESC
  if @scram_mechanism != nil && @username != nil && @password != nil
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
- sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl)
+ sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl,
+ ssl_verify_hostname: @ssl_verify_hostname)
  elsif @username != nil && @password != nil
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
- sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl)
+ sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
  else
  @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
  ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
- sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl)
+ sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
  end
  log.info "initialized kafka producer: #{@client_id}"
  else
@@ -64,7 +64,7 @@ DESC
  The codec the producer uses to compress messages.
  Supported codecs: (gzip|snappy)
  DESC
-
+ config_param :max_send_limit_bytes, :size, :default => nil
  config_param :rdkafka_buffering_max_ms, :integer, :default => nil
  config_param :rdkafka_buffering_max_messages, :integer, :default => nil
  config_param :rdkafka_message_max_bytes, :integer, :default => nil
@@ -284,7 +284,7 @@ DESC
  handler = producer.produce(topic: topic, payload: record_buf, key: message_key, partition: partition)
  return handler
  rescue Exception => e
- if e.code == :queue_full
+ if e.respond_to?(:code) && e.code == :queue_full
  if attempt <= @max_enqueue_retries
  log.warn "Failed to enqueue message; attempting retry #{attempt} of #{@max_enqueue_retries} after #{@enqueue_retry_backoff}s"
  sleep @enqueue_retry_backoff
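The added `respond_to?(:code)` guard is the "Fix exception handling" entry from the ChangeLog: `rescue Exception` also catches errors that define no `#code` method, so calling `e.code` on those used to raise `NoMethodError` and mask the original failure. A small illustrative sketch, not plugin code:

    # Illustrative only: ArgumentError has no #code, so an unguarded e.code would blow up here.
    begin
      raise ArgumentError, "not an rdkafka error"
    rescue Exception => e
      if e.respond_to?(:code) && e.code == :queue_full
        puts "local queue full: back off and retry the enqueue"   # what the plugin does on :queue_full
      else
        puts "re-raising #{e.class}: #{e.message}"                # the plugin re-raises anything else
      end
    end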
@@ -65,7 +65,7 @@ DESC
  The codec the producer uses to compress messages. Used for compression.codec
  Supported codecs: (gzip|snappy)
  DESC
-
+ config_param :max_send_limit_bytes, :size, :default => nil
  config_param :rdkafka_buffering_max_ms, :integer, :default => nil, :desc => 'Used for queue.buffering.max.ms'
  config_param :rdkafka_buffering_max_messages, :integer, :default => nil, :desc => 'Used for queue.buffering.max.messages'
  config_param :rdkafka_message_max_bytes, :integer, :default => nil, :desc => 'Used for message.max.bytes'
@@ -274,7 +274,7 @@ DESC
  begin
  return producer.produce(topic: topic, payload: record_buf, key: message_key, partition: partition)
  rescue Exception => e
- if e.code == :queue_full
+ if e.respond_to?(:code) && e.code == :queue_full
  if attempt <= @max_enqueue_retries
  log.warn "Failed to enqueue message; attempting retry #{attempt} of #{@max_enqueue_retries} after #{@enqueue_retry_backoff}s"
  sleep @enqueue_retry_backoff
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-kafka
  version: !ruby/object:Gem::Version
- version: 0.9.6
+ version: 0.10.0
  platform: ruby
  authors:
  - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-05-29 00:00:00.000000000 Z
+ date: 2019-07-03 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: fluentd
@@ -51,7 +51,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 0.7.1
+ version: 0.7.8
  - - "<"
  - !ruby/object:Gem::Version
  version: 0.8.0
@@ -61,7 +61,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 0.7.1
+ version: 0.7.8
  - - "<"
  - !ruby/object:Gem::Version
  version: 0.8.0