madhawk57-log 0.1.0

@@ -0,0 +1,316 @@
+ require 'thread'
+ require 'logger'
+ require 'fluent/output'
+ require 'fluent/plugin/kafka_plugin_util'
+
+ require 'rdkafka'
+ require 'fluent/plugin/kafka_producer_ext'
+
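+ # Monkey patch Rdkafka::Producer#close to accept an optional timeout, so a
+ # dead broker cannot leave the polling thread (and plugin shutdown) blocked forever.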
+ class Rdkafka::Producer
+   # return false if producer is forcefully closed, otherwise return true
+   def close(timeout = nil)
+     @closing = true
+     # Wait for the polling thread to finish up
+     # If the broker isn't alive, the thread doesn't exit
+     if timeout
+       thr = @polling_thread.join(timeout)
+       return !!thr
+     else
+       @polling_thread.join
+       return true
+     end
+   end
+ end
+
+ class Fluent::KafkaOutputBuffered2 < Fluent::BufferedOutput
+   Fluent::Plugin.register_output('rdkafka', self)
+
+   config_param :brokers, :string, :default => 'localhost:9092',
+                :desc => <<-DESC
+ Set brokers directly:
+ <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,..
+ Brokers: you can choose to use either brokers or zookeeper.
+ DESC
+   config_param :default_topic, :string, :default => nil,
+                :desc => "Output topic"
+   config_param :default_message_key, :string, :default => nil
+   config_param :default_partition, :integer, :default => nil
+   config_param :client_id, :string, :default => 'kafka'
+   config_param :output_data_type, :string, :default => 'json',
+                :desc => <<-DESC
+ Supported format: (json|ltsv|msgpack|attr:<record name>|<formatter name>)
+ DESC
+   config_param :output_include_tag, :bool, :default => false
+   config_param :output_include_time, :bool, :default => false
+   config_param :exclude_partition, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove partition from data
+ DESC
+   config_param :exclude_message_key, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove message key from data
+ DESC
+   config_param :exclude_topic_key, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove topic name key from data
+ DESC
+   config_param :max_send_retries, :integer, :default => 2,
+                :desc => "Number of times to retry sending of messages to a leader."
+   config_param :required_acks, :integer, :default => -1,
+                :desc => "The number of acks required per request."
+   config_param :ack_timeout, :time, :default => nil,
+                :desc => "How long the producer waits for acks."
+   config_param :compression_codec, :string, :default => nil,
+                :desc => <<-DESC
+ The codec the producer uses to compress messages.
+ Supported codecs: (gzip|snappy)
+ DESC
+   config_param :max_send_limit_bytes, :size, :default => nil
+   config_param :rdkafka_buffering_max_ms, :integer, :default => nil
+   config_param :rdkafka_buffering_max_messages, :integer, :default => nil
+   config_param :rdkafka_message_max_bytes, :integer, :default => nil
+   config_param :rdkafka_message_max_num, :integer, :default => nil
+   config_param :rdkafka_delivery_handle_poll_timeout, :integer, :default => 30
+   config_param :rdkafka_options, :hash, :default => {}
+
+   config_param :max_enqueue_retries, :integer, :default => 3
+   config_param :enqueue_retry_backoff, :integer, :default => 3
+
+   config_param :service_name, :string, :default => nil
+   config_param :ssl_client_cert_key_password, :string, :default => nil
+
+   include Fluent::KafkaPluginUtil::SSLSettings
+   include Fluent::KafkaPluginUtil::SaslSettings
+
+   def initialize
+     super
+     @producers = {}
+     @producers_mutex = Mutex.new
+   end
+
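+   # configure installs a shim on Fluentd's logger so librdkafka log lines,
+   # which arrive via Logger#add, are forwarded at the matching Fluentd level.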
+   def configure(conf)
+     super
+     log.instance_eval {
+       def add(level, &block)
+         return unless block
+
+         # Follow rdkafka's log level. See also rdkafka-ruby's bindings.rb: https://github.com/appsignal/rdkafka-ruby/blob/e5c7261e3f2637554a5c12b924be297d7dca1328/lib/rdkafka/bindings.rb#L117
+         case level
+         when Logger::FATAL
+           self.fatal(block.call)
+         when Logger::ERROR
+           self.error(block.call)
+         when Logger::WARN
+           self.warn(block.call)
+         when Logger::INFO
+           self.info(block.call)
+         when Logger::DEBUG
+           self.debug(block.call)
+         else
+           self.trace(block.call)
+         end
+       end
+     }
+     Rdkafka::Config.logger = log
+     config = build_config
+     @rdkafka = Rdkafka::Config.new(config)
+     @formatter_proc = setup_formatter(conf)
+   end
+
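+   # Map plugin parameters onto librdkafka configuration keys; security.protocol
+   # is derived from which of the SSL and SASL settings are present.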
+   def build_config
+     config = {
+       :"bootstrap.servers" => @brokers,
+     }
+
+     if @ssl_ca_cert && @ssl_ca_cert[0]
+       ssl = true
+       config[:"ssl.ca.location"] = @ssl_ca_cert[0]
+       config[:"ssl.certificate.location"] = @ssl_client_cert if @ssl_client_cert
+       config[:"ssl.key.location"] = @ssl_client_cert_key if @ssl_client_cert_key
+       config[:"ssl.key.password"] = @ssl_client_cert_key_password if @ssl_client_cert_key_password
+     end
+
+     if @principal
+       sasl = true
+       config[:"sasl.mechanisms"] = "GSSAPI"
+       config[:"sasl.kerberos.principal"] = @principal
+       config[:"sasl.kerberos.service.name"] = @service_name if @service_name
+       config[:"sasl.kerberos.keytab"] = @keytab if @keytab
+     end
+
+     if ssl && sasl
+       security_protocol = "SASL_SSL"
+     elsif ssl && !sasl
+       security_protocol = "SSL"
+     elsif !ssl && sasl
+       security_protocol = "SASL_PLAINTEXT"
+     else
+       security_protocol = "PLAINTEXT"
+     end
+     config[:"security.protocol"] = security_protocol
+
+     config[:"compression.codec"] = @compression_codec if @compression_codec
+     config[:"message.send.max.retries"] = @max_send_retries if @max_send_retries
+     config[:"request.required.acks"] = @required_acks if @required_acks
+     config[:"request.timeout.ms"] = @ack_timeout * 1000 if @ack_timeout
+     config[:"queue.buffering.max.ms"] = @rdkafka_buffering_max_ms if @rdkafka_buffering_max_ms
+     config[:"queue.buffering.max.messages"] = @rdkafka_buffering_max_messages if @rdkafka_buffering_max_messages
+     config[:"message.max.bytes"] = @rdkafka_message_max_bytes if @rdkafka_message_max_bytes
+     config[:"batch.num.messages"] = @rdkafka_message_max_num if @rdkafka_message_max_num
+
+     @rdkafka_options.each { |k, v|
+       config[k.to_sym] = v
+     }
+
+     config
+   end
+
+   def start
+     super
+   end
+
+   def multi_workers_ready?
+     true
+   end
+
+   def shutdown
+     super
+     shutdown_producers
+   end
+
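+   # Close every per-thread producer in parallel, giving each 10 seconds
+   # before its queue is abandoned (see the Rdkafka::Producer#close patch above).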
+   def shutdown_producers
+     @producers_mutex.synchronize {
+       shutdown_threads = @producers.map { |key, producer|
+         th = Thread.new {
+           unless producer.close(10)
+             log.warn("Queue is forcefully closed after 10 seconds wait")
+           end
+         }
+         th.abort_on_exception = true
+         th
+       }
+       shutdown_threads.each { |th| th.join }
+       @producers = {}
+     }
+   end
+
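+   # One producer per flush thread, lazily created and keyed by the thread's
+   # object_id so concurrent flushes never share an rdkafka handle.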
+   def get_producer
+     @producers_mutex.synchronize {
+       producer = @producers[Thread.current.object_id]
+       unless producer
+         producer = @rdkafka.producer
+         @producers[Thread.current.object_id] = producer
+       end
+       producer
+     }
+   end
+
+   def emit(tag, es, chain)
+     super(tag, es, chain, tag)
+   end
+
+   def format_stream(tag, es)
+     es.to_msgpack_stream
+   end
+
+   def setup_formatter(conf)
+     if @output_data_type == 'json'
+       begin
+         require 'oj'
+         Oj.default_options = Fluent::DEFAULT_OJ_OPTIONS
+         Proc.new { |tag, time, record| Oj.dump(record) }
+       rescue LoadError
+         require 'yajl'
+         Proc.new { |tag, time, record| Yajl::Encoder.encode(record) }
+       end
+     elsif @output_data_type == 'ltsv'
+       require 'ltsv'
+       Proc.new { |tag, time, record| LTSV.dump(record) }
+     elsif @output_data_type == 'msgpack'
+       require 'msgpack'
+       Proc.new { |tag, time, record| record.to_msgpack }
+     elsif @output_data_type =~ /^attr:(.*)$/
+       @custom_attributes = $1.split(',').map(&:strip).reject(&:empty?)
+       @custom_attributes.unshift('time') if @output_include_time
+       @custom_attributes.unshift('tag') if @output_include_tag
+       Proc.new { |tag, time, record|
+         @custom_attributes.map { |attr|
+           record[attr].nil? ? '' : record[attr].to_s
+         }.join(@f_separator)
+       }
+     else
+       @formatter = Fluent::Plugin.new_formatter(@output_data_type)
+       @formatter.configure(conf)
+       @formatter.method(:format)
+     end
+   end
+
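+   # Format each record, enqueue it on the per-thread producer, then wait on
+   # the delivery handles; skipped events leave nils in the map, hence the compact.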
+   def write(chunk)
+     tag = chunk.key
+     def_topic = @default_topic || tag
+
+     record_buf = nil
+     record_buf_bytes = nil
+
+     begin
+       chunk.msgpack_each.map { |time, record|
+         begin
+           if @output_include_time
+             if @time_format
+               record['time'.freeze] = Time.at(time).strftime(@time_format)
+             else
+               record['time'.freeze] = time
+             end
+           end
+
+           record['tag'] = tag if @output_include_tag
+           topic = (@exclude_topic_key ? record.delete('topic'.freeze) : record['topic'.freeze]) || def_topic
+           partition = (@exclude_partition ? record.delete('partition'.freeze) : record['partition'.freeze]) || @default_partition
+           message_key = (@exclude_message_key ? record.delete('message_key'.freeze) : record['message_key'.freeze]) || @default_message_key
+
+           record_buf = @formatter_proc.call(tag, time, record)
+           record_buf_bytes = record_buf.bytesize
+           if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+             log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+             next
+           end
+         rescue StandardError => e
+           log.warn "unexpected error during format record. Skip broken event:", :error => e.to_s, :error_class => e.class.to_s, :time => time, :record => record
+           next
+         end
+
+         producer = get_producer
+         handler = enqueue_with_retry(producer, topic, record_buf, message_key, partition)
+         handler
+       }.compact.each { |handler|
+         handler.wait(max_wait_timeout: @rdkafka_delivery_handle_poll_timeout) if @rdkafka_delivery_handle_poll_timeout != 0
+       }
+     end
+   rescue Exception => e
+     log.warn "Send exception occurred: #{e} at #{e.backtrace.first}"
+     # Raise exception to retry sending messages
+     raise e
+   end
+
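+   # Produce with bounded retries: rdkafka raises with code :queue_full when the
+   # client-side buffer is saturated, so back off and retry before giving up.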
+   def enqueue_with_retry(producer, topic, record_buf, message_key, partition)
+     attempt = 0
+     loop do
+       begin
+         handler = producer.produce(topic: topic, payload: record_buf, key: message_key, partition: partition)
+         return handler
+       rescue Exception => e
+         if e.respond_to?(:code) && e.code == :queue_full
+           if attempt <= @max_enqueue_retries
+             log.warn "Failed to enqueue message; attempting retry #{attempt} of #{@max_enqueue_retries} after #{@enqueue_retry_backoff}s"
+             sleep @enqueue_retry_backoff
+             attempt += 1
+           else
+             raise "Failed to enqueue message after #{@max_enqueue_retries} retries"
+           end
+         else
+           raise e
+         end
+       end
+     end
+   end
+ end
@@ -0,0 +1,349 @@
+ require 'thread'
+ require 'logger'
+ require 'fluent/plugin/output'
+ require 'fluent/plugin/kafka_plugin_util'
+
+ require 'rdkafka'
+
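+ # Same Rdkafka::Producer#close monkey patch as in out_rdkafka above: add an
+ # optional timeout so an unreachable broker cannot block shutdown indefinitely.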
+ class Rdkafka::Producer
+   # return false if producer is forcefully closed, otherwise return true
+   def close(timeout = nil)
+     @closing = true
+     # Wait for the polling thread to finish up
+     # If the broker isn't alive, the thread doesn't exit
+     if timeout
+       thr = @polling_thread.join(timeout)
+       return !!thr
+     else
+       @polling_thread.join
+       return true
+     end
+   end
+ end
+
+ module Fluent::Plugin
+   class Fluent::Rdkafka2Output < Output
+     Fluent::Plugin.register_output('rdkafka2', self)
+
+     helpers :inject, :formatter, :record_accessor
+
+     config_param :brokers, :string, :default => 'localhost:9092',
+                  :desc => <<-DESC
+ Set brokers directly:
+ <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,..
+ Brokers: you can choose to use either brokers or zookeeper.
+ DESC
+     config_param :topic_key, :string, :default => 'topic', :desc => "Field for kafka topic"
+     config_param :default_topic, :string, :default => nil,
+                  :desc => "Default output topic when record doesn't have topic field"
+     config_param :message_key_key, :string, :default => 'message_key', :desc => "Field for kafka message key"
+     config_param :default_message_key, :string, :default => nil
+     config_param :partition_key, :string, :default => 'partition', :desc => "Field for kafka partition"
+     config_param :default_partition, :integer, :default => nil
+     config_param :output_data_type, :string, :default => 'json', :obsoleted => "Use <format> section instead"
+     config_param :output_include_tag, :bool, :default => false, :obsoleted => "Use <inject> section instead"
+     config_param :output_include_time, :bool, :default => false, :obsoleted => "Use <inject> section instead"
+     config_param :exclude_partition, :bool, :default => false,
+                  :desc => <<-DESC
+ Set true to remove partition from data
+ DESC
+     config_param :exclude_message_key, :bool, :default => false,
+                  :desc => <<-DESC
+ Set true to remove message_key from data
+ DESC
+     config_param :exclude_topic_key, :bool, :default => false,
+                  :desc => <<-DESC
+ Set true to remove topic key from data
+ DESC
+     config_param :headers, :hash, default: {}, symbolize_keys: true, value_type: :string,
+                  :desc => 'Kafka message headers'
+     config_param :headers_from_record, :hash, default: {}, symbolize_keys: true, value_type: :string,
+                  :desc => 'Kafka message headers where the header value is a jsonpath to a record value'
+
+     config_param :max_send_retries, :integer, :default => 2,
+                  :desc => "Number of times to retry sending of messages to a leader. Used for message.send.max.retries"
+     config_param :required_acks, :integer, :default => -1,
+                  :desc => "The number of acks required per request. Used for request.required.acks"
+     config_param :ack_timeout, :time, :default => nil,
+                  :desc => "How long the producer waits for acks. Used for request.timeout.ms"
+     config_param :compression_codec, :string, :default => nil,
+                  :desc => <<-DESC
+ The codec the producer uses to compress messages. Used for compression.codec
+ Supported codecs: (gzip|snappy)
+ DESC
+     config_param :max_send_limit_bytes, :size, :default => nil
+     config_param :rdkafka_buffering_max_ms, :integer, :default => nil, :desc => 'Used for queue.buffering.max.ms'
+     config_param :rdkafka_buffering_max_messages, :integer, :default => nil, :desc => 'Used for queue.buffering.max.messages'
+     config_param :rdkafka_message_max_bytes, :integer, :default => nil, :desc => 'Used for message.max.bytes'
+     config_param :rdkafka_message_max_num, :integer, :default => nil, :desc => 'Used for batch.num.messages'
+     config_param :rdkafka_delivery_handle_poll_timeout, :integer, :default => 30, :desc => 'Timeout for polling message wait'
+     config_param :rdkafka_options, :hash, :default => {}, :desc => 'Set any rdkafka configuration. See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md'
+     config_param :share_producer, :bool, :default => false, :desc => 'share kafka producer between flush threads'
+
+     config_param :max_enqueue_retries, :integer, :default => 3
+     config_param :enqueue_retry_backoff, :integer, :default => 3
+
+     config_param :service_name, :string, :default => nil, :desc => 'Used for sasl.kerberos.service.name'
+     config_param :ssl_client_cert_key_password, :string, :default => nil, :desc => 'Used for ssl.key.password'
+
+     config_section :buffer do
+       config_set_default :chunk_keys, ["topic"]
+     end
+     config_section :format do
+       config_set_default :@type, 'json'
+       config_set_default :add_newline, false
+     end
+
+     include Fluent::KafkaPluginUtil::SSLSettings
+     include Fluent::KafkaPluginUtil::SaslSettings
+
+     def initialize
+       super
+
+       @producers = nil
+       @producers_mutex = nil
+       @shared_producer = nil
+     end
+
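+     # configure installs the same librdkafka-to-Fluentd log shim as out_rdkafka,
+     # validates the topic fallback and <format> section, and prepares record
+     # accessors for headers_from_record.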
+     def configure(conf)
+       super
+       log.instance_eval {
+         def add(level, &block)
+           return unless block
+
+           # Follow rdkafka's log level. See also rdkafka-ruby's bindings.rb: https://github.com/appsignal/rdkafka-ruby/blob/e5c7261e3f2637554a5c12b924be297d7dca1328/lib/rdkafka/bindings.rb#L117
+           case level
+           when Logger::FATAL
+             self.fatal(block.call)
+           when Logger::ERROR
+             self.error(block.call)
+           when Logger::WARN
+             self.warn(block.call)
+           when Logger::INFO
+             self.info(block.call)
+           when Logger::DEBUG
+             self.debug(block.call)
+           else
+             self.trace(block.call)
+           end
+         end
+       }
+       Rdkafka::Config.logger = log
+       config = build_config
+       @rdkafka = Rdkafka::Config.new(config)
+
+       if @default_topic.nil?
+         if @chunk_keys.include?(@topic_key) && !@chunk_key_tag
+           log.warn "Use '#{@topic_key}' field of event record for topic but no fallback. Recommend to set default_topic or set 'tag' in buffer chunk keys like <buffer #{@topic_key},tag>"
+         end
+       else
+         if @chunk_key_tag
+           log.warn "default_topic is set. Fluentd's event tag is not used for topic"
+         end
+       end
+
+       formatter_conf = conf.elements('format').first
+       unless formatter_conf
+         raise Fluent::ConfigError, "<format> section is required."
+       end
+       unless formatter_conf["@type"]
+         raise Fluent::ConfigError, "format/@type is required."
+       end
+       @formatter_proc = setup_formatter(formatter_conf)
+       @topic_key_sym = @topic_key.to_sym
+
+       @headers_from_record_accessors = {}
+       @headers_from_record.each do |key, value|
+         @headers_from_record_accessors[key] = record_accessor_create(value)
+       end
+     end
+
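+     # As in out_rdkafka: translate plugin parameters into librdkafka settings
+     # and derive security.protocol from the configured SSL/SASL options.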
+     def build_config
+       config = {:"bootstrap.servers" => @brokers}
+
+       if @ssl_ca_cert && @ssl_ca_cert[0]
+         ssl = true
+         config[:"ssl.ca.location"] = @ssl_ca_cert[0]
+         config[:"ssl.certificate.location"] = @ssl_client_cert if @ssl_client_cert
+         config[:"ssl.key.location"] = @ssl_client_cert_key if @ssl_client_cert_key
+         config[:"ssl.key.password"] = @ssl_client_cert_key_password if @ssl_client_cert_key_password
+       end
+
+       if @principal
+         sasl = true
+         config[:"sasl.mechanisms"] = "GSSAPI"
+         config[:"sasl.kerberos.principal"] = @principal
+         config[:"sasl.kerberos.service.name"] = @service_name if @service_name
+         config[:"sasl.kerberos.keytab"] = @keytab if @keytab
+       end
+
+       if ssl && sasl
+         security_protocol = "SASL_SSL"
+       elsif ssl && !sasl
+         security_protocol = "SSL"
+       elsif !ssl && sasl
+         security_protocol = "SASL_PLAINTEXT"
+       else
+         security_protocol = "PLAINTEXT"
+       end
+       config[:"security.protocol"] = security_protocol
+
+       config[:"compression.codec"] = @compression_codec if @compression_codec
+       config[:"message.send.max.retries"] = @max_send_retries if @max_send_retries
+       config[:"request.required.acks"] = @required_acks if @required_acks
+       config[:"request.timeout.ms"] = @ack_timeout * 1000 if @ack_timeout
+       config[:"queue.buffering.max.ms"] = @rdkafka_buffering_max_ms if @rdkafka_buffering_max_ms
+       config[:"queue.buffering.max.messages"] = @rdkafka_buffering_max_messages if @rdkafka_buffering_max_messages
+       config[:"message.max.bytes"] = @rdkafka_message_max_bytes if @rdkafka_message_max_bytes
+       config[:"batch.num.messages"] = @rdkafka_message_max_num if @rdkafka_message_max_num
+
+       @rdkafka_options.each { |k, v|
+         config[k.to_sym] = v
+       }
+
+       config
+     end
+
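+     # With share_producer a single producer handle serves all flush threads;
+     # otherwise producers are created lazily, one per thread (see get_producer).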
+     def start
+       if @share_producer
+         @shared_producer = @rdkafka.producer
+       else
+         @producers = {}
+         @producers_mutex = Mutex.new
+       end
+
+       super
+     end
+
+     def multi_workers_ready?
+       true
+     end
+
+     def shutdown
+       super
+       shutdown_producers
+     end
+
+     def shutdown_producers
+       if @share_producer
+         close_producer(@shared_producer)
+         @shared_producer = nil
+       else
+         @producers_mutex.synchronize {
+           shutdown_threads = @producers.map { |key, producer|
+             th = Thread.new {
+               close_producer(producer)
+             }
+             th.abort_on_exception = true
+             th
+           }
+           shutdown_threads.each { |th| th.join }
+           @producers = {}
+         }
+       end
+     end
+
+     def close_producer(producer)
+       unless producer.close(10)
+         log.warn("Queue is forcefully closed after 10 seconds wait")
+       end
+     end
+
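+     # Return the shared producer, or the calling flush thread's own producer,
+     # created on first use and keyed by the thread's object_id.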
+     def get_producer
+       if @share_producer
+         @shared_producer
+       else
+         @producers_mutex.synchronize {
+           producer = @producers[Thread.current.object_id]
+           unless producer
+             producer = @rdkafka.producer
+             @producers[Thread.current.object_id] = producer
+           end
+           producer
+         }
+       end
+     end
+
+     def setup_formatter(conf)
+       type = conf['@type']
+       case type
+       when 'ltsv'
+         require 'ltsv'
+         Proc.new { |tag, time, record| LTSV.dump(record) }
+       else
+         @formatter = formatter_create(usage: 'rdkafka-plugin', conf: conf)
+         @formatter.method(:format)
+       end
+     end
+
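+     # The chunk is keyed by topic (see <buffer>): resolve the topic once per
+     # chunk, inject tag/time, build headers per record, enqueue, then wait on
+     # the collected delivery handles.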
+     def write(chunk)
+       tag = chunk.metadata.tag
+       topic = (chunk.metadata.variables && chunk.metadata.variables[@topic_key_sym]) || @default_topic || tag
+
+       handlers = []
+       record_buf = nil
+       record_buf_bytes = nil
+
+       headers = @headers.clone
+
+       begin
+         producer = get_producer
+         chunk.msgpack_each { |time, record|
+           begin
+             record = inject_values_to_record(tag, time, record)
+             record.delete(@topic_key) if @exclude_topic_key
+             partition = (@exclude_partition ? record.delete(@partition_key) : record[@partition_key]) || @default_partition
+             message_key = (@exclude_message_key ? record.delete(@message_key_key) : record[@message_key_key]) || @default_message_key
+
+             @headers_from_record_accessors.each do |key, header_accessor|
+               headers[key] = header_accessor.call(record)
+             end
+
+             record_buf = @formatter_proc.call(tag, time, record)
+             record_buf_bytes = record_buf.bytesize
+             if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+               log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+               next
+             end
+           rescue StandardError => e
+             log.warn "unexpected error during format record. Skip broken event:", :error => e.to_s, :error_class => e.class.to_s, :time => time, :record => record
+             next
+           end
+
+           handler = enqueue_with_retry(producer, topic, record_buf, message_key, partition, headers)
+           if @rdkafka_delivery_handle_poll_timeout != 0
+             handlers << handler
+           end
+         }
+         handlers.each { |handler|
+           handler.wait(max_wait_timeout: @rdkafka_delivery_handle_poll_timeout)
+         }
+       end
+     rescue Exception => e
+       log.warn "Send exception occurred: #{e} at #{e.backtrace.first}"
+       # Raise exception to retry sending messages
+       raise e
+     end
+
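+     # Bounded retry on rdkafka's :queue_full error, backing off
+     # enqueue_retry_backoff seconds between attempts.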
+     def enqueue_with_retry(producer, topic, record_buf, message_key, partition, headers)
+       attempt = 0
+       loop do
+         begin
+           return producer.produce(topic: topic, payload: record_buf, key: message_key, partition: partition, headers: headers)
+         rescue Exception => e
+           if e.respond_to?(:code) && e.code == :queue_full
+             if attempt <= @max_enqueue_retries
+               log.warn "Failed to enqueue message; attempting retry #{attempt} of #{@max_enqueue_retries} after #{@enqueue_retry_backoff}s"
+               sleep @enqueue_retry_backoff
+               attempt += 1
+             else
+               raise "Failed to enqueue message after #{@max_enqueue_retries} retries"
+             end
+           else
+             raise e
+           end
+         end
+       end
+     end
+   end
+ end