madhawk57-log 0.1.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,105 @@
+ module Fluent
+   module KafkaPluginUtil
+     module SSLSettings
+       def self.included(klass)
+         klass.instance_eval {
+           # https://github.com/zendesk/ruby-kafka#encryption-and-authentication-using-ssl
+           config_param :ssl_ca_cert, :array, :value_type => :string, :default => nil,
+                        :desc => "a PEM encoded CA cert to use with an SSL connection."
+           config_param :ssl_client_cert, :string, :default => nil,
+                        :desc => "a PEM encoded client cert to use with an SSL connection. Must be used in combination with ssl_client_cert_key."
+           config_param :ssl_client_cert_key, :string, :default => nil,
+                        :desc => "a PEM encoded client cert key to use with an SSL connection. Must be used in combination with ssl_client_cert."
+           config_param :ssl_client_cert_chain, :string, :default => nil,
+                        :desc => "an extra PEM encoded cert to use with an SSL connection."
+           config_param :ssl_ca_certs_from_system, :bool, :default => false,
+                        :desc => "this configures the store to look up CA certificates from the system default certificate store on an as-needed basis. The location of the store can usually be determined by: OpenSSL::X509::DEFAULT_CERT_FILE."
+           config_param :ssl_verify_hostname, :bool, :default => true,
+                        :desc => "this configures whether the certificate's hostname should be verified or not."
+         }
+       end
+
+       DummyFormatter = Object.new
+
+       def start
+         super
+
+         # Not ideal, but an easy fix that covers all Kafka plugins
+         unless log.respond_to?(:formatter)
+           def log.formatter
+             Fluent::KafkaPluginUtil::SSLSettings::DummyFormatter
+           end
+         end
+       end
+
+       def read_ssl_file(path)
+         return nil if path.nil?
+
+         if path.is_a?(Array)
+           path.map { |fp| File.read(fp) }
+         else
+           File.read(path)
+         end
+       end
+
+       def pickup_ssl_endpoint(node)
+         ssl_endpoint = node['endpoints'].find { |e| e.start_with?('SSL') }
+         raise 'no SSL endpoint found on Zookeeper' unless ssl_endpoint
+         return [URI.parse(ssl_endpoint).host, URI.parse(ssl_endpoint).port].join(':')
+       end
+     end
+
+     module SaslSettings
+       def self.included(klass)
+         klass.instance_eval {
+           config_param :principal, :string, :default => nil,
+                        :desc => "a Kerberos principal to use with SASL authentication (GSSAPI)."
+           config_param :keytab, :string, :default => nil,
+                        :desc => "a file path to the Kerberos keytab. Must be used with principal."
+           config_param :username, :string, :default => nil,
+                        :desc => "a username when using PLAIN/SCRAM SASL authentication"
+           config_param :password, :string, :default => nil, secret: true,
+                        :desc => "a password when using PLAIN/SCRAM SASL authentication"
+           config_param :scram_mechanism, :string, :default => nil,
+                        :desc => "if set, use SCRAM authentication with the specified mechanism. When unset, default to PLAIN authentication"
+         }
+       end
+     end
+
+     module GsmSettings
+       def self.included(klass)
+         klass.instance_eval {
+           config_param :secretName, :string, :default => nil,
+                        :desc => "the GSM secret name"
+           config_param :projectId, :string, :default => nil,
+                        :desc => "the GSM project id"
+           config_param :version, :string, :default => "latest",
+                        :desc => "version of the secret to fetch, default: latest"
+         }
+       end
+
+       # Fetches "<username>\n<password>" from Google Secret Manager and stores
+       # the credentials on the including plugin. Defined as an instance method
+       # (not self.fetch) so that @secretName, @projectId and @version resolve
+       # to the plugin's config_param values rather than module-level nils.
+       def fetch(logger)
+         logger.info "fetching secret with name #{@secretName} in project with id: #{@projectId} and version: #{@version}"
+
+         client = Google::Cloud::SecretManager.secret_manager_service
+
+         name = client.secret_version_path(
+           project: @projectId,
+           secret: @secretName,
+           secret_version: @version
+         )
+
+         res = client.access_secret_version name: name
+         # The secret bytes live in res.payload.data, not on the response itself.
+         s = res.payload.data.split("\n")
+
+         # Both a username line and a password line are required.
+         if s.length < 2
+           raise Fluent::ConfigError, "GSM response too short"
+         end
+
+         @username = s[0]
+         @password = s[1]
+         @scram_mechanism = "PLAIN"
+       end
+     end
+   end
+ end
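
Note on the GsmSettings module above: it expects the Secret Manager payload to carry the SASL username on the first line and the password on the second. A minimal standalone sketch of the same lookup, assuming the google-cloud-secret_manager gem and hypothetical project/secret names (my-project, kafka-creds):

    require "google/cloud/secret_manager"

    # Hypothetical names, for illustration only.
    client = Google::Cloud::SecretManager.secret_manager_service
    name = client.secret_version_path(
      project:        "my-project",
      secret:         "kafka-creds",
      secret_version: "latest"
    )

    res = client.access_secret_version name: name
    # The secret bytes are carried in the response's payload.
    username, password = res.payload.data.split("\n")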
@@ -0,0 +1,304 @@
+ require "set"
+ require "kafka/partitioner"
+ require "kafka/message_buffer"
+ require "kafka/produce_operation"
+ require "kafka/pending_message_queue"
+ require "kafka/pending_message"
+ require "kafka/compressor"
+ require "kafka/producer"
+
+ # for out_kafka_buffered
+ module Kafka
+   EMPTY_HEADER = {}
+
+   class Producer
+     def produce_for_buffered(value, key: nil, topic:, partition: nil, partition_key: nil, create_time: Time.now)
+       message = PendingMessage.new(
+         value: value,
+         key: key,
+         headers: EMPTY_HEADER,
+         topic: topic,
+         partition: partition,
+         partition_key: partition_key,
+         create_time: create_time
+       )
+
+       # If the producer is in transactional mode, messages may only be
+       # produced while a transaction is open.
+       if @transaction_manager.transactional? && !@transaction_manager.in_transaction?
+         raise 'You must trigger begin_transaction before producing messages'
+       end
+
+       @target_topics.add(topic)
+       @pending_message_queue.write(message)
+
+       nil
+     end
+   end
+ end
+
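The patch above reopens ruby-kafka's Kafka::Producer and adds produce_for_buffered, which mirrors Producer#produce but drops headers support and the max_buffer_size/max_buffer_bytesize checks, leaving buffer management to out_kafka_buffered. A minimal usage sketch, assuming the patch is loaded and a broker at localhost:9092 (hypothetical):

    require "kafka"

    kafka = Kafka.new(seed_brokers: ["localhost:9092"], client_id: "example")
    producer = kafka.producer

    # Same call shape as Producer#produce, minus headers and buffer-limit checks.
    producer.produce_for_buffered("hello", topic: "greetings", partition_key: "k1")
    producer.deliver_messages
    producer.shutdown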
+ # for out_kafka2
+ module Kafka
+   class Client
+     def topic_producer(topic, compression_codec: nil, compression_threshold: 1, ack_timeout: 5, required_acks: :all, max_retries: 2, retry_backoff: 1, max_buffer_size: 1000, max_buffer_bytesize: 10_000_000, idempotent: false, transactional: false, transactional_id: nil, transactional_timeout: 60)
+       cluster = initialize_cluster
+       compressor = Compressor.new(
+         codec_name: compression_codec,
+         threshold: compression_threshold,
+         instrumenter: @instrumenter,
+       )
+
+       transaction_manager = TransactionManager.new(
+         cluster: cluster,
+         logger: @logger,
+         idempotent: idempotent,
+         transactional: transactional,
+         transactional_id: transactional_id,
+         transactional_timeout: transactional_timeout,
+       )
+
+       TopicProducer.new(topic,
+         cluster: cluster,
+         transaction_manager: transaction_manager,
+         logger: @logger,
+         instrumenter: @instrumenter,
+         compressor: compressor,
+         ack_timeout: ack_timeout,
+         required_acks: required_acks,
+         max_retries: max_retries,
+         retry_backoff: retry_backoff,
+         max_buffer_size: max_buffer_size,
+         max_buffer_bytesize: max_buffer_bytesize,
+       )
+     end
+   end
+
+   class TopicProducer
+     def initialize(topic, cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:)
+       @cluster = cluster
+       @transaction_manager = transaction_manager
+       @logger = logger
+       @instrumenter = instrumenter
+       @required_acks = required_acks == :all ? -1 : required_acks
+       @ack_timeout = ack_timeout
+       @max_retries = max_retries
+       @retry_backoff = retry_backoff
+       @max_buffer_size = max_buffer_size
+       @max_buffer_bytesize = max_buffer_bytesize
+       @compressor = compressor
+
+       @topic = topic
+       @cluster.add_target_topics(Set.new([topic]))
+
+       # A buffer organized by topic/partition.
+       @buffer = MessageBuffer.new
+
+       # Messages added by `#produce` but not yet assigned a partition.
+       @pending_message_queue = PendingMessageQueue.new
+     end
+
+     def produce(value, key: nil, partition: nil, partition_key: nil, headers: EMPTY_HEADER, create_time: Time.now)
+       message = PendingMessage.new(
+         value: value,
+         key: key,
+         headers: headers,
+         topic: @topic,
+         partition: partition,
+         partition_key: partition_key,
+         create_time: create_time
+       )
+
+       # If the producer is in transactional mode, messages may only be
+       # produced while a transaction is open.
+       if @transaction_manager.transactional? && !@transaction_manager.in_transaction?
+         raise 'You must trigger begin_transaction before producing messages'
+       end
+
+       @pending_message_queue.write(message)
+
+       nil
+     end
+
+     def deliver_messages
+       # There's no need to do anything if the buffer is empty.
+       return if buffer_size == 0
+
+       deliver_messages_with_retries
+     end
+
+     # Returns the number of messages currently held in the buffer.
+     #
+     # @return [Integer] buffer size.
+     def buffer_size
+       @pending_message_queue.size + @buffer.size
+     end
+
+     def buffer_bytesize
+       @pending_message_queue.bytesize + @buffer.bytesize
+     end
+
+     # Deletes all buffered messages.
+     #
+     # @return [nil]
+     def clear_buffer
+       @buffer.clear
+       @pending_message_queue.clear
+     end
+
+     # Closes all connections to the brokers.
+     #
+     # @return [nil]
+     def shutdown
+       @transaction_manager.close
+       @cluster.disconnect
+     end
+
+     def init_transactions
+       @transaction_manager.init_transactions
+     end
+
+     def begin_transaction
+       @transaction_manager.begin_transaction
+     end
+
+     def commit_transaction
+       @transaction_manager.commit_transaction
+     end
+
+     def abort_transaction
+       @transaction_manager.abort_transaction
+     end
+
+     def transaction
+       raise 'This method requires a block' unless block_given?
+       begin_transaction
+       yield
+       commit_transaction
+     rescue Kafka::Producer::AbortTransaction
+       abort_transaction
+     rescue
+       abort_transaction
+       raise
+     end
+
+     def deliver_messages_with_retries
+       attempt = 0
+
+       #@cluster.add_target_topics(@target_topics)
+
+       operation = ProduceOperation.new(
+         cluster: @cluster,
+         transaction_manager: @transaction_manager,
+         buffer: @buffer,
+         required_acks: @required_acks,
+         ack_timeout: @ack_timeout,
+         compressor: @compressor,
+         logger: @logger,
+         instrumenter: @instrumenter,
+       )
+
+       loop do
+         attempt += 1
+
+         begin
+           @cluster.refresh_metadata_if_necessary!
+         rescue ConnectionError => e
+           raise DeliveryFailed.new(e, buffer_messages)
+         end
+
+         assign_partitions!
+         operation.execute
+
+         if @required_acks.zero?
+           # No response is returned by the brokers, so we can't know which messages
+           # have been successfully written. Our only option is to assume that they all
+           # have.
+           @buffer.clear
+         end
+
+         if buffer_size.zero?
+           break
+         elsif attempt <= @max_retries
+           @logger.warn "Failed to send all messages; attempting retry #{attempt} of #{@max_retries} after #{@retry_backoff}s"
+
+           sleep @retry_backoff
+         else
+           @logger.error "Failed to send all messages; keeping remaining messages in buffer"
+           break
+         end
+       end
+
+       unless @pending_message_queue.empty?
+         # Mark the cluster as stale in order to force a cluster metadata refresh.
+         @cluster.mark_as_stale!
+         raise DeliveryFailed.new("Failed to assign partitions to #{@pending_message_queue.size} messages", buffer_messages)
+       end
+
+       unless @buffer.empty?
+         partitions = @buffer.map {|topic, partition, _| "#{topic}/#{partition}" }.join(", ")
+
+         raise DeliveryFailed.new("Failed to send messages to #{partitions}", buffer_messages)
+       end
+     end
+
+     def assign_partitions!
+       failed_messages = []
+       partition_count = @cluster.partitions_for(@topic).count
+
+       @pending_message_queue.each do |message|
+         partition = message.partition
+
+         begin
+           if partition.nil?
+             partition = Partitioner.partition_for_key(partition_count, message)
+           end
+
+           @buffer.write(
+             value: message.value,
+             key: message.key,
+             headers: message.headers,
+             topic: message.topic,
+             partition: partition,
+             create_time: message.create_time,
+           )
+         rescue Kafka::Error => e
+           failed_messages << message
+         end
+       end
+
+       if failed_messages.any?
+         failed_messages.group_by(&:topic).each do |topic, messages|
+           @logger.error "Failed to assign partitions to #{messages.count} messages in #{topic}"
+         end
+
+         @cluster.mark_as_stale!
+       end
+
+       @pending_message_queue.replace(failed_messages)
+     end
+
+     def buffer_messages
+       messages = []
+
+       @pending_message_queue.each do |message|
+         messages << message
+       end
+
+       @buffer.each do |topic, partition, messages_for_partition|
+         messages_for_partition.each do |message|
+           messages << PendingMessage.new(
+             value: message.value,
+             key: message.key,
+             headers: message.headers,
+             topic: topic,
+             partition: partition,
+             partition_key: nil,
+             create_time: message.create_time
+           )
+         end
+       end
+
+       messages
+     end
+   end
+ end
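
TopicProducer is a single-topic cut of ruby-kafka's Producer: the topic is fixed at construction, so produce takes no topic: argument and partition assignment only ever consults that one topic's partition count. A usage sketch, assuming the patch is loaded and a broker at localhost:9092 (hypothetical):

    require "kafka"

    kafka = Kafka.new(seed_brokers: ["localhost:9092"], client_id: "example")

    # topic_producer is defined by the patch above; its options mirror Kafka::Client#producer.
    producer = kafka.topic_producer("events", compression_codec: :gzip, required_acks: :all)

    producer.produce("payload", key: "k1")  # no topic: keyword; the topic is fixed
    producer.deliver_messages
    producer.shutdown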
@@ -0,0 +1,266 @@
+ require 'fluent/output'
+ require 'fluent/plugin/kafka_plugin_util'
+
+ class Fluent::KafkaOutput < Fluent::Output
+   Fluent::Plugin.register_output('kafka', self)
+
+   config_param :brokers, :string, :default => 'localhost:9092',
+                :desc => <<-DESC
+ Set brokers directly:
+ <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,..
+ Note that you can choose to use either brokers or zookeeper.
+ DESC
+   config_param :zookeeper, :string, :default => nil,
+                :desc => "Set brokers via Zookeeper: <zookeeper_host>:<zookeeper_port>"
+   config_param :zookeeper_path, :string, :default => '/brokers/ids',
+                :desc => "Path in Zookeeper under which broker ids are stored. Defaults to /brokers/ids"
+   config_param :default_topic, :string, :default => nil,
+                :desc => "Output topic."
+   config_param :default_message_key, :string, :default => nil
+   config_param :default_partition_key, :string, :default => nil
+   config_param :default_partition, :integer, :default => nil
+   config_param :client_id, :string, :default => 'kafka'
+   config_param :sasl_over_ssl, :bool, :default => true,
+                :desc => <<-DESC
+ Set to false to disable SSL strict mode when using SASL authentication
+ DESC
+   config_param :output_data_type, :string, :default => 'json',
+                :desc => "Supported format: (json|ltsv|msgpack|attr:<record name>|<formatter name>)"
+   config_param :output_include_tag, :bool, :default => false
+   config_param :output_include_time, :bool, :default => false
+   config_param :exclude_partition_key, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove partition key from data
+ DESC
+   config_param :exclude_partition, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove partition from data
+ DESC
+
+   config_param :exclude_message_key, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove message key from data
+ DESC
+   config_param :exclude_topic_key, :bool, :default => false,
+                :desc => <<-DESC
+ Set true to remove topic name key from data
+ DESC
+
+   # ruby-kafka producer options
+   config_param :max_send_retries, :integer, :default => 2,
+                :desc => "Number of times to retry sending messages to a leader."
+   config_param :required_acks, :integer, :default => -1,
+                :desc => "The number of acks required per request."
+   config_param :ack_timeout, :integer, :default => nil,
+                :desc => "How long the producer waits for acks."
+   config_param :compression_codec, :string, :default => nil,
+                :desc => "The codec the producer uses to compress messages."
+   config_param :max_send_limit_bytes, :size, :default => nil
+   config_param :time_format, :string, :default => nil
+
+   config_param :max_buffer_size, :integer, :default => nil,
+                :desc => "Number of messages to be buffered by the kafka producer."
+
+   config_param :max_buffer_bytesize, :integer, :default => nil,
+                :desc => "Maximum size in bytes to be buffered."
+
+   config_param :active_support_notification_regex, :string, :default => nil,
+                :desc => <<-DESC
+ Add a regular expression to capture ActiveSupport notifications from the Kafka client.
+ Requires the activesupport gem; records will be generated under fluent_kafka_stats.**
+ DESC
+
+   include Fluent::KafkaPluginUtil::SSLSettings
+   include Fluent::KafkaPluginUtil::SaslSettings
+   include Fluent::KafkaPluginUtil::GsmSettings
+
+   attr_accessor :output_data_type
+   attr_accessor :field_separator
+
+   unless method_defined?(:log)
+     define_method("log") { $log }
+   end
+
+   def initialize
+     super
+
+     require 'kafka'
+
+     @kafka = nil
+   end
+
+   def refresh_client
+     if @zookeeper
+       @seed_brokers = []
+       z = Zookeeper.new(@zookeeper)
+       z.get_children(:path => @zookeeper_path)[:children].each do |id|
+         broker = Yajl.load(z.get(:path => @zookeeper_path + "/#{id}")[:data])
+         if @ssl_client_cert
+           @seed_brokers.push(pickup_ssl_endpoint(broker))
+         else
+           @seed_brokers.push("#{broker['host']}:#{broker['port']}")
+         end
+       end
+       z.close
+       log.info "brokers have been refreshed via Zookeeper: #{@seed_brokers}"
+     end
+     begin
+       if @seed_brokers.length > 0
+         # Pull SASL credentials from Google Secret Manager when configured.
+         if @secretName != nil && @projectId != nil
+           fetch(log)
+         end
+         if @scram_mechanism != nil && @username != nil && @password != nil
+           @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
+                              ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
+                              sasl_scram_username: @username, sasl_scram_password: @password, sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl,
+                              ssl_verify_hostname: @ssl_verify_hostname)
+         elsif @username != nil && @password != nil
+           @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
+                              ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
+                              sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
+         else
+           @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, ssl_ca_cert: read_ssl_file(@ssl_ca_cert),
+                              ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_ca_certs_from_system: @ssl_ca_certs_from_system,
+                              sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname)
+         end
+         log.info "initialized kafka producer: #{@client_id}"
+       else
+         log.warn "No brokers found on Zookeeper"
+       end
+     rescue Exception => e
+       log.error e
+     end
+   end
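
refresh_client selects one of three mutually exclusive SASL styles: SCRAM when scram_mechanism, username and password are all set; PLAIN when only username and password are set; GSSAPI (possibly with nil principal/keytab) otherwise. A hypothetical helper restating that branching, for illustration only and not part of the plugin:

    # Sketch: pick the SASL keyword arguments the way refresh_client does.
    def sasl_options(username:, password:, scram_mechanism:, principal:, keytab:)
      if scram_mechanism && username && password
        { sasl_scram_username: username, sasl_scram_password: password,
          sasl_scram_mechanism: scram_mechanism }
      elsif username && password
        { sasl_plain_username: username, sasl_plain_password: password }
      else
        { sasl_gssapi_principal: principal, sasl_gssapi_keytab: keytab }
      end
    end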
+
+   def configure(conf)
+     super
+
+     log.warn "Support of fluentd v0.12 has ended. Use kafka2 instead. kafka will be an alias of kafka2"
+
+     if @zookeeper
+       require 'zookeeper'
+     else
+       @seed_brokers = @brokers.split(",")
+       log.info "brokers have been set directly: #{@seed_brokers}"
+     end
+
+     if conf['ack_timeout_ms']
+       log.warn "'ack_timeout_ms' parameter is deprecated. Use the second-based 'ack_timeout' instead"
+       @ack_timeout = conf['ack_timeout_ms'].to_i / 1000
+     end
+
+     @f_separator = case @field_separator
+                    when /SPACE/i then ' '
+                    when /COMMA/i then ','
+                    when /SOH/i then "\x01"
+                    else "\t"
+                    end
+
+     @formatter_proc = setup_formatter(conf)
+
+     @producer_opts = {max_retries: @max_send_retries, required_acks: @required_acks}
+     @producer_opts[:ack_timeout] = @ack_timeout if @ack_timeout
+     @producer_opts[:compression_codec] = @compression_codec.to_sym if @compression_codec
+     @producer_opts[:max_buffer_size] = @max_buffer_size if @max_buffer_size
+     @producer_opts[:max_buffer_bytesize] = @max_buffer_bytesize if @max_buffer_bytesize
+     if @active_support_notification_regex
+       require 'active_support/notifications'
+       require 'active_support/core_ext/hash/keys'
+       ActiveSupport::Notifications.subscribe(Regexp.new(@active_support_notification_regex)) do |*args|
+         event = ActiveSupport::Notifications::Event.new(*args)
+         message = event.payload.respond_to?(:stringify_keys) ? event.payload.stringify_keys : event.payload
+         @router.emit("fluent_kafka_stats.#{event.name}", Time.now.to_i, message)
+       end
+     end
+   end
+
+   def multi_workers_ready?
+     true
+   end
+
+   def start
+     super
+     refresh_client
+   end
+
+   def shutdown
+     super
+     @kafka = nil
+   end
+
+   def setup_formatter(conf)
+     if @output_data_type == 'json'
+       require 'yajl'
+       Proc.new { |tag, time, record| Yajl::Encoder.encode(record) }
+     elsif @output_data_type == 'ltsv'
+       require 'ltsv'
+       Proc.new { |tag, time, record| LTSV.dump(record) }
+     elsif @output_data_type == 'msgpack'
+       require 'msgpack'
+       Proc.new { |tag, time, record| record.to_msgpack }
+     elsif @output_data_type =~ /^attr:(.*)$/
+       @custom_attributes = $1.split(',').map(&:strip).reject(&:empty?)
+       @custom_attributes.unshift('time') if @output_include_time
+       @custom_attributes.unshift('tag') if @output_include_tag
+       Proc.new { |tag, time, record|
+         @custom_attributes.map { |attr|
+           record[attr].nil? ? '' : record[attr].to_s
+         }.join(@f_separator)
+       }
+     else
+       @formatter = Fluent::Plugin.new_formatter(@output_data_type)
+       @formatter.configure(conf)
+       @formatter.method(:format)
+     end
+   end
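
For output_data_type attr:<field1>,<field2>,..., setup_formatter returns a proc that joins the named record attributes with the configured field separator (tag and time are prepended when output_include_tag/output_include_time are set). A self-contained sketch of the resulting behaviour, with illustrative values:

    f_separator = "\t"
    custom_attributes = ["time", "tag", "user"]

    formatter = Proc.new { |tag, time, record|
      custom_attributes.map { |attr|
        record[attr].nil? ? '' : record[attr].to_s
      }.join(f_separator)
    }

    record = { "time" => 1700000000, "tag" => "app.access", "user" => "alice" }
    formatter.call("app.access", 1700000000, record)
    # => "1700000000\tapp.access\talice"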
+
+   def emit(tag, es, chain)
+     begin
+       chain.next
+
+       # out_kafka is mainly for testing, so it does not need the performance of out_kafka_buffered.
+       producer = @kafka.producer(@producer_opts)
+
+       es.each do |time, record|
+         if @output_include_time
+           if @time_format
+             record['time'] = Time.at(time).strftime(@time_format)
+           else
+             record['time'] = time
+           end
+         end
+         record['tag'] = tag if @output_include_tag
+         topic = (@exclude_topic_key ? record.delete('topic') : record['topic']) || @default_topic || tag
+         partition_key = (@exclude_partition_key ? record.delete('partition_key') : record['partition_key']) || @default_partition_key
+         partition = (@exclude_partition ? record.delete('partition'.freeze) : record['partition'.freeze]) || @default_partition
+         message_key = (@exclude_message_key ? record.delete('message_key') : record['message_key']) || @default_message_key
+
+         record_buf = @formatter_proc.call(tag, time, record)
+         record_buf_bytes = record_buf.bytesize
+         if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
+           log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record => record
+           next
+         end
+         log.trace { "message will be sent to #{topic} with partition_key: #{partition_key}, partition: #{partition}, message_key: #{message_key} and value: #{record_buf}." }
+         begin
+           producer.produce(record_buf, topic: topic, key: message_key, partition: partition, partition_key: partition_key)
+         rescue Kafka::BufferOverflow => e
+           log.warn "BufferOverflow occurred: #{e}"
+           log.info "Trying to deliver the messages to prevent the buffer from overflowing again."
+           producer.deliver_messages
+           log.info "Recovered from BufferOverflow successfully"
+         end
+       end
+
+       producer.deliver_messages
+       producer.shutdown
+     rescue Exception => e
+       log.warn "Send exception occurred: #{e}"
+       producer.shutdown if producer
+       refresh_client
+       raise e
+     end
+   end
+
+ end
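
One behavioural note on emit's Kafka::BufferOverflow handler: it drains the producer's buffer with deliver_messages but does not re-produce the record that triggered the overflow, so that record is dropped. A drain-and-retry variant would look like the sketch below (illustrative only; not what the package ships):

    begin
      producer.produce(record_buf, topic: topic, key: message_key)
    rescue Kafka::BufferOverflow
      producer.deliver_messages                                      # flush the full buffer
      producer.produce(record_buf, topic: topic, key: message_key)   # retry once after draining
    end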