fluent-plugin-vmware-loginsight 0.1.10 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/gem-push.yml +38 -0
- data/CHANGELOG.md +63 -0
- data/Dockerfile +68 -0
- data/README.md +73 -24
- data/VERSION +1 -0
- data/examples/fluent.conf +104 -52
- data/examples/fluentd-vrli-plugin-debian.dockerfile +24 -12
- data/examples/k8s-log-collector-ds.yaml +108 -49
- data/fluent-plugin-vmware-loginsight.gemspec +2 -2
- data/lib/fluent/plugin/out_vmware_loginsight.rb +264 -290
- metadata +11 -7
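The bulk of the change is the rewrite of data/lib/fluent/plugin/out_vmware_loginsight.rb from the old Fluentd v0.12 output style (`module Fluent`, `initialize`/`configure`, emit with `chain.next`) to the v1 plugin API (`Fluent::Plugin::Output` with a buffered `write(chunk)`), shown in the hunk below. For orientation, a minimal `<match>` block for the rewritten plugin might look like the following sketch; it is assembled from the `config_param` defaults visible in the diff, and the scheme, host, port, and agent_id values are placeholders rather than anything shipped in the package:

    <match **>
      @type vmware_loginsight
      scheme https
      ssl_verify true
      host loginsight.example.com
      port 9543
      path api/v1/events/ingest
      agent_id 0
      http_method post
      serializer json
    </match>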
data/lib/fluent/plugin/out_vmware_loginsight.rb

@@ -9,333 +9,307 @@
 # SPDX-License-Identifier: MIT


-require …
+require 'fluent/plugin/output'
 require 'json'
 require 'net/http'
 require 'uri'

-module Fluent
-  … (80 removed lines not shown)
-    def initialize
-      super
-    end
-
-    def configure(conf)
-      super
+module Fluent::Plugin
+  class VmwareLoginsightOutput < Output
+    Fluent::Plugin.register_output('vmware_loginsight', self)
+
+    ### Connection Params ###
+    config_param :scheme, :string, :default => 'http'
+    # Loginsight Host ex. localhost
+    config_param :host, :string, :default => 'localhost'
+    # In case we want to post to multiple hosts. This is futuristic, Fluentd copy plugin can support this as is
+    #config_param :hosts, :string, :default => nil
+    # Loginsight port ex. 9000. Default 80
+    config_param :port, :integer, :default => 80
+    # Loginsight ingestion api path ex. 'api/v1/events/ingest'
+    config_param :path, :string, :default => 'api/v1/events/ingest'
+    # agent_id generated by your LI
+    config_param :agent_id, :string, :default => '0'
+    # Credentials if used
+    config_param :username, :string, :default => nil
+    config_param :password, :string, :default => nil, :secret => true
+    # Authentication nil | 'basic'
+    config_param :authentication, :string, :default => nil
+
+    # Set Net::HTTP.verify_mode to `OpenSSL::SSL::VERIFY_NONE`
+    config_param :ssl_verify, :bool, :default => true
+    config_param :ca_file, :string, :default => nil
+
+    ### API Params ###
+    # HTTP method
+    # post | put
+    config_param :http_method, :string, :default => :post
+    # form | json
+    config_param :serializer, :string, :default => :json
+    config_param :request_retries, :integer, :default => 3
+    config_param :request_timeout, :time, :default => 5
+    config_param :http_conn_debug, :bool, :default => false
+    # in bytes
+    config_param :max_batch_size, :integer, :default => 512000
+
+    # Simple rate limiting: ignore any records within `rate_limit_msec`
+    # since the last one.
+    config_param :rate_limit_msec, :integer, :default => 0
+    # Raise errors that were rescued during HTTP requests?
+    config_param :raise_on_error, :bool, :default => false
+    # Keys from log event whose values should be added as log message/text
+    # to loginsight. Note these key/value pairs won't be added as metadata/fields
+    config_param :log_text_keys, :array, default: ["log", "message", "msg"], value_type: :string
+    # Flatten hashes to create one key/val pair w/o losing log data
+    config_param :flatten_hashes, :bool, :default => true
+    # Seperator to use for joining flattened keys
+    config_param :flatten_hashes_separator, :string, :default => "_"
+
+    # Keys from log event to rewrite
+    # for instance from 'kubernetes_namespace' to 'k8s_namespace'
+    # tags will be rewritten with substring substitution
+    # and applied in the order present in the hash
+    # (Hashes enumerate their values in the order that the
+    # corresponding keys were inserted
+    # see https://ruby-doc.org/core-2.2.2/Hash.html)
+    # example config:
+    # shorten_keys {
+    #    "__":"_",
+    #    "container_":"",
+    #    "kubernetes_":"k8s_",
+    #    "labels_":"",
+    # }
+    config_param :shorten_keys, :hash, value_type: :string, default:
+      {
+        'kubernetes_':'k8s_',
+        'namespace':'ns',
+        'labels_':'',
+        '_name':'',
+        '_hash':'',
+        'container_':''
+      }
+
+    config_section :buffer do
+      config_set_default :@type, "memory"
+      config_set_default :chunk_keys, []
+      config_set_default :timekey_use_utc, true
+    end

-  … (2 removed lines not shown)
-        when 'basic'
-          :basic
-        else
-          :none
-        end
+    def configure(conf)
+      super

-  … (2 removed lines not shown)
+      @ssl_verify_mode = @ssl_verify ? OpenSSL::SSL::VERIFY_PEER : OpenSSL::SSL::VERIFY_NONE
+      @auth = case @authentication
+              when 'basic'
+                :basic
+              else
+                :none
+              end

-  … (2 removed lines not shown)
-    end
+      @last_request_time = nil
+    end

-  … (3 removed lines not shown)
+    def format_url()
+      url = "#{@scheme}://#{host}:#{port}/#{path}/#{agent_id}"
+      url
+    end

-  … (3 removed lines not shown)
+    def set_header(req)
+      if @serializer == 'json'
+        set_json_header(req)
      end
+      req
+    end

-  … (4 removed lines not shown)
-      req
-    end
+    def set_json_header(req)
+      req['Content-Type'] = 'application/json'
+      req
+    end

-  … (3 removed lines not shown)
+    def shorten_key(key)
+      # LI doesn't allow some characters in field 'name'
+      # like '/', '-', '\', '.', etc. so replace them with @flatten_hashes_separator
+      key = key.gsub(/[\/\.\-\\\@]/,@flatten_hashes_separator).downcase
+      # shorten field names using provided shorten_keys parameters
+      @shorten_keys.each do | match, replace |
+        key = key.gsub(match.to_s,replace)
      end
+      key
+    end

-  … (6 removed lines not shown)
-        key = key.gsub(match.to_s,replace)
-      end
-      key
+    def create_loginsight_event(time, record)
+      flattened_records = {}
+      if @flatten_hashes
+        flattened_records = flatten_record(record, [])
+      else
+        flattened_records = record
      end
-  … (14 removed lines not shown)
+      fields = []
+      keys = []
+      log = ''
+      flattened_records.each do |key, value|
+        begin
+          next if value.nil?
+          # LI doesn't support duplicate fields, make unique names by appending underscore
+          key = shorten_key(key)
+          while keys.include?(key)
+            key = key + '_'
+          end
+          keys.push(key)
+          key.force_encoding("utf-8")
+          # convert value to json string if its a hash and to string if not already a string
          begin
-  … (8 removed lines not shown)
-          # convert value to json string if its a hash and to string if not already a string
-          begin
-            value = value.to_json if value.is_a?(Hash)
-            value = value.to_s
-            value = value.frozen? ? value.dup : value # if value is immutable, use a copy.
-            value.force_encoding("utf-8")
-          rescue Exception=>e
-            $log.warn "force_encoding exception: " "#{e.class}, '#{e.message}', " \
-              "\n Request: #{key} #{record.to_json[1..1024]}"
-            value = "Exception during conversion: #{e.message}"
-          end
+            value = value.to_json if value.is_a?(Hash)
+            value = value.to_s
+            value = value.frozen? ? value.dup : value # if value is immutable, use a copy.
+            value.force_encoding("utf-8")
+          rescue Exception=>e
+            $log.warn "force_encoding exception: " "#{e.class}, '#{e.message}', " \
+              "\n Request: #{key} #{record.to_json[1..1024]}"
+            value = "Exception during conversion: #{e.message}"
          end
-  … (7 removed lines not shown)
+        end
+        if @log_text_keys.include?(key)
+          if log != "#{value}"
+            if log.empty?
+              log = "#{value}"
+            else
+              log += " #{value}"
            end
-          else
-            # If there is time information available, update time for LI. LI ignores
-            # time if it is out of the error/adjusment window of 10 mins. in such
-            # cases we would still like to preserve time info, so add it as event.
-            # TODO Ignore the below block for now. Handle the case for time being in
-            # different formats than milliseconds
-            #if ['time', '_source_realtime_timestamp'].include?(key)
-            #  time = value
-            #end
-            fields << {"name" => key, "content" => value}
          end
+        else
+          # If there is time information available, update time for LI. LI ignores
+          # time if it is out of the error/adjusment window of 10 mins. in such
+          # cases we would still like to preserve time info, so add it as event.
+          # TODO Ignore the below block for now. Handle the case for time being in
+          # different formats than milliseconds
+          #if ['time', '_source_realtime_timestamp'].include?(key)
+          #  time = value
+          #end
+          fields << {"name" => key, "content" => value}
        end
-      event = {
-        "fields" => fields,
-        "text" => log.gsub(/^$\n/, ''),
-        "timestamp" => time * 1000
-      }
-      event
      end
+      event = {
+        "fields" => fields,
+        "text" => log.gsub(/^$\n/, ''),
+        "timestamp" => time * 1000
+      }
+      event
+    end

-  … (2 removed lines not shown)
+    def flatten_record(record, prefix=[])
+      ret = {}

-  … (7 removed lines not shown)
-        end
-      end
-      when Array
-        record.each do |value|
-          ret.merge! flatten_record(value, prefix)
+      case record
+      when Hash
+        record.each do |key, value|
+          if @log_text_keys.include?(key)
+            ret.merge!({key.to_s => value})
+          else
+            ret.merge! flatten_record(value, prefix + [key.to_s])
          end
-  … (4 removed lines not shown)
+        end
+      when Array
+        record.each do |value|
+          ret.merge! flatten_record(value, prefix)
+        end
+      else
+        return {prefix.join(@flatten_hashes_separator) => record}
      end
+      ret
+    end

-  … (5 removed lines not shown)
-      set_header(req)
-      return req, uri
+    def send_request(req, uri)
+      is_rate_limited = (@rate_limit_msec != 0 and not @last_request_time.nil?)
+      if is_rate_limited and ((Time.now.to_f - @last_request_time) * 1000.0 < @rate_limit_msec)
+        $log.info('Dropped request due to rate limiting')
+        return
      end

-  … (6 removed lines not shown)
+      if @auth and @auth.to_s.eql? "basic"
+        req.basic_auth(@username, @password)
+      end
+      begin
+        retries ||= 2
+        response = nil
+        @last_request_time = Time.now.to_f
+
+        http_conn = Net::HTTP.new(uri.host, uri.port)
+        # For debugging, set this
+        http_conn.set_debug_output($stdout) if @http_conn_debug
+        http_conn.use_ssl = (uri.scheme == 'https')
+        if http_conn.use_ssl?
+          http_conn.ca_file = @ca_file
        end
+        http_conn.verify_mode = @ssl_verify_mode

-  … (2 removed lines not shown)
+        response = http_conn.start do |http|
+          http.read_timeout = @request_timeout
+          http.request(req)
        end
-  … (13 removed lines not shown)
+      rescue => e # rescue all StandardErrors
+        # server didn't respond
+        # Be careful while turning on below log, if LI instance can't be reached and you're sending
+        # log-container logs to LI as well, you may end up in a cycle.
+        # TODO handle the cyclic case at plugin level if possible.
+        # $log.warn "Net::HTTP.#{req.method.capitalize} raises exception: " \
+        #   "#{e.class}, '#{e.message}', \n Request: #{req.body[1..1024]}"
+        retry unless (retries -= 1).zero?
+        raise e if @raise_on_error
+      else
+        unless response and response.is_a?(Net::HTTPSuccess)
+          res_summary = if response
+                          "Response Code: #{response.code}\n"\
+                          "Response Message: #{response.message}\n" \
+                          "Response Body: #{response.body}"
+                        else
+                          "Response = nil"
+                        end
+          # ditto cyclic warning
+          # $log.warn "Failed to #{req.method} #{uri}\n(#{res_summary})\n" \
+          #   "Request Size: #{req.body.size} Request Body: #{req.body[1..1024]}"
+        end #end unless
+      end # end begin
+    end # end send_request
+
+    def send_events(uri, events)
+      req = Net::HTTP.const_get(@http_method.to_s.capitalize).new(uri.path)
+      event_req = {
+        "events" => events
+      }
+      req.body = event_req.to_json
+      set_header(req)
+      send_request(req, uri)
+    end

-  … (10 removed lines not shown)
-        #   "#{e.class}, '#{e.message}', \n Request: #{req.body[1..1024]}"
-        retry unless (retries -= 1).zero?
-        raise e if @raise_on_error
+    def handle_records(chunk)
+      url = format_url()
+      uri = URI.parse(url)
+      events = []
+      count = 0
+      chunk.each do |time, record|
+        new_event = create_loginsight_event(time, record)
+        new_event_size = new_event.to_json.size
+        if new_event_size > @max_batch_size
+          $log.warn "dropping event larger than max_batch_size: #{new_event.to_json[1..1024]}"
        else
-  … (4 removed lines not shown)
-                          "Response Body: #{response.body}"
-                        else
-                          "Response = nil"
-                        end
-          # ditto cyclic warning
-          # $log.warn "Failed to #{req.method} #{uri}\n(#{res_summary})\n" \
-          #   "Request Size: #{req.body.size} Request Body: #{req.body[1..1024]}"
-        end #end unless
-      end # end begin
-    end # end send_request
-
-    def send_events(uri, events)
-      req = Net::HTTP.const_get(@http_method.to_s.capitalize).new(uri.path)
-      event_req = {
-        "events" => events
-      }
-      req.body = event_req.to_json
-      set_header(req)
-      send_request(req, uri)
-    end
-
-    def handle_records(tag, es)
-      url = format_url()
-      uri = URI.parse(url)
-      events = []
-      count = 0
-      es.each do |time, record|
-        new_event = create_loginsight_event(tag, time, record)
-        new_event_size = new_event.to_json.size
-        if new_event_size > @max_batch_size
-          $log.warn "dropping event larger than max_batch_size: #{new_event.to_json[1..1024]}"
-        else
-          if (count + new_event_size) > @max_batch_size
-            send_events(uri, events)
-            events = []
-            count = 0
-          end
-          count += new_event_size
-          events << new_event
+          if (count + new_event_size) > @max_batch_size
+            send_events(uri, events)
+            events = []
+            count = 0
          end
-  … (2 removed lines not shown)
-      send_events(uri, events)
+          count += new_event_size
+          events << new_event
        end
      end
-  … (2 removed lines not shown)
-      handle_records(tag, es)
-      chain.next
+      if count > 0
+        send_events(uri, events)
+      end
    end
+
+    # Sync Buffered Output
+    def write(chunk)
+      handle_records(chunk)
+    end
  end
end
-
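Beyond the API migration, the new version adds the field-shaping options visible above (`flatten_hashes`, `flatten_hashes_separator`, `shorten_keys`, `log_text_keys`), size-based batching via `max_batch_size`, and memory buffering by default. A sketch of a configuration exercising those options; the connection values are placeholders, the option values simply restate the defaults from the diff, and the `<buffer>` block is illustrative:

    <match **>
      @type vmware_loginsight
      scheme https
      host loginsight.example.com
      port 9543
      serializer json
      rate_limit_msec 0
      raise_on_error false
      # keys whose values become the event text rather than fields
      log_text_keys ["log","message","msg"]
      # flatten nested hashes into single field names joined with "_"
      flatten_hashes true
      flatten_hashes_separator _
      # substring rewrites applied to field names, in order
      shorten_keys {"kubernetes_":"k8s_","labels_":"","container_":""}
      # events larger than this (bytes) are dropped; smaller ones are batched up to this size per request
      max_batch_size 512000
      <buffer>
        @type memory
        flush_interval 10s
      </buffer>
    </match>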