fluent-plugin-vmware-loginsight 0.1.6 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/gem-push.yml +38 -0
- data/CHANGELOG.md +63 -0
- data/README.md +71 -18
- data/VERSION +1 -0
- data/examples/fluent.conf +104 -52
- data/examples/fluentd-vrli-plugin-debian.dockerfile +24 -12
- data/examples/k8s-log-collector-ds.yaml +108 -49
- data/fluent-plugin-vmware-loginsight.gemspec +1 -1
- data/lib/fluent/plugin/out_vmware_loginsight.rb +273 -272
- metadata +9 -7

data/examples/fluentd-vrli-plugin-debian.dockerfile

@@ -8,29 +8,41 @@
 #
 # SPDX-License-Identifier: MIT
 
-
-#
+
+# Sample Dockerfile to use as log collector
+# Builds a debian-based fluentd image that has fluent-plugin-kubernetes_metadata_filter,
+# fluent-plugin-rewrite-tag-filter, fluent-plugin-systemd and
+# fluent-plugin-vmware-loginsight gem installed.
 #
-#
+# This image will get preconfigured with the fluent.conf if avaialble at the
+# same dir level. For fluentd config example, see
 # https://github.com/vmware/fluent-plugin-vmware-loginsight/blob/master/examples/fluent.conf
-FROM fluent/fluentd:v0.14.15-debian-onbuild
-# Above image expects the loginsight plugin vmware_loginsight to be available under ./plugins/vmware_loginsight.rb
-# and fluentd config under ./fluent.conf by default
 
+# This base image is built from https://github.com/fluent/fluentd-kubernetes-daemonset
+FROM fluent/fluentd:v1.11-debian-1
+
+# Use root account to use apt
 USER root
 
-
+# You can install your plugins here
+RUN buildDeps="sudo make gcc g++ libc-dev" \
     && apt-get update \
     && apt-get install -y --no-install-recommends $buildDeps \
     && sudo gem install \
-       fluent-plugin-
-       fluent-plugin-
-       fluent-plugin-
+       fluent-plugin-kubernetes_metadata_filter:2.4.6 \
+       fluent-plugin-rewrite-tag-filter:2.3.0 \
+       fluent-plugin-systemd:1.0.2 \
+       fluent-plugin-vmware-loginsight:0.1.10 \
    && sudo gem sources --clear-all \
    && SUDO_FORCE_REMOVE=yes \
    apt-get purge -y --auto-remove \
    -o APT::AutoRemove::RecommendsImportant=false \
    $buildDeps \
    && rm -rf /var/lib/apt/lists/* \
-
-
+    && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem
+
+# You can install the LI plugin using a gem or if you want to test your
+# changes to plugin, you may add the .rb directly under `plugins` dir, then
+# you don't need to install the gem
+COPY plugins /fluentd/plugins/
+
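
Note on the Dockerfile change above: the base image moves from the end-of-life fluentd v0.14 onbuild image to fluent/fluentd:v1.11-debian-1, and the plugin gems are now pinned to exact versions. A hypothetical smoke test (not part of the package) that could be run inside the built image to confirm the pins took effect:

  # check_pins.rb - hypothetical; assumes an image built from the Dockerfile above
  required = {
    'fluent-plugin-kubernetes_metadata_filter' => '2.4.6',
    'fluent-plugin-rewrite-tag-filter'         => '2.3.0',
    'fluent-plugin-systemd'                    => '1.0.2',
    'fluent-plugin-vmware-loginsight'          => '0.1.10',
  }
  required.each do |name, version|
    # find_by_name raises Gem::MissingSpecError if the exact version is absent
    Gem::Specification.find_by_name(name, version)
    puts "#{name} #{version} OK"
  rescue Gem::MissingSpecError => e
    abort "FAIL: #{e.message}"
  end
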
data/examples/k8s-log-collector-ds.yaml

@@ -23,30 +23,32 @@ data:
   myapp-fluent.conf: |
     # Input sources
     @include general.conf
-    @include systemd
-    @include kubernetes
-
-    # Parsing/Filtering
-    @include kubernetes-filter.conf
+    @include systemd.conf
+    @include kubernetes.conf
+    @include kube-audit.conf
 
     # Forwading - Be vigilant of the order in which these plugins are specified. Order matters!
-    @include
+    @include vmw-li.conf
 
   general.conf: |
     <system>
       log_level info
     </system>
     # Prevent fluentd from handling records containing its own logs to handle cycles.
-    <
-
-
+    <label @FLUENT_LOG>
+      <match fluent.**>
+        @type null
+      </match>
+    </label>
 
-  systemd
+  systemd.conf: |
+    # Journal logs
     <source>
       @type systemd
+      @id in_systemd_logs
       path /run/log/journal
       # Can filter logs if we want, e.g.
-      #
+      #filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
       <storage>
         @type local
         persistent true
@@ -57,70 +59,114 @@ data:
       strip_underscores true
     </source>
 
-  kubernetes
+  kubernetes.conf: |
+    # Container logs
+    # Kubernetes docker logs are stored under /var/lib/docker/containers for
+    # which kubernetes creates a symlink at /var/log/containers
     <source>
       @type tail
+      @id in_tail_container_logs
       path /var/log/containers/*.log
       # One could exclude certain logs like:
-      #
+      #exclude_path ["/var/log/containers/log-collector*.log"]
       pos_file /var/log/fluentd-docker.pos
-      time_format %Y-%m-%dT%H:%M:%S
-      tag kubernetes.*
-      format json
       read_from_head true
+      # Set this watcher to false if you have many files to tail
+      enable_stat_watcher false
+      refresh_interval 5
+      tag kubernetes.*
+      <parse>
+        @type json
+        time_key time
+        keep_time_key true
+        time_format %Y-%m-%dT%H:%M:%S.%NZ
+      </parse>
     </source>
-
-  kubernetes-filter.conf: |
+    # Kubernetes metadata filter that tags additional meta data for each container event
     <filter kubernetes.**>
       @type kubernetes_metadata
-
-
+      @id filter_kube_metadata
+      kubernetes_url "#{ENV['FLUENT_FILTER_KUBERNETES_URL'] || 'https://' + ENV.fetch('KUBERNETES_SERVICE_HOST') + ':' + ENV.fetch('KUBERNETES_SERVICE_PORT') + '/api'}"
+      verify_ssl "#{ENV['KUBERNETES_VERIFY_SSL'] || true}"
+      ca_file "#{ENV['KUBERNETES_CA_FILE']}"
+      skip_labels "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_LABELS'] || 'false'}"
+      skip_container_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA'] || 'false'}"
+      skip_master_url "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL'] || 'false'}"
+      skip_namespace_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA'] || 'false'}"
+    </filter>
+
+    # Prefix the tag by namespace. This would make it easy to match logs by namespaces
+    <match kubernetes.**>
+      @type rewrite_tag_filter
+      <rule>
+        key $.kubernetes.namespace_name
+        pattern ^(.+)$
+        tag $1.${tag}
+      </rule>
+    </match>
+
+  kube-audit.conf: |
+    # Kube-apiserver audit logs
+    <source>
+      @type tail
+      @id in_tail_kube_audit_logs
+      # path to audit logs for kube-apiserver
+      path "/var/log/kube-audit/audit.log"
+      pos_file /var/log/kube-audit.pos
+      tag kube-audit
+      <parse>
+        @type json
+        time_key timestamp
+        keep_time_key false
+        time_format %Y-%m-%dT%H:%M:%SZ
+      </parse>
+    </source>
+    # Loginsight doesn't support ingesting `source` as a field name, get rid of it
+    <filter kube-audit>
+      @type record_transformer
+      @id filter_kube_audit_logs
+      enable_ruby
+      remove_keys source
+      <record>
+        log ${record}
+      </record>
     </filter>
 
-
+  vmw-li.conf: |
+    # Match everything
     # We are capturing all log messages and redirecting them to endpoints mentioned in each <store> tag.
     # One may redirect these logs to muliple endpoints (including multiple LI instances).
     # Or one may chose to tag their specific logs and add their own config to capture those tagged logs and redirect
-    # them to appropriate endpoint.
+    # them to appropriate endpoint. That specific config needs to preceed this generic one.
     <match **>
       @type copy
       <store>
         @type vmware_loginsight
+        @id out_vmw_li_all_container_logs
        scheme https
        ssl_verify true
        # Loginsight host: One may use IP address or cname
-        #
-        host
-        port
-        path api/v1/events/ingest
+        #host X.X.X.X
+        host MY_LOGINSIGHT_HOST
+        port 9543
        agent_id XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-
-
-
-
-
-        tag_key tag
+        # Keys from log event whose values should be added as log message/text to
+        # Loginsight. Note these key/value pairs won't be added as metadata/fields
+        log_text_keys ["log","msg","message"]
+        # Use this flag if you want to enable http debug logs
+        http_conn_debug false
      </store>
-      #
-      #
-      #
-      #
+      # copy plugin supports sending/copying logs to multiple plugins
+      # One may choose to send them to multiple LIs
+      # Or one may want send a copy to stdout for debugging
+      # Please note, if you use stdout along with LI, catch the logger's log to make
+      # sure they're not cyclic
+      #<store>
+      #  @type stdout
+      #</store>
    </match>
 
 
-  extra.conf: |
-    # If we want to transform events we could use:
-    #<filter **>
-    #  @type record_transformer
-    #  enable_ruby
-    #  auto_typecast
-    #  <record>
-    #    hostname "#{Socket.gethostname}"
-    #    mykey ${["message"=>record.to_json]}
-    #  </record>
-    #</filter>
-
-
 ---
 kind: DaemonSet
 apiVersion: extensions/v1beta1
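
Note on vmw-li.conf above: every record is copied to the vmware_loginsight store, which (per the plugin code in the Ruby diff further down) turns each record into an event of fields, text, and a millisecond timestamp, batches events, and POSTs them to <scheme>://<host>:<port>/api/v1/events/ingest/<agent_id>. A minimal sketch of one such payload; the field values are illustrative:

  require 'json'

  # One ingestion event as create_loginsight_event builds it: metadata in
  # "fields", values of log_text_keys in "text", event time in milliseconds.
  event = {
    'fields'    => [{ 'name' => 'tag', 'content' => 'kube-audit' }],
    'text'      => 'example log line',
    'timestamp' => Time.now.to_i * 1000
  }
  # send_events wraps a batch in an "events" array for the request body.
  puts({ 'events' => [event] }.to_json)
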
@@ -131,8 +177,21 @@ metadata:
     app: "log-collector"
     version: v1
 spec:
+  selector:
+    matchLabels:
+      app: "log-collector"
+  revisionHistoryLimit: 3
+  minReadySeconds: 10
+  updateStrategy:
+    type: RollingUpdate
+    rollingUpdate:
+      # How many pods can be unavailable during the rolling update.
+      maxUnavailable: 3
   template:
     metadata:
+      annotations:
+        # One may use this annotation to trigger rollout whenever fluentd config changes
+        configHash: GENERATED_HASH
       labels:
         app: "log-collector"
         version: v1
data/fluent-plugin-vmware-loginsight.gemspec

@@ -14,7 +14,7 @@ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
 
 Gem::Specification.new do |spec|
   spec.name = "fluent-plugin-vmware-loginsight"
-  spec.version = "0.1.6"
+  spec.version = File.read("VERSION").strip
   spec.authors = ["Vishal Mohite", "Chris Todd"]
   spec.email = ["vmohite@vmware.com", "toddc@vmware.com"]
 
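
With the hardcoded version string replaced by a read of the new top-level VERSION file (added in this release as a single-line file), a release bump no longer touches the gemspec. The resolution is simply:

  File.write('VERSION', "1.0.0\n")   # stand-in for the packaged file
  puts File.read('VERSION').strip    # => "1.0.0"
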
data/lib/fluent/plugin/out_vmware_loginsight.rb

(Removed lines shown as a bare "-" were not rendered in the source diff; their old content is not recoverable here.)

@@ -9,313 +9,314 @@
 # SPDX-License-Identifier: MIT
 
 
-require
+require 'fluent/plugin/output'
 require 'json'
 require 'net/http'
 require 'uri'
 
-module Fluent
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+module Fluent::Plugin
+  class VmwareLoginsightOutput < Output
+    Fluent::Plugin.register_output('vmware_loginsight', self)
+
+    ### Connection Params ###
+    config_param :scheme, :string, :default => 'http'
+    # Loginsight Host ex. localhost
+    config_param :host, :string, :default => 'localhost'
+    # In case we want to post to multiple hosts. This is futuristic, Fluentd copy plugin can support this as is
+    #config_param :hosts, :string, :default => nil
+    # Loginsight port ex. 9000. Default 80
+    config_param :port, :integer, :default => 80
+    # Loginsight ingestion api path ex. 'api/v1/events/ingest'
+    config_param :path, :string, :default => 'api/v1/events/ingest'
+    # agent_id generated by your LI
+    config_param :agent_id, :string, :default => '0'
+    # Credentials if used
+    config_param :username, :string, :default => nil
+    config_param :password, :string, :default => nil, :secret => true
+    # Authentication nil | 'basic'
+    config_param :authentication, :string, :default => nil
+
+    # Set Net::HTTP.verify_mode to `OpenSSL::SSL::VERIFY_NONE`
+    config_param :ssl_verify, :bool, :default => true
+    config_param :ca_file, :string, :default => nil
+
+    ### API Params ###
+    # HTTP method
+    # post | put
+    config_param :http_method, :string, :default => :post
+    # form | json
+    config_param :serializer, :string, :default => :json
+    config_param :request_retries, :integer, :default => 3
+    config_param :request_timeout, :time, :default => 5
+    config_param :http_conn_debug, :bool, :default => false
+    config_param :max_batch_size, :integer, :default => 512000
+
+    # Simple rate limiting: ignore any records within `rate_limit_msec`
+    # since the last one.
+    config_param :rate_limit_msec, :integer, :default => 0
+    # Raise errors that were rescued during HTTP requests?
+    config_param :raise_on_error, :bool, :default => false
+    ### Additional Params
+    config_param :include_tag_key, :bool, :default => true
+    # Metadata key that identifies Fluentd tags
+    config_param :tag_key, :string, :default => 'tag'
+    # Keys from log event whose values should be added as log message/text
+    # to loginsight. Note these key/value pairs won't be added as metadata/fields
+    config_param :log_text_keys, :array, default: ["log", "message", "msg"], value_type: :string
+    # Flatten hashes to create one key/val pair w/o losing log data
+    config_param :flatten_hashes, :bool, :default => true
+    # Seperator to use for joining flattened keys
+    config_param :flatten_hashes_separator, :string, :default => "_"
+
+    # Keys from log event to rewrite
+    # for instance from 'kubernetes_namespace' to 'k8s_namespace'
+    # tags will be rewritten with substring substitution
+    # and applied in the order present in the hash
+    # (Hashes enumerate their values in the order that the
+    # corresponding keys were inserted
+    # see https://ruby-doc.org/core-2.2.2/Hash.html)
+    # example config:
+    # shorten_keys {
+    #   "__":"_",
+    #   "container_":"",
+    #   "kubernetes_":"k8s_",
+    #   "labels_":"",
+    # }
+    config_param :shorten_keys, :hash, value_type: :string, default:
+      {
+        'kubernetes_':'k8s_',
+        'namespace':'ns',
+        'labels_':'',
+        '_name':'',
+        '_hash':'',
+        'container_':''
+      }
+
+    def configure(conf)
+      super
+
+      @ssl_verify_mode = @ssl_verify ? OpenSSL::SSL::VERIFY_PEER : OpenSSL::SSL::VERIFY_NONE
+      @auth = case @authentication
+              when 'basic'
+                :basic
+              else
+                :none
+              end
 
-
-
-    end
+      @last_request_time = nil
+    end
 
-
-
-
+    def format_url()
+      url = "#{@scheme}://#{host}:#{port}/#{path}/#{agent_id}"
+      url
+    end
 
-
-
-
+    def set_header(req)
+      if @serializer == 'json'
+        set_json_header(req)
       end
+      req
+    end
 
-
-
-
-
-      req
-    end
+    def set_json_header(req)
+      req['Content-Type'] = 'application/json'
+      req
+    end
 
-
-
-
+    def shorten_key(key)
+      # LI doesn't allow some characters in field 'name'
+      # like '/', '-', '\', '.', etc. so replace them with @flatten_hashes_separator
+      key = key.gsub(/[\/\.\-\\\@]/,@flatten_hashes_separator).downcase
+      # shorten field names using provided shorten_keys parameters
+      @shorten_keys.each do | match, replace |
+        key = key.gsub(match.to_s,replace)
       end
+      key
+    end
 
-
-
-
-
-
-
-      key = key.gsub(/namespace/,'ns')
-      key = key.gsub(/labels_/,'')
-      key = key.gsub(/_name/,'')
-      key = key.gsub(/_hash/,'')
-      key = key.gsub(/container_/,'')
-      key
+    def create_loginsight_event(tag, time, record)
+      flattened_records = {}
+      if @flatten_hashes
+        flattened_records = flatten_record(record, [])
+      else
+        flattened_records = record
       end
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      # tag can be immutable in some cases, use a copy.
+      flattened_records[@tag_key] = tag.dup if @include_tag_key
+      fields = []
+      keys = []
+      log = ''
+      flattened_records.each do |key, value|
+        begin
+          next if value.nil?
+          # LI doesn't support duplicate fields, make unique names by appending underscore
+          key = shorten_key(key)
+          while keys.include?(key)
+            key = key + '_'
+          end
+          keys.push(key)
+          key.force_encoding("utf-8")
+          # convert value to json string if its a hash and to string if not already a string
           begin
-
-
-
-
-
-
-
-
-          # convert value to json string if its a hash and to string if not already a string
-          begin
-            value = value.to_json if value.is_a?(Hash)
-            value = value.to_s
-            value = value.frozen? ? value.dup : value # if value is immutable, use a copy.
-            value.force_encoding("utf-8")
-          rescue Exception=>e
-            $log.warn "force_encoding exception: " "#{e.class}, '#{e.message}', " \
-              "\n Request: #{key} #{record.to_json[1..1024]}"
-            value = "Exception during conversion: #{e.message}"
-          end
+            value = value.to_json if value.is_a?(Hash)
+            value = value.to_s
+            value = value.frozen? ? value.dup : value # if value is immutable, use a copy.
+            value.force_encoding("utf-8")
+          rescue Exception=>e
+            $log.warn "force_encoding exception: " "#{e.class}, '#{e.message}', " \
+              "\n Request: #{key} #{record.to_json[1..1024]}"
+            value = "Exception during conversion: #{e.message}"
           end
-
-
-
-
-
-
-
+        end
+        if @log_text_keys.include?(key)
+          if log != "#{value}"
+            if log.empty?
+              log = "#{value}"
+            else
+              log += " #{value}"
             end
-        else
-          # If there is time information available, update time for LI. LI ignores
-          # time if it is out of the error/adjusment window of 10 mins. in such
-          # cases we would still like to preserve time info, so add it as event.
-          # TODO Ignore the below block for now. Handle the case for time being in
-          # different formats than milliseconds
-          #if ['time', '_source_realtime_timestamp'].include?(key)
-          #  time = value
-          #end
-          fields << {"name" => key, "content" => value}
           end
+        else
+          # If there is time information available, update time for LI. LI ignores
+          # time if it is out of the error/adjusment window of 10 mins. in such
+          # cases we would still like to preserve time info, so add it as event.
+          # TODO Ignore the below block for now. Handle the case for time being in
+          # different formats than milliseconds
+          #if ['time', '_source_realtime_timestamp'].include?(key)
+          #  time = value
+          #end
+          fields << {"name" => key, "content" => value}
         end
-      event = {
-        "fields" => fields,
-        "text" => log.gsub(/^$\n/, ''),
-        "timestamp" => time * 1000
-      }
-      event
       end
+      event = {
+        "fields" => fields,
+        "text" => log.gsub(/^$\n/, ''),
+        "timestamp" => time * 1000
+      }
+      event
+    end
 
-
-
+    def flatten_record(record, prefix=[])
+      ret = {}
 
-
-
-
-
-
-
-
-    end
+      case record
+      when Hash
+        record.each do |key, value|
+          if @log_text_keys.include?(key)
+            ret.merge!({key.to_s => value})
+          else
+            ret.merge! flatten_record(value, prefix + [key.to_s])
           end
-
-
-
-
-
-
-
-      ret
+        end
+      when Array
+        record.each do |value|
+          ret.merge! flatten_record(value, prefix)
+        end
+      else
+        return {prefix.join(@flatten_hashes_separator) => record}
       end
+      ret
+    end
 
-
-
-
-
-
-
-
-
+    def create_request(tag, time, record)
+      url = format_url()
+      uri = URI.parse(url)
+      req = Net::HTTP.const_get(@http_method.to_s.capitalize).new(uri.path)
+      set_body(req, tag, time, record)
+      set_header(req)
+      return req, uri
+    end
 
+    def send_request(req, uri)
+      is_rate_limited = (@rate_limit_msec != 0 and not @last_request_time.nil?)
+      if is_rate_limited and ((Time.now.to_f - @last_request_time) * 1000.0 < @rate_limit_msec)
+        $log.info('Dropped request due to rate limiting')
+        return
+      end
 
-
-
-
-
-
+      if @auth and @auth.to_s.eql? "basic"
+        req.basic_auth(@username, @password)
+      end
+      begin
+        retries ||= 2
+        response = nil
+        @last_request_time = Time.now.to_f
+
+        http_conn = Net::HTTP.new(uri.host, uri.port)
+        # For debugging, set this
+        http_conn.set_debug_output($stdout) if @http_conn_debug
+        http_conn.use_ssl = (uri.scheme == 'https')
+        if http_conn.use_ssl?
+          http_conn.ca_file = @ca_file
         end
+        http_conn.verify_mode = @ssl_verify_mode
 
-
-
+        response = http_conn.start do |http|
+          http.read_timeout = @request_timeout
+          http.request(req)
         end
-
-
-
-
-
-
-
-
-
-
-
-
-
+      rescue => e # rescue all StandardErrors
+        # server didn't respond
+        # Be careful while turning on below log, if LI instance can't be reached and you're sending
+        # log-container logs to LI as well, you may end up in a cycle.
+        # TODO handle the cyclic case at plugin level if possible.
+        # $log.warn "Net::HTTP.#{req.method.capitalize} raises exception: " \
+        #   "#{e.class}, '#{e.message}', \n Request: #{req.body[1..1024]}"
+        retry unless (retries -= 1).zero?
+        raise e if @raise_on_error
+      else
+        unless response and response.is_a?(Net::HTTPSuccess)
+          res_summary = if response
+                          "Response Code: #{response.code}\n"\
+                          "Response Message: #{response.message}\n" \
+                          "Response Body: #{response.body}"
+                        else
+                          "Response = nil"
+                        end
+          # ditto cyclic warning
+          # $log.warn "Failed to #{req.method} #{uri}\n(#{res_summary})\n" \
+          #   "Request Size: #{req.body.size} Request Body: #{req.body[1..1024]}"
+        end #end unless
+      end # end begin
+    end # end send_request
+
+    def send_events(uri, events)
+      req = Net::HTTP.const_get(@http_method.to_s.capitalize).new(uri.path)
+      event_req = {
+        "events" => events
+      }
+      req.body = event_req.to_json
+      set_header(req)
+      send_request(req, uri)
+    end
 
-
-
-
-
-
-
-
-
-
-
-        #   "#{e.class}, '#{e.message}', \n Request: #{req.body[1..1024]}"
-        retry unless (retries -= 1).zero?
-        raise e if @raise_on_error
+    def handle_records(tag, es)
+      url = format_url()
+      uri = URI.parse(url)
+      events = []
+      count = 0
+      es.each do |time, record|
+        new_event = create_loginsight_event(tag, time, record)
+        new_event_size = new_event.to_json.size
+        if new_event_size > @max_batch_size
+          $log.warn "dropping event larger than max_batch_size: #{new_event.to_json[1..1024]}"
         else
-
-
-
-
-            "Response Body: #{response.body}"
-          else
-            "Response = nil"
-          end
-          # ditto cyclic warning
-          # $log.warn "Failed to #{req.method} #{uri}\n(#{res_summary})\n" \
-          #   "Request Size: #{req.body.size} Request Body: #{req.body[1..1024]}"
-        end #end unless
-      end # end begin
-    end # end send_request
-
-    def send_events(uri, events)
-      req = Net::HTTP.const_get(@http_method.to_s.capitalize).new(uri.path)
-      event_req = {
-        "events" => events
-      }
-      req.body = event_req.to_json
-      set_header(req)
-      send_request(req, uri)
-    end
-
-    def handle_records(tag, es)
-      url = format_url()
-      uri = URI.parse(url)
-      events = []
-      count = 0
-      es.each do |time, record|
-        new_event = create_loginsight_event(tag, time, record)
-        new_event_size = new_event.to_json.size
-        if new_event_size > @max_batch_size
-          $log.warn "dropping event larger than max_batch_size: #{new_event.to_json[1..1024]}"
-        else
-          if (count + new_event_size) > @max_batch_size
-            send_events(uri, events)
-            events = []
-            count = 0
-          end
-          count += new_event_size
-          events << new_event
+          if (count + new_event_size) > @max_batch_size
+            send_events(uri, events)
+            events = []
+            count = 0
           end
-
-
-      send_events(uri, events)
+          count += new_event_size
+          events << new_event
         end
       end
-
-
-      handle_records(tag, es)
-      chain.next
+      if count > 0
+        send_events(uri, events)
       end
     end
+
+    def process(tag, es)
+      handle_records(tag, es)
+    end
   end
 end
-
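
The rewrite above also ports the plugin from the deprecated Fluent::Output base class to the v1 Fluent::Plugin::Output API (emit/chain.next becomes process), and replaces the old hardcoded chain of gsub calls with the configurable shorten_keys hash, applied after LI-invalid characters ('/', '.', '-', '\', '@') are replaced with the separator. A standalone sketch that mirrors (but does not import) that key normalization:

  # Default substitutions from the diff, applied in insertion order.
  DEFAULT_SHORTEN_KEYS = {
    'kubernetes_' => 'k8s_',
    'namespace'   => 'ns',
    'labels_'     => '',
    '_name'       => '',
    '_hash'       => '',
    'container_'  => ''
  }.freeze

  def shorten_key(key, separator = '_', shorten_keys = DEFAULT_SHORTEN_KEYS)
    # Replace characters Loginsight rejects in field names, then shorten.
    key = key.gsub(/[\/\.\-\\@]/, separator).downcase
    shorten_keys.each { |match, replace| key = key.gsub(match.to_s, replace) }
    key
  end

  puts shorten_key('kubernetes.namespace_name')  # => "k8s_ns"
  puts shorten_key('kubernetes.labels.app')      # => "k8s_app"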