fluent-plugin-containiq 0.0.7

Files changed (40)
  1. checksums.yaml +7 -0
  2. data/.github/workflows/main.yaml +22 -0
  3. data/.github/workflows/release.yaml +22 -0
  4. data/.gitignore +5 -0
  5. data/Dockerfile +58 -0
  6. data/Gemfile +3 -0
  7. data/LICENSE +202 -0
  8. data/README.md +67 -0
  9. data/Rakefile +14 -0
  10. data/fluent-plugin-containiq.gemspec +28 -0
  11. data/k8s/Gemfile +19 -0
  12. data/k8s/README.md +3 -0
  13. data/k8s/conf/fluent.conf +16 -0
  14. data/k8s/conf/kubernetes/cluster-autoscaler.conf +11 -0
  15. data/k8s/conf/kubernetes/containers.conf +10 -0
  16. data/k8s/conf/kubernetes/docker.conf +11 -0
  17. data/k8s/conf/kubernetes/etcd.conf +10 -0
  18. data/k8s/conf/kubernetes/glbc.conf +11 -0
  19. data/k8s/conf/kubernetes/kube-apiserver-audit.conf +23 -0
  20. data/k8s/conf/kubernetes/kube-apiserver.conf +11 -0
  21. data/k8s/conf/kubernetes/kube-controller-manager.conf +11 -0
  22. data/k8s/conf/kubernetes/kube-proxy.conf +11 -0
  23. data/k8s/conf/kubernetes/kube-scheduler.conf +11 -0
  24. data/k8s/conf/kubernetes/kubelet.conf +11 -0
  25. data/k8s/conf/kubernetes/rescheduler.conf +11 -0
  26. data/k8s/conf/kubernetes/salt.conf +13 -0
  27. data/k8s/conf/kubernetes/startupscript.conf +10 -0
  28. data/k8s/conf/kubernetes.conf +36 -0
  29. data/k8s/conf/prometheus.conf +13 -0
  30. data/k8s/conf/systemd.conf +43 -0
  31. data/k8s/conf/tail_container_parse.conf +4 -0
  32. data/k8s/entrypoint.sh +4 -0
  33. data/k8s/fluentd-daemonset.yaml +111 -0
  34. data/k8s/plugins/.gitkeep +0 -0
  35. data/k8s/plugins/parser_kubernetes.rb +68 -0
  36. data/k8s/plugins/parser_multiline_kubernetes.rb +69 -0
  37. data/lib/fluent/plugin/out_containiq.rb +173 -0
  38. data/test/helper.rb +8 -0
  39. data/test/plugin/test_out_containiq.rb +18 -0
  40. metadata +159 -0
data/k8s/conf/kubernetes/startupscript.conf ADDED
@@ -0,0 +1,10 @@
+ <source>
+   @type tail
+   @id in_tail_startupscript
+   path /var/log/startupscript.log
+   pos_file /var/log/fluentd-startupscript.log.pos
+   tag startupscript
+   <parse>
+     @type syslog
+   </parse>
+ </source>
data/k8s/conf/kubernetes.conf ADDED
@@ -0,0 +1,36 @@
+ <label @FLUENT_LOG>
+   <match fluent.**>
+     @type null
+     @id ignore_fluent_logs
+   </match>
+ </label>
+
+ @include kubernetes/cluster-autoscaler.conf
+ @include kubernetes/containers.conf
+ @include kubernetes/docker.conf
+ @include kubernetes/etcd.conf
+ @include kubernetes/glbc.conf
+ @include kubernetes/kube-apiserver-audit.conf
+ @include kubernetes/kube-apiserver.conf
+ @include kubernetes/kube-controller-manager.conf
+ @include kubernetes/kube-proxy.conf
+ @include kubernetes/kube-scheduler.conf
+ @include kubernetes/kubelet.conf
+ @include kubernetes/rescheduler.conf
+ @include kubernetes/salt.conf
+ @include kubernetes/startupscript.conf
+
+
+ <filter kubernetes.**>
+   @type kubernetes_metadata
+   @id filter_kube_metadata
+   kubernetes_url "#{ENV['FLUENT_FILTER_KUBERNETES_URL'] || 'https://' + ENV.fetch('KUBERNETES_SERVICE_HOST') + ':' + ENV.fetch('KUBERNETES_SERVICE_PORT') + '/api'}"
+   verify_ssl "#{ENV['KUBERNETES_VERIFY_SSL'] || true}"
+   ca_file "#{ENV['KUBERNETES_CA_FILE']}"
+   skip_labels "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_LABELS'] || 'false'}"
+   skip_container_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA'] || 'false'}"
+   skip_master_url "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL'] || 'false'}"
+   skip_namespace_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA'] || 'false'}"
+   watch "#{ENV['FLUENT_KUBERNETES_WATCH'] || 'true'}"
+ </filter>
+
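For reference, with none of the FLUENT_* overrides set, the filter above resolves to roughly the following inside a cluster (the kubernetes_url value is illustrative; KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are injected by Kubernetes):

    <filter kubernetes.**>
      @type kubernetes_metadata
      @id filter_kube_metadata
      kubernetes_url https://10.96.0.1:443/api
      verify_ssl true
      skip_labels false
      skip_container_metadata false
      skip_master_url false
      skip_namespace_metadata false
      watch true
    </filter>

Note that the DaemonSet manifest below sets the four FLUENT_KUBERNETES_METADATA_SKIP_* variables to 'true', so in the shipped deployment the label, container, namespace, and master-URL metadata lookups are all skipped.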
data/k8s/conf/prometheus.conf ADDED
@@ -0,0 +1,13 @@
+ # Prometheus metrics exposed on 0.0.0.0:24231/metrics
+ <source>
+   @type prometheus
+   @id in_prometheus
+   bind "#{ENV['FLUENTD_PROMETHEUS_BIND'] || '0.0.0.0'}"
+   port "#{ENV['FLUENTD_PROMETHEUS_PORT'] || '24231'}"
+   metrics_path "#{ENV['FLUENTD_PROMETHEUS_PATH'] || '/metrics'}"
+ </source>
+
+ <source>
+   @type prometheus_output_monitor
+   @id in_prometheus_output_monitor
+ </source>
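Assuming the default bind and port above, a quick smoke test from inside the pod (or via kubectl port-forward) is to scrape the endpoint directly:

    curl http://localhost:24231/metrics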
data/k8s/conf/systemd.conf ADDED
@@ -0,0 +1,43 @@
+
+ # Logs from systemd-journal for interesting services.
+ <source>
+   @type systemd
+   @id in_systemd_kubelet
+   matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+   <storage>
+     @type local
+     persistent true
+     path /var/log/fluentd-journald-kubelet-cursor.json
+   </storage>
+   read_from_head true
+   tag kubelet
+ </source>
+
+ # Logs from docker-systemd
+ <source>
+   @type systemd
+   @id in_systemd_docker
+   matches [{ "_SYSTEMD_UNIT": "docker.service" }]
+   <storage>
+     @type local
+     persistent true
+     path /var/log/fluentd-journald-docker-cursor.json
+   </storage>
+   read_from_head true
+   tag docker.systemd
+ </source>
+
+ # Logs from systemd-journal for interesting services.
+ <source>
+   @type systemd
+   @id in_systemd_bootkube
+   matches [{ "_SYSTEMD_UNIT": "bootkube.service" }]
+   <storage>
+     @type local
+     persistent true
+     path /var/log/fluentd-journald-bootkube-cursor.json
+   </storage>
+   read_from_head true
+   tag bootkube
+ </source>
+
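Note: the @type systemd sources above are provided by the fluent-plugin-systemd input plugin, which must be installed in the agent image alongside this gem.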
data/k8s/conf/tail_container_parse.conf ADDED
@@ -0,0 +1,4 @@
+ <parse>
+   @type "#{ENV['FLUENT_CONTAINER_TAIL_PARSER_TYPE'] || 'json'}"
+   time_format "#{ENV['FLUENT_CONTAINER_TAIL_PARSER_TIME_FORMAT'] || '%Y-%m-%dT%H:%M:%S.%NZ'}"
+ </parse>
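The json default matches the Docker json-file logging driver; an illustrative (made-up) line that the default parser and time_format accept:

    {"log":"hello from my-app\n","stream":"stdout","time":"2021-10-05T14:48:00.123456789Z"}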
data/k8s/entrypoint.sh ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env sh
+
+ # run with -vv to show debugging messages
+ exec fluentd -vv -c /fluentd/etc/${FLUENTD_CONF} -p /fluentd/plugins --gemfile /fluentd/Gemfile ${FLUENTD_OPT}
data/k8s/fluentd-daemonset.yaml ADDED
@@ -0,0 +1,111 @@
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+   name: containiq
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+   name: fluentd
+   namespace: containiq
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+   name: fluentd
+   namespace: containiq
+ rules:
+   - apiGroups:
+       - ''
+     resources:
+       - pods
+       - namespaces
+     verbs:
+       - get
+       - list
+       - watch
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+   name: fluentd
+ roleRef:
+   kind: ClusterRole
+   name: fluentd
+   apiGroup: rbac.authorization.k8s.io
+ subjects:
+   - kind: ServiceAccount
+     name: fluentd
+     namespace: containiq
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+   name: fluentd
+   namespace: containiq
+   labels:
+     name: fluentd
+ spec:
+   selector:
+     matchLabels:
+       name: fluentd
+   template:
+     metadata:
+       labels:
+         name: fluentd
+     spec:
+       serviceAccount: fluentd
+       serviceAccountName: fluentd
+       tolerations:
+         - key: node-role.kubernetes.io/master
+           effect: NoSchedule
+       containers:
+         - name: fluentd
+           imagePullPolicy: Always
+           image: containiq/logging-agent
+           env:
+             # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
+             - name: K8S_NODE_NAME
+               valueFrom:
+                 fieldRef:
+                   fieldPath: spec.nodeName
+             # TODO: should we set this here or in another k8s resource?
+             - name: INGEST_LOGS_ENDPOINT_URL
+               value: https://localhost
+             - name: NOTIFICATION_FILE_LOCATION
+               value: /containiq/notification-config.yaml
+             - name: FLUENT_KUBERNETES_METADATA_SKIP_LABELS
+               value: 'true'
+             - name: FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA
+               value: 'true'
+             - name: FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA
+               value: 'true'
+             - name: FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL
+               value: 'true'
+           resources:
+             limits:
+               memory: 200Mi
+             requests:
+               cpu: 100m
+               memory: 200Mi
+           volumeMounts:
+             - name: varlog
+               mountPath: /var/log
+             - name: varlibdockercontainers
+               mountPath: /var/lib/docker/containers
+               readOnly: true
+             - name: containiqconfig
+               mountPath: /containiq
+       volumes:
+         - name: varlog
+           hostPath:
+             path: /var/log
+         - name: varlibdockercontainers
+           hostPath:
+             path: /var/lib/docker/containers
+
+         - name: containiqconfig
+           projected:
+             sources:
+               - secret:
+                   name: containiq-notification
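The projected volume above expects a Secret named containiq-notification to exist in the containiq namespace; its file surfaces at the NOTIFICATION_FILE_LOCATION path that get_api_key in out_containiq.rb (below) reads, expecting a `key: <value>` first line. A sketch of creating it, with a hypothetical local file name:

    kubectl create secret generic containiq-notification \
      --namespace containiq \
      --from-file=notification-config.yaml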
data/k8s/plugins/.gitkeep ADDED
File without changes
data/k8s/plugins/parser_kubernetes.rb ADDED
@@ -0,0 +1,68 @@
+ #
+ # Fluentd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ # The following Fluentd parser plugin aims to simplify the parsing of multiline
+ # logs found on Kubernetes nodes. Since many log files share the same format,
+ # and in order to simplify the configuration, this plugin provides a
+ # 'kubernetes' format parser (built on top of RegexpParser).
+ #
+ # When tailing files, this 'kubernetes' format should be applied to the
+ # following log file sources:
+ #
+ #   - /var/log/kubelet.log
+ #   - /var/log/kube-proxy.log
+ #   - /var/log/kube-apiserver.log
+ #   - /var/log/kube-controller-manager.log
+ #   - /var/log/kube-scheduler.log
+ #   - /var/log/rescheduler.log
+ #   - /var/log/glbc.log
+ #   - /var/log/cluster-autoscaler.log
+ #
+ # Usage:
+ #
+ # ---- fluentd.conf ----
+ #
+ # <source>
+ #   @type tail
+ #   path ./kubelet.log
+ #   read_from_head yes
+ #   tag kubelet
+ #   <parse>
+ #     @type kubernetes
+ #   </parse>
+ # </source>
+ #
+ # ---- EOF ----
+
+ require 'fluent/plugin/parser_regexp'
+
+ module Fluent
+   module Plugin
+     class KubernetesParser < RegexpParser
+       Fluent::Plugin.register_parser("kubernetes", self)
+
+       CONF_FORMAT_FIRSTLINE = %q{/^\w\d{4}/}
+       CONF_FORMAT1 = %q{/^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/m}
+       CONF_TIME_FORMAT = "%m%d %H:%M:%S.%N"
+
+       def configure(conf)
+         conf['expression'] = CONF_FORMAT1
+         conf['time_format'] = CONF_TIME_FORMAT
+         super
+       end
+     end
+   end
+ end
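For context, CONF_FORMAT1 targets the klog format emitted by the Kubernetes components listed above. A minimal Ruby sketch with a made-up log line, showing what the named captures yield:

    line = 'I0505 12:34:56.789012    3245 kubelet.go:123] Starting kubelet main sync loop.'
    m = /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/m.match(line)
    m[:severity] # => "I"
    m[:time]     # => "0505 12:34:56.789012"
    m[:pid]      # => "3245"
    m[:source]   # => "kubelet.go:123"
    m[:message]  # => "Starting kubelet main sync loop."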
data/k8s/plugins/parser_multiline_kubernetes.rb ADDED
@@ -0,0 +1,69 @@
+ #
+ # Fluentd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ # The following Fluentd parser plugin aims to simplify the parsing of multiline
+ # logs found on Kubernetes nodes. Since many log files share the same format,
+ # and in order to simplify the configuration, this plugin provides a
+ # 'multiline_kubernetes' format parser (built on top of MultilineParser).
+ #
+ # When tailing files, this 'multiline_kubernetes' format should be applied to
+ # the following log file sources:
+ #
+ #   - /var/log/kubelet.log
+ #   - /var/log/kube-proxy.log
+ #   - /var/log/kube-apiserver.log
+ #   - /var/log/kube-controller-manager.log
+ #   - /var/log/kube-scheduler.log
+ #   - /var/log/rescheduler.log
+ #   - /var/log/glbc.log
+ #   - /var/log/cluster-autoscaler.log
+ #
+ # Usage:
+ #
+ # ---- fluentd.conf ----
+ #
+ # <source>
+ #   @type tail
+ #   path ./kubelet.log
+ #   read_from_head yes
+ #   tag kubelet
+ #   <parse>
+ #     @type multiline_kubernetes
+ #   </parse>
+ # </source>
+ #
+ # ---- EOF ----
+
+ require 'fluent/plugin/parser_multiline'
+
+ module Fluent
+   module Plugin
+     class MultilineKubernetesParser < MultilineParser
+       Fluent::Plugin.register_parser("multiline_kubernetes", self)
+
+       CONF_FORMAT_FIRSTLINE = %q{/^\w\d{4}/}
+       CONF_FORMAT1 = %q{/^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/}
+       CONF_TIME_FORMAT = "%m%d %H:%M:%S.%N"
+
+       def configure(conf)
+         conf['format_firstline'] = CONF_FORMAT_FIRSTLINE
+         conf['format1'] = CONF_FORMAT1
+         conf['time_format'] = CONF_TIME_FORMAT
+         super
+       end
+     end
+   end
+ end
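This parser differs from the kubernetes one above only in wiring up format_firstline: a line matching /^\w\d{4}/ starts a new record, and any following line that does not match is folded into the previous record's message. Illustrative (made-up) input:

    E0505 12:34:56.789012    3245 kubelet.go:123] Failed to sync pod
        this continuation line lacks the klog prefix, so it joins the record above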
data/lib/fluent/plugin/out_containiq.rb ADDED
@@ -0,0 +1,173 @@
+ require "fluent/plugin/output"
+
+ module Fluent::Plugin
+   class ContainiqOutput < Fluent::Plugin::Output
+     Fluent::Plugin.register_output("containiq", self)
+
+     config_param :bulk_limit, :integer, default: 1000000 # Logz.io has a 1MB limit and recommends leaving some overhead
+     config_param :bulk_limit_warning_limit, :integer, default: nil # If fluent warnings are sent to the output, truncating is necessary to prevent a recursion
+     config_param :http_idle_timeout, :integer, default: 5
+     config_param :gzip, :bool, default: false # False for backward compatibility
+
+     def start
+       super
+       require 'net/http/persistent'
+
+       endpoint_url = ENV["INGEST_LOGS_ENDPOINT_URL"]
+       raise 'missing environment variable: INGEST_LOGS_ENDPOINT_URL' if endpoint_url.nil?
+
+       @uri = URI endpoint_url
+       log.debug "ContainIQ URL #{endpoint_url}"
+
+       @http = Net::HTTP::Persistent.new name: 'fluent-plugin-containiq'
+
+       api_key = get_api_key()
+       @http.headers['Authorization'] = "Bearer #{api_key}"
+
+       @http.headers['Content-Type'] = 'text/plain'
+       if @gzip
+         @http.headers['Content-Encoding'] = 'gzip'
+       end
+       @http.idle_timeout = @http_idle_timeout
+       @http.socket_options << [Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, 1]
+
+       log.debug "Started ContainIQ shipper..."
+     end
+
+     def shutdown
+       super
+     end
+
+     def formatted_to_msgpack_binary?
+       true
+     end
+
+     def multi_workers_ready?
+       true
+     end
+
+     def format(tag, time, record)
+       if time.is_a?(Fluent::EventTime)
+         sec_frac = time.to_f
+       else
+         sec_frac = time * 1.0
+       end
+       [tag, sec_frac, record].to_msgpack
+     end
+
+     def write(chunk)
+       encode_chunk(chunk) { |bulk_records, bulk_size|
+         send_bulk(bulk_records, bulk_size)
+       }
+     end
+
+     def encode_chunk(chunk)
+       records = []
+       bulk_size = 0
+       chunk.each { |tag, time, record|
+         record['timestamp'] ||= Time.at(time).iso8601(3)
+
+         begin
+           json_record = Yajl.dump(record)
+           record_size = json_record.size + (1 if !records.empty?).to_i # Accounting for trailing "\n"
+         rescue
+           log.error "Adding record #{record} to buffer failed. Exception: #{$!}"
+           next
+         end
+
+         if record_size > @bulk_limit
+           if @bulk_limit_warning_limit.is_a?(Integer)
+             log.warn "Record with size #{record_size} exceeds #{@bulk_limit} and can't be sent to ContainIQ. Record starts with (truncated at #{@bulk_limit_warning_limit} characters): #{json_record[0,@bulk_limit_warning_limit]}"
+             # Send the full message to debug facility
+             log.debug "Record with size #{record_size} exceeds #{@bulk_limit} and can't be sent to ContainIQ. Record is: #{json_record}"
+           else
+             log.warn "Record with size #{record_size} exceeds #{@bulk_limit} and can't be sent to ContainIQ. Record is: #{json_record}"
+           end
+           next
+         end
+         if bulk_size + record_size > @bulk_limit
+           yield(records, bulk_size)
+           records = []
+           bulk_size = 0
+         end
+         records.push(json_record)
+         bulk_size += record_size
+       }
+       if records
+         yield(records, bulk_size)
+       end
+     end
+
+     def send_bulk(bulk_records, bulk_size)
+       log.debug "Sending a bulk of #{bulk_records.size} records, size #{bulk_size}B to ContainIQ"
+
+       # Setting up our request
+       post = Net::HTTP::Post.new @uri.request_uri
+
+       # TODO: not sure we need this; Logz.io included it with the following comment:
+       # Logz.io bulk http endpoint expecting log line with \n delimiter
+       post.body = bulk_records.join("\n")
+       if @gzip
+         post.body = compress(post.body)
+       end
+
+       retry_count = 4 # How many times to resend failed bulks
+       sleep_interval = 2 # How long to sleep initially between retries
+
+       begin
+         retry_count.times do |counter|
+           should_retry = true
+           begin
+             response = @http.request @uri, post
+             if response.code != '200'
+               if response.code == '401'
+                 log.error "You are not authorized with ContainIQ! Token OK? Dropping logs..."
+                 should_retry = false
+               elsif response.code == '400'
+                 log.info "Got 400 code from ContainIQ. This means that some of your logs are too big or badly formatted. Response: #{response.body}"
+                 should_retry = false
+               else
+                 log.warn "Got HTTP #{response.code} from ContainIQ, not giving up just yet (Try #{counter + 1}/#{retry_count})"
+               end
+             else
+               log.debug "Successfully sent bulk of #{bulk_records.size} records, size #{bulk_size}B to ContainIQ"
+               should_retry = false
+             end
+           rescue StandardError => e
+             log.warn "Error connecting to ContainIQ. Got exception: #{e} (Try #{counter + 1}/#{retry_count})"
+           end
+
+           if should_retry
+             if counter == retry_count - 1
+               log.error "Could not send your bulk after #{retry_count} tries. Sorry! Your bulk is: #{post.body}"
+               break
+             end
+             sleep(sleep_interval)
+             sleep_interval *= 2
+           else
+             return
+           end
+         end
+       rescue Exception => e
+         log.error "Got unexpected exception! Here: #{e}"
+       end
+     end
+
+     def compress(string)
+       wio = StringIO.new("w")
+       w_gz = Zlib::GzipWriter.new(wio)
+       w_gz.write(string)
+       w_gz.close
+       wio.string
+     end
+
+     def get_api_key
+       file = ENV["NOTIFICATION_FILE_LOCATION"]
+       raise 'missing environment variable: NOTIFICATION_FILE_LOCATION' if file.nil?
+       first_line = File.open(file, &:readline)
+       scan = first_line.gsub("\n", '').scan(/key: (.+)/i)
+       raise 'unable to parse secret key' if scan.empty?
+       scan.first.first
+     end
+   end
+ end
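For reference, a minimal sketch of wiring this output into fluent.conf (the match pattern is illustrative; the endpoint and API key come from the INGEST_LOGS_ENDPOINT_URL and NOTIFICATION_FILE_LOCATION environment variables rather than from plugin parameters):

    <match kubernetes.**>
      @type containiq
      gzip true
    </match>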
data/test/helper.rb ADDED
@@ -0,0 +1,8 @@
+ $LOAD_PATH.unshift(File.expand_path("../../", __FILE__))
+ require "test-unit"
+ require "fluent/test"
+ require "fluent/test/driver/output"
+ require "fluent/test/helpers"
+
+ Test::Unit::TestCase.include(Fluent::Test::Helpers)
+ Test::Unit::TestCase.extend(Fluent::Test::Helpers)
data/test/plugin/test_out_containiq.rb ADDED
@@ -0,0 +1,18 @@
+ require "helper"
+ require "fluent/plugin/out_containiq.rb"
+
+ class ContainiqOutputTest < Test::Unit::TestCase
+   setup do
+     Fluent::Test.setup
+   end
+
+   test "failure" do
+     flunk
+   end
+
+   private
+
+   def create_driver(conf)
+     Fluent::Test::Driver::Output.new(Fluent::Plugin::ContainiqOutput).configure(conf)
+   end
+ end
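The suite currently contains only the gem template's failing placeholder. A sketch of a real test one might add on top of create_driver (hypothetical, not part of the gem; it only checks config_param defaults):

    test "configure applies defaults" do
      d = create_driver("")
      assert_equal 1000000, d.instance.bulk_limit
      assert_equal false, d.instance.gzip
    end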