fluent-plugin-datadog 0.11.1 → 0.14.0
- checksums.yaml +4 -4
- data/.gitignore +0 -1
- data/README.md +47 -8
- data/fluent-plugin-datadog.gemspec +12 -5
- data/lib/fluent/plugin/out_datadog.rb +338 -124
- data/lib/fluent/plugin/version.rb +5 -0
- metadata +77 -14
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8907f2de3502bd5d7e4ee138dba441f1d5d0197537b8a9b246d46038f12fa045
+  data.tar.gz: '09e4e69d440b126a68f54bc535c2c2bba8ab1b199e70e306fd0e08ae9fbc5a37'
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f895096ecb110d7d9a269e882f02c5ba844a4b6593163dcfd82f81f570eb4a3979ffa2f0fedf398f02d22445651005fe6cd9b98172fa91e1ef49b5f55d8c9d11
+  data.tar.gz: ac780383e151377090900d904dc855a161c2c6abaff43bf7ac7b27140fecb1ee461e0cd4bdbcdbc39c9cdf28d569eede8a30634094e7516f0b60d899246846a8
data/.gitignore CHANGED
data/README.md CHANGED
@@ -1,11 +1,15 @@
 # Fluentd output plugin for Datadog
 
-
-streams logs directly to Datadog - so no need to use a log shipper
+This output plugin allows sending logs directly from Fluentd to Datadog - so you don't have to use a separate log shipper
 if you don't wan't to.
 
 ## Pre-requirements
 
+| fluent-plugin-datadog | Fluentd    | Ruby   |
+|:----------------------|:-----------|:-------|
+| >= 0.12.0             | >= v1      | >= 2.4 |
+| < 0.12.0              | >= v0.12.0 | >= 2.1 |
+
 To add the plugin to your fluentd agent, use the following command:
 
 gem install fluent-plugin-datadog
@@ -19,7 +23,7 @@ If you installed the td-agent instead
 
 To match events and send them to Datadog, simply add the following code to your configuration file.
 
-
+HTTP example:
 
 ```xml
 # Match events tagged with "datadog.**" and
@@ -35,10 +39,21 @@ TCP example:
   tag_key 'tag'
 
   # Optional parameters
-  dd_source '<INTEGRATION_NAME>'
-  dd_tags '<KEY1:
+  dd_source '<INTEGRATION_NAME>'
+  dd_tags '<KEY1:VALUE1>,<KEY2:VALUE2>'
   dd_sourcecategory '<MY_SOURCE_CATEGORY>'
 
+  # Optional http proxy
+  http_proxy 'http://my-proxy.example'
+
+  <buffer>
+    @type memory
+    flush_thread_count 4
+    flush_interval 3s
+    chunk_limit_size 5m
+    chunk_limit_records 500
+  </buffer>
+
 </match>
 ```
 
@@ -72,15 +87,21 @@ As fluent-plugin-datadog is an output_buffer, you can set all output_buffer properties
 | **tag_key** | Where to store the Fluentd tag. | "tag" |
 | **timestamp_key** | Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. | "@timestamp" |
 | **use_ssl** | If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. | true |
-| **
+| **no_ssl_validation** | Disable SSL validation (useful for proxy forwarding) | false |
+| **ssl_port** | Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. | 443 |
 | **max_retries** | The number of retries before the output plugin stops. Set to -1 for unlimited retries | -1 |
+| **max_backoff** | The maximum time waited between each retry in seconds | 30 |
+| **use_http** | Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516 | true |
+| **use_compression** | Enable log compression for HTTP | true |
+| **compression_level** | Set the log compression level for HTTP (1 to 9, 9 being the best ratio) | 6 |
 | **dd_source** | This tells Datadog what integration it is | nil |
 | **dd_sourcecategory** | Multiple value attribute. Can be used to refine the source attribute | nil |
 | **dd_tags** | Custom tags with the following format "key1:value1, key2:value2" | nil |
 | **dd_hostname** | Used by Datadog to identify the host submitting the logs. | `hostname -f` |
 | **service** | Used by Datadog to correlate between logs, traces and metrics. | nil |
-| **port** | Proxy port when logs are not directly forwarded to Datadog and ssl is not used |
-| **host** | Proxy endpoint when logs are not directly forwarded to Datadog | intake.logs.datadoghq.com |
+| **port** | Proxy port when logs are not directly forwarded to Datadog and ssl is not used | 80 |
+| **host** | Proxy endpoint when logs are not directly forwarded to Datadog | http-intake.logs.datadoghq.com |
+| **http_proxy** | HTTP proxy, only takes effect if HTTP forwarding is enabled (`use_http`). Defaults to `HTTP_PROXY`/`http_proxy` env vars. | nil |
 
 ### Docker and Kubernetes tags
 
@@ -105,6 +126,24 @@ Configuration example:
 </filter>
 ```
 
+### Encoding
+
+Datadog's API expects log messages to be encoded in UTF-8.
+If some of your logs are encoded with a different encoding, we recommend using the [`record_modifier` filter plugin](https://github.com/repeatedly/fluent-plugin-record-modifier#char_encoding)
+to encode these logs to UTF-8.
+
+Configuration example:
+
+```
+# Change encoding of logs tagged with "datadog.**"
+<filter datadog.**>
+  @type record_modifier
+
+  # change the encoding from the '<SOURCE_ENCODING>' of your logs to 'utf-8'
+  char_encoding <SOURCE_ENCODING>:utf-8
+</filter>
+```
+
 ## Build
 
 To build a new version of this plugin and push it to RubyGems:
data/fluent-plugin-datadog.gemspec CHANGED
@@ -7,21 +7,28 @@
 lib = File.expand_path('../lib', __FILE__)
 $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
 
+require "fluent/plugin/version.rb"
+
 Gem::Specification.new do |spec|
   spec.name = "fluent-plugin-datadog"
-  spec.version =
+  spec.version = DatadogFluentPlugin::VERSION
   spec.authors = ["Datadog Solutions Team"]
   spec.email = ["support@datadoghq.com"]
   spec.summary = "Datadog output plugin for Fluent event collector"
   spec.homepage = "http://datadoghq.com"
-  spec.license = "Apache
+  spec.license = "Apache-2.0"
 
-  spec.files = [".gitignore", "Gemfile", "LICENSE", "README.md", "Rakefile", "fluent-plugin-datadog.gemspec", "lib/fluent/plugin/out_datadog.rb"]
+  spec.files = [".gitignore", "Gemfile", "LICENSE", "README.md", "Rakefile", "fluent-plugin-datadog.gemspec", "lib/fluent/plugin/version.rb", "lib/fluent/plugin/out_datadog.rb"]
   spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
   spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
   spec.require_paths = ["lib"]
 
-  spec.
-  spec.
+  spec.add_runtime_dependency "fluentd", [">= 1", "< 2"]
+  spec.add_runtime_dependency "net-http-persistent", '~> 3.1'
+
+  spec.add_development_dependency "bundler", "~> 2.1"
+  spec.add_development_dependency "test-unit", '~> 3.1'
+  spec.add_development_dependency "rake", "~> 12.0"
   spec.add_development_dependency "yajl-ruby", "~> 1.2"
+  spec.add_development_dependency 'webmock', "~> 3.6.0"
 end
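The new `lib/fluent/plugin/version.rb` (listed as +5 lines in the file summary) is not expanded in this diff. Judging from the `DatadogFluentPlugin::VERSION` constant the gemspec requires here, and the `DD-EVP-ORIGIN-VERSION` header set below, it presumably reads something like:

```ruby
# Presumed content of lib/fluent/plugin/version.rb (not shown in this diff)
module DatadogFluentPlugin
  VERSION = '0.14.0'
end
```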
data/lib/fluent/plugin/out_datadog.rb CHANGED
@@ -3,51 +3,81 @@
 # This product includes software developed at Datadog (https://www.datadoghq.com/).
 # Copyright 2018 Datadog, Inc.
 
-require
-require
-require
+require "socket"
+require "openssl"
+require "yajl"
+require "zlib"
+require "fluent/plugin/output"
 
-
-
+require_relative "version"
+
+class Fluent::DatadogOutput < Fluent::Plugin::Output
+  class RetryableError < StandardError;
+  end
+
+  # Max limits for transport regardless of Fluentd buffer, respecting https://docs.datadoghq.com/api/?lang=bash#logs
+  DD_MAX_BATCH_LENGTH = 500
+  DD_MAX_BATCH_SIZE = 5000000
+  DD_TRUNCATION_SUFFIX = "...TRUNCATED..."
+
+  DD_DEFAULT_HTTP_ENDPOINT = "http-intake.logs.datadoghq.com"
+  DD_DEFAULT_TCP_ENDPOINT = "intake.logs.datadoghq.com"
+
+  helpers :compat_parameters
+
+  DEFAULT_BUFFER_TYPE = "memory"
 
   # Register the plugin
   Fluent::Plugin.register_output('datadog', self)
 
   # Output settings
-  config_param :
-  config_param :
-  config_param :
-  config_param :
-  config_param :
-  config_param :
-  config_param :
-  config_param :
-  config_param :dd_hostname, :string, :default => nil
+  config_param :include_tag_key, :bool, :default => false
+  config_param :tag_key, :string, :default => 'tag'
+  config_param :timestamp_key, :string, :default => '@timestamp'
+  config_param :service, :string, :default => nil
+  config_param :dd_sourcecategory, :string, :default => nil
+  config_param :dd_source, :string, :default => nil
+  config_param :dd_tags, :string, :default => nil
+  config_param :dd_hostname, :string, :default => nil
 
   # Connection settings
-  config_param :host,
-  config_param :use_ssl,
-  config_param :port,
-  config_param :ssl_port,
-  config_param :max_retries,
-  config_param :
+  config_param :host, :string, :default => DD_DEFAULT_HTTP_ENDPOINT
+  config_param :use_ssl, :bool, :default => true
+  config_param :port, :integer, :default => 80
+  config_param :ssl_port, :integer, :default => 443
+  config_param :max_retries, :integer, :default => -1
+  config_param :max_backoff, :integer, :default => 30
+  config_param :use_http, :bool, :default => true
+  config_param :use_compression, :bool, :default => true
+  config_param :compression_level, :integer, :default => 6
+  config_param :no_ssl_validation, :bool, :default => false
+  config_param :http_proxy, :string, :default => nil
+  config_param :force_v1_routes, :bool, :default => false
+
+  # Format settings
+  config_param :use_json, :bool, :default => true
 
   # API Settings
-  config_param :api_key,
+  config_param :api_key, :string, secret: true
 
-
-
+  config_section :buffer do
+    config_set_default :@type, DEFAULT_BUFFER_TYPE
   end
 
-
-
-  define_method("log") { $log }
+  def initialize
+    super
   end
 
   def configure(conf)
+    compat_parameters_convert(conf, :buffer)
     super
     return if @dd_hostname
 
+    if not @use_http and @host == DD_DEFAULT_HTTP_ENDPOINT
+      @host = DD_DEFAULT_TCP_ENDPOINT
+    end
+
+    # Set dd_hostname if not already set (can be set when using fluentd as aggregator)
     @dd_hostname = %x[hostname -f 2> /dev/null].strip
     @dd_hostname = Socket.gethostname if @dd_hostname.empty?
   end
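Note the endpoint fallback the new `configure` performs: if HTTP forwarding is turned off while `host` is still the new HTTP default, the plugin switches to the TCP intake. A standalone sketch of that selection, reusing the constants above (not plugin code):

```ruby
DD_DEFAULT_HTTP_ENDPOINT = "http-intake.logs.datadoghq.com"
DD_DEFAULT_TCP_ENDPOINT  = "intake.logs.datadoghq.com"

def resolve_host(use_http, host)
  # HTTP forwarding off + host left at the HTTP default => TCP intake endpoint
  (!use_http && host == DD_DEFAULT_HTTP_ENDPOINT) ? DD_DEFAULT_TCP_ENDPOINT : host
end

p resolve_host(false, DD_DEFAULT_HTTP_ENDPOINT) # => "intake.logs.datadoghq.com"
p resolve_host(false, "my-proxy.example")       # => "my-proxy.example" (explicit hosts are kept)
```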
@@ -56,41 +86,22 @@ class Fluent::DatadogOutput < Fluent::BufferedOutput
     true
   end
 
-  def
-
-    context = OpenSSL::SSL::SSLContext.new
-    socket = TCPSocket.new @host, @ssl_port
-    ssl_client = OpenSSL::SSL::SSLSocket.new socket, context
-    ssl_client.connect
-    return ssl_client
-  else
-    return TCPSocket.new @host, @port
-  end
+  def formatted_to_msgpack_binary?
+    true
   end
 
   def start
     super
-    @
-    @running = true
-
-    if @tcp_ping_rate > 0
-      @timer = Thread.new do
-        while @running do
-          messages = Array.new
-          messages.push("fp\n")
-          send_to_datadog(messages)
-          sleep(@tcp_ping_rate)
-        end
-      end
-    end
+    @client = new_client(log, @api_key, @use_http, @use_ssl, @no_ssl_validation, @host, @ssl_port, @port, @http_proxy, @use_compression, @force_v1_routes)
   end
 
   def shutdown
     super
-
-
-
-
+  end
+
+  def terminate
+    super
+    @client.close if @client
   end
 
   # This method is called when an event reaches Fluentd.
@@ -98,92 +109,296 @@ class Fluent::DatadogOutput < Fluent::BufferedOutput
   # When Fluent::EventTime is msgpack'ed it gets converted to int with seconds
   # precision only. We explicitly convert it to floating point number, which
   # is compatible with Time.at below.
-
+    record = enrich_record(tag, time.to_f, record)
+    if @use_http
+      record = Yajl.dump(record)
+    else
+      if @use_json
+        record = "#{api_key} #{Yajl.dump(record)}"
+      else
+        record = "#{api_key} #{record}"
+      end
+    end
+    [record].to_msgpack
   end
 
+
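To make the reworked `format` concrete, here is a rough sketch of what one buffered entry holds in the default HTTP mode; the record fields are illustrative, not taken from the plugin:

```ruby
require "yajl"
require "msgpack"

# With use_http true: the enriched record is JSON-encoded, wrapped in a
# one-element array, and msgpack'ed into the buffer chunk.
record = { "message" => "GET / 200", "ddsource" => "nginx" } # as returned by enrich_record
entry = [Yajl.dump(record)].to_msgpack
# write() later recovers the string with chunk.msgpack_each { |r| events.push r[0] }

# In TCP mode (use_http false, use_json true) the JSON is instead prefixed
# with the API key: "#{api_key} #{Yajl.dump(record)}"
```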
   # NOTE! This method is called by internal thread, not Fluentd's main thread.
   # 'chunk' is a buffer chunk that includes multiple formatted events.
   def write(chunk)
-
+    begin
+      if @use_http
+        events = Array.new
+        chunk.msgpack_each do |record|
+          next if record.empty?
+          events.push record[0]
+        end
+        process_http_events(events, @use_compression, @compression_level, @max_retries, @max_backoff, DD_MAX_BATCH_LENGTH, DD_MAX_BATCH_SIZE)
+      else
+        chunk.msgpack_each do |record|
+          next if record.empty?
+          process_tcp_event(record[0], @max_retries, @max_backoff, DD_MAX_BATCH_SIZE)
+        end
+      end
+    rescue Exception => e
+      @logger.error("Uncaught processing exception in datadog forwarder #{e.message}")
+    end
+  end
+
+  # Process and send a set of http events. Potentially break down this set of http events in smaller batches
+  def process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size)
+    batches = batch_http_events(events, max_batch_length, max_batch_size)
+    batches.each do |batched_event|
+      formatted_events = format_http_event_batch(batched_event)
+      if use_compression
+        formatted_events = gzip_compress(formatted_events, compression_level)
+      end
+      @client.send_retries(formatted_events, max_retries, max_backoff)
+    end
+  end
 
-
-
-
+  # Process and send a single tcp event
+  def process_tcp_event(event, max_retries, max_backoff, max_batch_size)
+    if event.bytesize > max_batch_size
+      event = truncate(event, max_batch_size)
+    end
+    @client.send_retries(event, max_retries, max_backoff)
+  end
 
-
-
+  # Group HTTP events in batches
+  def batch_http_events(encoded_events, max_batch_length, max_request_size)
+    batches = []
+    current_batch = []
+    current_batch_size = 0
+    encoded_events.each_with_index do |encoded_event, i|
+      current_event_size = encoded_event.bytesize
+      # If this unique log size is bigger than the request size, truncate it
+      if current_event_size > max_request_size
+        encoded_event = truncate(encoded_event, max_request_size)
+        current_event_size = encoded_event.bytesize
       end
-
-
+
+      if (i > 0 and i % max_batch_length == 0) or (current_batch_size + current_event_size > max_request_size)
+        batches << current_batch
+        current_batch = []
+        current_batch_size = 0
       end
-
-
+
+      current_batch_size += encoded_event.bytesize
+      current_batch << encoded_event
+    end
+    batches << current_batch
+    batches
+  end
+
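A worked example of the batching rules, using toy limits in place of `DD_MAX_BATCH_LENGTH` (500) and `DD_MAX_BATCH_SIZE` (5 MB):

```ruby
# Standalone sketch mirroring batch_http_events with tiny limits
events = ["a" * 8, "b" * 8, "c" * 8, "d" * 8, "e" * 8]
max_batch_length = 2   # max events per batch
max_request_size = 20  # max bytes per batch

batches, current, size = [], [], 0
events.each_with_index do |ev, i|
  # Start a new batch when the count or byte limit would be exceeded
  if (i > 0 && i % max_batch_length == 0) || (size + ev.bytesize > max_request_size)
    batches << current
    current, size = [], 0
  end
  size += ev.bytesize
  current << ev
end
batches << current
p batches.map(&:length) # => [2, 2, 1]
```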
+  # Truncate events over the provided max length, appending a marker when truncated
+  def truncate(event, max_length)
+    if event.length > max_length
+      event = event[0..max_length - 1]
+      event[max(0, max_length - DD_TRUNCATION_SUFFIX.length)..max_length - 1] = DD_TRUNCATION_SUFFIX
+      return event
+    end
+    event
+  end
+
+  def max(a, b)
+    a > b ? a : b
+  end
+
+  # Format batch of http events
+  def format_http_event_batch(events)
+    "[#{events.join(',')}]"
+  end
+
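A quick check of how `truncate` applies `DD_TRUNCATION_SUFFIX`, re-run standalone with a toy limit:

```ruby
DD_TRUNCATION_SUFFIX = "...TRUNCATED..."

event = "x" * 30
max_length = 20
event = event[0..max_length - 1]  # hard cut at the limit
event[[0, max_length - DD_TRUNCATION_SUFFIX.length].max..max_length - 1] = DD_TRUNCATION_SUFFIX
p event        # => "xxxxx...TRUNCATED..."
p event.length # => 20
```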
+  # Enrich records with metadata such as service, tags or source
+  def enrich_record(tag, time, record)
+    if @dd_sourcecategory
+      record["ddsourcecategory"] ||= @dd_sourcecategory
+    end
+    if @dd_source
+      record["ddsource"] ||= @dd_source
+    end
+    if @dd_tags
+      record["ddtags"] ||= @dd_tags
+    end
+    if @service
+      record["service"] ||= @service
+    end
+    if @dd_hostname
+      # set the record hostname to the configured dd_hostname only
+      # if the record hostname is empty, ensuring having a hostname set
+      # even if the record doesn't contain any.
+      record["hostname"] ||= @dd_hostname
+    end
+
+    if @include_tag_key
+      record[@tag_key] = tag
+    end
+    # If @timestamp_key already exists, we don't overwrite it.
+    if @timestamp_key and record[@timestamp_key].nil? and time
+      record[@timestamp_key] = Time.at(time).utc.iso8601(3)
+    end
+
+    container_tags = get_container_tags(record)
+    unless container_tags.empty?
+      if record["ddtags"].nil? || record["ddtags"].empty?
+        record["ddtags"] = container_tags
+      else
+        record["ddtags"] = record["ddtags"] + "," + container_tags
       end
-
-
+    end
+    record
+  end
+
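For instance, given a config with `dd_source 'nginx'`, `service 'web'` and `include_tag_key true` (hypothetical values), `enrich_record` would turn a bare record into roughly:

```ruby
# Illustration only; the field values are made up
{ "message"    => "GET / 200",
  "ddsource"   => "nginx",
  "service"    => "web",
  "tag"        => "datadog.nginx",             # @tag_key defaults to 'tag'
  "@timestamp" => "2021-10-18T00:00:00.000Z" } # added only when absent
```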
+  # Compress logs with GZIP
+  def gzip_compress(payload, compression_level)
+    gz = StringIO.new
+    gz.set_encoding("BINARY")
+    z = Zlib::GzipWriter.new(gz, compression_level)
+    begin
+      z.write(payload)
+    ensure
+      z.close
+    end
+    gz.string
+  end
+
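A round-trip check of the compression helper using only the stdlib, with the default `compression_level` of 6:

```ruby
require "stringio"
require "zlib"

payload = '[{"message":"hello"}]'
gz = StringIO.new
gz.set_encoding("BINARY")
writer = Zlib::GzipWriter.new(gz, 6)
writer.write(payload)
writer.close
compressed = gz.string

# Decompressing yields the original payload
p Zlib::GzipReader.new(StringIO.new(compressed)).read == payload # => true
```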
+  # Build a new transport client
+  def new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, force_v1_routes)
+    if use_http
+      DatadogHTTPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, api_key, force_v1_routes
+    else
+      DatadogTCPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port
+    end
+  end
+
+  # Top level class for datadog transport clients, managing retries and backoff
+  class DatadogClient
+    def send_retries(payload, max_retries, max_backoff)
+      backoff = 1
+      retries = 0
+      begin
+        send(payload)
+      rescue RetryableError => e
+        if retries < max_retries || max_retries < 0
+          @logger.warn("Retrying ", :exception => e, :backtrace => e.backtrace)
+          sleep backoff
+          backoff = 2 * backoff unless backoff > max_backoff
+          retries += 1
+          retry
+        end
       end
-
-
+    end
+
|
+
def send(payload)
|
294
|
+
raise NotImplementedError, "Datadog transport client should implement the send method"
|
295
|
+
end
|
296
|
+
|
297
|
+
def close
|
298
|
+
raise NotImplementedError, "Datadog transport client should implement the close method"
|
299
|
+
end
|
300
|
+
end
|
301
|
+
|
302
|
+
# HTTP datadog client
|
303
|
+
class DatadogHTTPClient < DatadogClient
|
304
|
+
require 'net/http'
|
305
|
+
require 'net/http/persistent'
|
306
|
+
|
307
|
+
def initialize(logger, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, api_key, force_v1_routes = false)
|
308
|
+
@logger = logger
|
309
|
+
protocol = use_ssl ? "https" : "http"
|
310
|
+
port = use_ssl ? ssl_port : port
|
311
|
+
if force_v1_routes
|
312
|
+
@uri = URI("#{protocol}://#{host}:#{port.to_s}/v1/input/#{api_key}")
|
313
|
+
else
|
314
|
+
@uri = URI("#{protocol}://#{host}:#{port.to_s}/api/v2/logs")
|
315
|
+
end
|
316
|
+
proxy_uri = :ENV
|
317
|
+
if http_proxy
|
318
|
+
proxy_uri = URI.parse(http_proxy)
|
319
|
+
elsif ENV['HTTP_PROXY'] || ENV['http_proxy']
|
320
|
+
logger.info("Using HTTP proxy defined in `HTTP_PROXY`/`http_proxy` env vars")
|
321
|
+
end
|
322
|
+
logger.info("Starting HTTP connection to #{protocol}://#{host}:#{port.to_s} with compression " + (use_compression ? "enabled" : "disabled") + (force_v1_routes ? " using v1 routes" : " using v2 routes"))
|
323
|
+
@client = Net::HTTP::Persistent.new name: "fluent-plugin-datadog-logcollector", proxy: proxy_uri
|
324
|
+
@client.verify_mode = OpenSSL::SSL::VERIFY_NONE if no_ssl_validation
|
325
|
+
unless force_v1_routes
|
326
|
+
@client.override_headers["DD-API-KEY"] = api_key
|
327
|
+
@client.override_headers["DD-EVP-ORIGIN"] = "fluent"
|
328
|
+
@client.override_headers["DD-EVP-ORIGIN-VERSION"] = DatadogFluentPlugin::VERSION
|
127
329
|
end
|
330
|
+
@client.override_headers["Content-Type"] = "application/json"
|
331
|
+
if use_compression
|
332
|
+
@client.override_headers["Content-Encoding"] = "gzip"
|
333
|
+
end
|
334
|
+
if !@client.proxy_uri.nil?
|
335
|
+
# Log the proxy settings as resolved by the HTTP client
|
336
|
+
logger.info("Using HTTP proxy #{@client.proxy_uri.scheme}://#{@client.proxy_uri.host}:#{@client.proxy_uri.port} username: #{@client.proxy_uri.user ? "set" : "unset"}, password: #{@client.proxy_uri.password ? "set" : "unset"}")
|
337
|
+
end
|
338
|
+
end
|
128
339
|
|
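The two intake routes built here, sketched with the default host (on v2 the API key travels in the `DD-API-KEY` header; on v1 it is embedded in the path):

```ruby
require "uri"

host, api_key = "http-intake.logs.datadoghq.com", "<API_KEY>"
v2 = URI("https://#{host}/api/v2/logs")          # default route
v1 = URI("https://#{host}/v1/input/#{api_key}")  # force_v1_routes true
```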
-
-
+    def send(payload)
+      request = Net::HTTP::Post.new @uri.request_uri
+      request.body = payload
+      response = @client.request @uri, request
+      res_code = response.code.to_i
+      # on a backend error or on an http 429, retry with backoff
+      if res_code >= 500 || res_code == 429
+        raise RetryableError.new "Unable to send payload: #{res_code} #{response.message}"
       end
-
-
-      record[@timestamp_key] = Time.at(time).utc.iso8601(3)
+      if res_code >= 400
+        @logger.error("Unable to send payload due to client error: #{res_code} #{response.message}")
       end
+    end
 
-
-
-
-
-
-
+    def close
+      @client.shutdown
+    end
+  end
+
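Summarizing the status handling this `send` implements (as written above, no extra behavior):

```ruby
# 2xx/3xx   -> success, nothing raised
# 429 / 5xx -> RetryableError, so send_retries backs off and retries
# other 4xx -> logged and dropped; retrying a client error would not help
```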
+  # TCP Datadog client
+  class DatadogTCPClient < DatadogClient
+    require "socket"
+
+    def initialize(logger, use_ssl, no_ssl_validation, host, ssl_port, port)
+      @logger = logger
+      @use_ssl = use_ssl
+      @no_ssl_validation = no_ssl_validation
+      @host = host
+      @port = use_ssl ? ssl_port : port
+    end
+
+    def connect
+      if @use_ssl
+        @logger.info("Starting SSL connection #{@host} #{@port}")
+        socket = TCPSocket.new @host, @port
+        ssl_context = OpenSSL::SSL::SSLContext.new
+        if @no_ssl_validation
+          ssl_context.set_params({:verify_mode => OpenSSL::SSL::VERIFY_NONE})
         end
+        ssl_context = OpenSSL::SSL::SSLSocket.new socket, ssl_context
+        ssl_context.connect
+        ssl_context
+      else
+        @logger.info("Starting plaintext connection #{@host} #{@port}")
+        TCPSocket.new @host, @port
       end
+    end
 
-
-
-
-
-
-
-
-
-    end
-
-    def send_to_datadog(events)
-      @my_mutex.synchronize do
-        events.each do |event|
-          log.trace "Datadog plugin: about to send event=#{event}"
-          retries = 0
-          begin
-            log.info "New attempt to Datadog attempt=#{retries}" if retries > 1
-            @client ||= new_client
-            @client.write(event)
-          rescue => e
-            @client.close rescue nil
-            @client = nil
-
-            if retries == 0
-              # immediately retry, in case it's just a server-side close
-              retries += 1
-              retry
-            end
-
-            if retries < @max_retries || @max_retries == -1
-              a_couple_of_seconds = retries ** 2
-              a_couple_of_seconds = 30 unless a_couple_of_seconds < 30
-              retries += 1
-              log.warn "Could not push event to Datadog, attempt=#{retries} max_attempts=#{max_retries} wait=#{a_couple_of_seconds}s error=#{e}"
-              sleep a_couple_of_seconds
-              retry
-            end
-            raise ConnectionFailure, "Could not push event to Datadog after #{retries} retries, #{e}"
-          end
+    def send(payload)
+      begin
+        @socket ||= connect
+        @socket.puts(payload)
+      rescue => e
+        @socket.close rescue nil
+        @socket = nil
+        raise RetryableError.new "Unable to send payload: #{e.message}."
       end
     end
+
+    def close
+      @socket.close rescue nil
+    end
   end
 
   # Collect docker and kubernetes tags for your logs using `filter_kubernetes_metadata` plugin,
@@ -191,9 +406,9 @@ class Fluent::DatadogOutput < Fluent::BufferedOutput
   # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/blob/master/lib/fluent/plugin/filter_kubernetes_metadata.rb#L265
 
   def get_container_tags(record)
-
-
-
+    [
+      get_kubernetes_tags(record),
+      get_docker_tags(record)
     ].compact.join(",")
   end
 
@@ -207,7 +422,7 @@ class Fluent::DatadogOutput < Fluent::BufferedOutput
       tags.push("pod_name:" + kubernetes['pod_name']) unless kubernetes['pod_name'].nil?
       return tags.join(",")
     end
-
+    nil
   end
 
   def get_docker_tags(record)
@@ -217,7 +432,6 @@ class Fluent::DatadogOutput < Fluent::BufferedOutput
       tags.push("container_id:" + docker['container_id']) unless docker['container_id'].nil?
       return tags.join(",")
     end
-
+    nil
   end
-
 end
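Putting the container-tag helpers together: a record decorated by `filter_kubernetes_metadata` (hypothetical values, limited to the fields visible in this diff) would contribute `ddtags` like this:

```ruby
record = {
  "kubernetes" => { "pod_name" => "web-0" },
  "docker"     => { "container_id" => "abc123" },
}
# get_container_tags(record)
# => "pod_name:web-0,container_id:abc123"
```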
metadata CHANGED
@@ -1,43 +1,91 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-datadog
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.14.0
 platform: ruby
 authors:
 - Datadog Solutions Team
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2021-10-18 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: fluentd
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '1'
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '2'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '1'
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '2'
+- !ruby/object:Gem::Dependency
+  name: net-http-persistent
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.1'
 - !ruby/object:Gem::Dependency
   name: bundler
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1
+        version: '2.1'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.1'
+- !ruby/object:Gem::Dependency
+  name: test-unit
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.1'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1
+        version: '3.1'
 - !ruby/object:Gem::Dependency
   name: rake
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '12.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '12.0'
 - !ruby/object:Gem::Dependency
   name: yajl-ruby
   requirement: !ruby/object:Gem::Requirement
@@ -52,7 +100,21 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '1.2'
-
+- !ruby/object:Gem::Dependency
+  name: webmock
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 3.6.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 3.6.0
+description:
 email:
 - support@datadoghq.com
 executables: []
@@ -66,11 +128,12 @@ files:
 - Rakefile
 - fluent-plugin-datadog.gemspec
 - lib/fluent/plugin/out_datadog.rb
+- lib/fluent/plugin/version.rb
 homepage: http://datadoghq.com
 licenses:
-- Apache
+- Apache-2.0
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -85,9 +148,9 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubyforge_project:
+rubyforge_project:
 rubygems_version: 2.7.10
-signing_key:
+signing_key:
 specification_version: 4
 summary: Datadog output plugin for Fluent event collector
 test_files: []