logstash-output-influxdb 2.0.2 → 3.0.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 2c79dca49ee87b2359d0f3e2b88801cbfbf55e10
-   data.tar.gz: ae1dee54d2c300bb765d737cf9fcec6fbe14c422
+   metadata.gz: dc53920ba345ebe5e4e289819a5dfac55406720d
+   data.tar.gz: deb992fd046dbe441ba216971b96037169f45ef8
  SHA512:
-   metadata.gz: 6419b39ea5ef1c5910448b849e10256f5883f4046684b239d3864853dd57abcce59f18e81ced84750e60f4fe6c9e813a1706e8d03832b10c871b1970cec17cc0
-   data.tar.gz: 61242367818256debd322e8cb11a60e9d1706ba80d860060d126f5c1176a6391c3c224c19e62749754268243fe9b7f0e85cc087cb564b3994d354f5b2efb782c
+   metadata.gz: 96f4b4d2aff815c0df8cfba3f73030ff7fcf2402c16ec4123b5f844964b2cfa4049c0f34bc42d4ca60c81bd3b1278ac1e9c12cbf6b19ff6f4f256f97f2812606
+   data.tar.gz: ca510bc40ac465550485e9fbb128e9adfdaf86b2e78abba05844ef5745634f753bd7398bfbc2287c2fe3cb0b319cc02d882c9cb97eb8c8b4d0b8e7769473e972
data/README.md CHANGED
@@ -1,5 +1,8 @@
  # Logstash Plugin

+ [![Build Status](http://build-eu-00.elastic.co/view/LS%20Plugins/view/LS%20Outputs/job/logstash-plugin-output-influxdb-unit/badge/icon)](http://build-eu-00.elastic.co/view/LS%20Plugins/view/LS%20Outputs/job/logstash-plugin-output-influxdb-unit/)
+
  This is a plugin for [Logstash](https://github.com/elastic/logstash).

  It is fully free and fully open source. The license is Apache 2.0, meaning you are pretty much free to use it however you want in whatever way.
data/lib/logstash/outputs/influxdb.rb CHANGED
@@ -4,11 +4,11 @@ require "logstash/outputs/base"
  require "logstash/json"
  require "stud/buffer"

- # This output lets you output Metrics to InfluxDB
+ # This output lets you output Metrics to InfluxDB (>= 0.9.0-rc31)
  #
  # The configuration here attempts to be as friendly as possible
  # and minimize the need for multiple definitions to write to
- # multiple series and still be efficient
+ # multiple measurements and still be efficient
  #
  # the InfluxDB API let's you do some semblance of bulk operation
  # per http call but each call is database-specific
@@ -20,7 +20,10 @@ class LogStash::Outputs::InfluxDB < LogStash::Outputs::Base
    config_name "influxdb"

    # The database to write
-   config :db, :validate => :string, :default => "stats"
+   config :db, :validate => :string, :default => "statistics"
+
+   # The retention policy to use
+   config :retention_policy, :validate => :string, :default => "default"

    # The hostname or IP address to reach your InfluxDB instance
    config :host, :validate => :string, :required => true
@@ -29,18 +32,18 @@ class LogStash::Outputs::InfluxDB < LogStash::Outputs::Base
    config :port, :validate => :number, :default => 8086

    # The user who has access to the named database
-   config :user, :validate => :string, :default => nil, :required => true
+   config :user, :validate => :string, :default => nil

    # The password for the user who access to the named database
-   config :password, :validate => :password, :default => nil, :required => true
+   config :password, :validate => :password, :default => nil

-   # Series name - supports sprintf formatting
-   config :series, :validate => :string, :default => "logstash"
+   # Measurement name - supports sprintf formatting
+   config :measurement, :validate => :string, :default => "logstash"

    # Hash of key/value pairs representing data points to send to the named database
    # Example: `{'column1' => 'value1', 'column2' => 'value2'}`
    #
-   # Events for the same series will be batched together where possible
+   # Events for the same measurement will be batched together where possible
    # Both keys and values support sprintf formatting
    config :data_points, :validate => :hash, :default => {}, :required => true

@@ -57,7 +60,7 @@ class LogStash::Outputs::InfluxDB < LogStash::Outputs::Base
    # Set the level of precision of `time`
    #
    # only useful when overriding the time value
-   config :time_precision, :validate => ["m", "s", "u"], :default => "s"
+   config :time_precision, :validate => ["n", "u", "ms", "s", "m", "h"], :default => "ms"

    # Allow value coercion
    #
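The `time_precision` change above widens the 2.x precisions ("m", "s", "u") to InfluxDB 0.9's set and drops the default from seconds to milliseconds. A minimal sketch of the scaling that the new `timestamp_at_precision` helper (added later in this diff) performs; the timestamp value here is illustrative, not from the diff:

```ruby
# Sketch of the precision scaling done by timestamp_at_precision below.
timestamp  = 1434055562.123  # epoch seconds, illustrative value
multiplier = { :h => 1.0/3600, :m => 1.0/60, :s => 1, :ms => 1000, :u => 1_000_000 }[:ms]
puts (timestamp * multiplier).to_i  # => 1434055562123
```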
@@ -69,8 +72,26 @@ class LogStash::Outputs::InfluxDB < LogStash::Outputs::Base
    #
    config :coerce_values, :validate => :hash, :default => {}

+   # Automatically use fields from the event as the data points sent to Influxdb
+   config :use_event_fields_for_data_points, :validate => :boolean, :default => false
+
+   # An array containing the names of fields from the event to exclude from the
+   # data points
+   #
+   # Events, in general, contain keys "@version" and "@timestamp". Other plugins
+   # may add others that you'll want to exclude (such as "command" from the
+   # exec plugin).
+   #
+   # This only applies when use_event_fields_for_data_points is true.
+   config :exclude_fields, :validate => :array, :default => ["@timestamp", "@version", "sequence", "message", "type"]
+
+   # An array containing the names of fields to send to Influxdb as tags instead
+   # of fields. Influxdb 0.9 convention is that values that do not change every
+   # request should be considered metadata and given as tags.
+   config :send_as_tags, :validate => :array, :default => ["host"]
+
    # This setting controls how many events will be buffered before sending a batch
-   # of events. Note that these are only batched for the same series
+   # of events. Note that these are only batched for the same measurement
    config :flush_size, :validate => :number, :default => 100

    # The amount of time since last flush before a flush is forced.
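Taken together, the new `use_event_fields_for_data_points`, `exclude_fields`, and `send_as_tags` options partition an event hash into fields and tags. A standalone sketch (not plugin code) of that partitioning, reusing the worked example from the `extract_tags` comment further below:

```ruby
# Standalone sketch of the exclude/tag partitioning, using the example
# data from the extract_tags comment later in this diff.
exclude_fields = ["@timestamp", "@version", "sequence", "message", "type"]
send_as_tags   = ["bar"]

point = { "@version" => "1", "foo" => 1, "bar" => 2, "tags" => ["tag"] }
exclude_fields.each { |f| point.delete(f) }   # drop bookkeeping keys

# An event's "tags" array becomes InfluxDB tags with the value "true";
# then any field named in send_as_tags is moved over as well.
tags = (point.delete("tags") || []).each_with_object({}) { |t, h| h[t] = "true" }
send_as_tags.each { |k| tags[k] = point.delete(k) if point.has_key?(k) }

p tags   # => {"tag"=>"true", "bar"=>2}
p point  # => {"foo"=>1}
```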
@@ -84,15 +105,17 @@ class LogStash::Outputs::InfluxDB < LogStash::Outputs::Base
    # near-real-time.
    config :idle_flush_time, :validate => :number, :default => 1

+
    public
    def register
-     require "ftw" # gem ftw
+     require 'manticore'
      require 'cgi'
-     @agent = FTW::Agent.new
+
+     @client = Manticore::Client.new
      @queue = []

-     @query_params = "u=#{@user}&p=#{@password.value}&time_precision=#{@time_precision}"
-     @base_url = "http://#{@host}:#{@port}/db/#{@db}/series"
+     @query_params = "db=#{@db}&rp=#{@retention_policy}&precision=#{@time_precision}&u=#{@user}&p=#{@password.value}"
+     @base_url = "http://#{@host}:#{@port}/write"
      @url = "#{@base_url}?#{@query_params}"

      buffer_initialize(
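The write endpoint moves from the 0.8-style `/db/#{db}/series` to 0.9's single `/write` endpoint, with database, retention policy, and precision passed as query parameters. A sketch of the URL this builds, assuming `host => "localhost"` and otherwise default settings; it matches the `expected_url` the specs below assert against:

```ruby
# Sketch: the write URL register assembles with host "localhost" and the
# new defaults; user and password are unset here, so u= and p= are empty.
host, port, db, rp, precision = "localhost", 8086, "statistics", "default", "ms"
url = "http://#{host}:#{port}/write?db=#{db}&rp=#{rp}&precision=#{precision}&u=&p="
puts url  # => http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=
```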
@@ -102,128 +125,229 @@ class LogStash::Outputs::InfluxDB < LogStash::Outputs::Base
      )
    end # def register

+
    public
    def receive(event)


-     # A batch POST for InfluxDB looks like this:
-     # [
-     #   {
-     #     "name": "events",
-     #     "columns": ["state", "email", "type"],
-     #     "points": [
-     #       ["ny", "paul@influxdb.org", "follow"],
-     #       ["ny", "todd@influxdb.org", "open"]
-     #     ]
-     #   },
-     #   {
-     #     "name": "errors",
-     #     "columns": ["class", "file", "user", "severity"],
-     #     "points": [
-     #       ["DivideByZero", "example.py", "someguy@influxdb.org", "fatal"]
-     #     ]
-     #   }
-     # ]
-     event_hash = {}
-     event_hash['name'] = event.sprintf(@series)
-
-     sprintf_points = Hash[@data_points.map {|k,v| [event.sprintf(k), event.sprintf(v)]}]
-     if sprintf_points.has_key?('time')
+     @logger.debug? and @logger.debug("Influxdb output: Received event: #{event}")
+
+     # An Influxdb 0.9 event looks like this:
+     # cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000
+     #  ^ measurement  ^ tags (optional)            ^ fields   ^ timestamp (optional)
+     #
+     # Since we'll be buffering them to send as a batch, we'll only collect
+     # the values going into the points array
+
+     time  = timestamp_at_precision(event.timestamp, @time_precision.to_sym)
+     point = create_point_from_event(event)
+
+     if point.has_key?('time')
        unless @allow_time_override
          logger.error("Cannot override value of time without 'allow_time_override'. Using event timestamp")
-         sprintf_points['time'] = event.timestamp.to_i
+       else
+         time = point.delete("time")
        end
-     else
-       sprintf_points['time'] = event.timestamp.to_i
      end

-     @coerce_values.each do |column, value_type|
-       if sprintf_points.has_key?(column)
-         begin
-           case value_type
-           when "integer"
-             @logger.debug? and @logger.debug("Converting column #{column} to type #{value_type}: Current value: #{sprintf_points[column]}")
-             sprintf_points[column] = sprintf_points[column].to_i
-           when "float"
-             @logger.debug? and @logger.debug("Converting column #{column} to type #{value_type}: Current value: #{sprintf_points[column]}")
-             sprintf_points[column] = sprintf_points[column].to_f
-           else
-             @logger.error("Don't know how to convert to #{value_type}")
-           end
-         rescue => e
-           @logger.error("Unhandled exception", :error => e.message)
-         end
-       end
-     end
+     exclude_fields!(point)
+     coerce_values!(point)

-     event_hash['columns'] = sprintf_points.keys
-     event_hash['points'] = []
-     event_hash['points'] << sprintf_points.values
+     tags, point = extract_tags(point)
+
+     event_hash = {
+       "measurement" => event.sprintf(@measurement),
+       "time"        => time,
+       "fields"      => point
+     }
+     event_hash["tags"] = tags unless tags.empty?

      buffer_receive(event_hash)
    end # def receive

+
    def flush(events, teardown = false)
-     # seen_series stores a list of series and associated columns
-     # we've seen for each event
-     # so that we can attempt to batch up points for a given series.
-     #
-     # Columns *MUST* be exactly the same
-     seen_series = {}
-     event_collection = []
-
-     events.each do |ev|
-       begin
-         if seen_series.has_key?(ev['name']) and (seen_series[ev['name']] == ev['columns'])
-           @logger.info("Existing series data found. Appending points to that series")
-           event_collection.select {|h| h['points'] << ev['points'][0] if h['name'] == ev['name']}
-         elsif seen_series.has_key?(ev['name']) and (seen_series[ev['name']] != ev['columns'])
-           @logger.warn("Series '#{ev['name']}' has been seen but columns are different or in a different order. Adding to batch but not under existing series")
-           @logger.warn("Existing series columns were: #{seen_series[ev['name']].join(",")} and event columns were: #{ev['columns'].join(",")}")
-           event_collection << ev
-         else
-           seen_series[ev['name']] = ev['columns']
-           event_collection << ev
-         end
-       rescue => e
-         @logger.warn("Error adding event to collection", :exception => e)
-         next
-       end
-     end
+     @logger.debug? and @logger.debug("Flushing #{events.size} events to #{@url} - Teardown? #{teardown}")
+     post(events_to_request_body(events))
+   end # def flush

-     post(LogStash::Json.dump(event_collection))
-   end # def receive_bulk

    def post(body)
      begin
-       @logger.debug("Post body: #{body}")
-       response = @agent.post!(@url, :body => body)
+       @logger.debug? and @logger.debug("Post body: #{body}")
+       response = @client.post!(@url, :body => body)
+
      rescue EOFError
        @logger.warn("EOF while writing request or reading response header from InfluxDB",
                     :host => @host, :port => @port)
        return # abort this flush
      end

-     # Consume the body for error checking
-     # This will also free up the connection for reuse.
-     body = ""
-     begin
-       response.read_body { |chunk| body += chunk }
-     rescue EOFError
-       @logger.warn("EOF while reading response body from InfluxDB",
-                    :host => @host, :port => @port)
-       return # abort this flush
+     if read_body?(response)
+       # Consume the body for error checking
+       # This will also free up the connection for reuse.
+       body = ""
+       begin
+         response.read_body { |chunk| body += chunk }
+       rescue EOFError
+         @logger.warn("EOF while reading response body from InfluxDB",
+                      :host => @host, :port => @port)
+         return # abort this flush
+       end
+
+       @logger.debug? and @logger.debug("Body: #{body}")
      end

-     if response.status != 200
+     unless response && (200..299).include?(response.code)
        @logger.error("Error writing to InfluxDB",
                      :response => response, :response_body => body,
                      :request_body => @queue.join("\n"))
        return
+     else
+       @logger.debug? and @logger.debug("Post response: #{response}")
      end
    end # def post

    def close
      buffer_flush(:final => true)
    end # def teardown
+
+
+   # A batch POST for InfluxDB 0.9 looks like this:
+   # cpu_load_short,host=server01,region=us-west value=0.64
+   # cpu_load_short,host=server02,region=us-west value=0.55 1422568543702900257
+   # cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257
+   def events_to_request_body(events)
+     events.map do |event|
+       result = escaped_measurement(event["measurement"].dup)
+       result << "," << event["tags"].map { |tag,value| "#{escaped(tag)}=#{escaped(value)}" }.join(',') if event.has_key?("tags")
+       result << " " << event["fields"].map { |field,value| "#{escaped(field)}=#{quoted(value)}" }.join(',')
+       result << " #{event["time"]}"
+     end.join("\n") #each measurement should be on a separate line
+   end
+
+
+   # Create a data point from an event. If @use_event_fields_for_data_points is
+   # true, convert the event to a hash. Otherwise, use @data_points. Each key and
+   # value will be run through event#sprintf with the exception of a non-String
+   # value (which will be passed through)
+   def create_point_from_event(event)
+     Hash[ (@use_event_fields_for_data_points ? event.to_hash : @data_points).map do |k,v|
+       [event.sprintf(k), (String === v ? event.sprintf(v) : v)]
+     end ]
+   end
+
+
+   # Coerce values in the event data to their appropriate type. This requires
+   # foreknowledge of what's in the data point, which is less than ideal. An
+   # alternative is to use a `code` filter and manipulate the individual point's
+   # data before sending to the output pipeline
+   def coerce_values!(event_data)
+     @coerce_values.each do |column, value_type|
+       if event_data.has_key?(column)
+         begin
+           @logger.debug? and @logger.debug("Converting column #{column} to type #{value_type}: Current value: #{event_data[column]}")
+           event_data[column] = coerce_value(value_type, event_data[column])
+
+         rescue => e
+           @logger.error("Unhandled exception", :error => e.message)
+         end
+       end
+     end
+
+     event_data
+   end
+
+
+   def coerce_value(value_type, value)
+     case value_type.to_sym
+     when :integer
+       value.to_i
+
+     when :float
+       value.to_f
+
+     when :string
+       value.to_s
+
+     else
+       @logger.warn("Don't know how to convert to #{value_type}. Returning value unchanged")
+       value
+     end
+   end
+
+
+   # Remove a set of fields from the event data before sending it to Influxdb. This
+   # is useful for removing @timestamp, @version, etc
+   def exclude_fields!(event_data)
+     @exclude_fields.each { |field| event_data.delete(field) }
+   end
+
+
+   # Extract tags from a hash of fields.
+   # Returns a tuple containing a hash of tags (as configured by send_as_tags)
+   # and a hash of fields that exclude the tags. If fields contains a key
+   # "tags" with an array, they will be moved to the tags hash (and each will be
+   # given a value of true)
+   #
+   # Example:
+   #   # Given send_as_tags: ["bar"]
+   #   original_fields = {"foo" => 1, "bar" => 2, "tags" => ["tag"]}
+   #   tags, fields = extract_tags(original_fields)
+   #   # tags: {"bar" => 2, "tag" => "true"} and fields: {"foo" => 1}
+   def extract_tags(fields)
+     remainder = fields.dup
+
+     tags = if remainder.has_key?("tags") && remainder["tags"].respond_to?(:inject)
+       remainder.delete("tags").inject({}) { |tags, tag| tags[tag] = "true"; tags }
+     else
+       {}
+     end
+
+     @send_as_tags.each { |key| (tags[key] = remainder.delete(key)) if remainder.has_key?(key) }
+
+     tags.delete_if { |key,value| value.nil? || value == "" }
+     remainder.delete_if { |key,value| value.nil? || value == "" }
+
+     [tags, remainder]
+   end
+
+
+   # Returns the numeric value of the given timestamp in the requested precision.
+   # precision must be one of the valid values for time_precision
+   def timestamp_at_precision( timestamp, precision )
+     multiplier = case precision
+       when :h  then 1.0/3600
+       when :m  then 1.0/60
+       when :s  then 1
+       when :ms then 1000
+       when :u  then 1000000
+     end
+
+     (timestamp.to_f * multiplier).to_i
+   end
+
+
+   # Only read the response body if its status is not 1xx, 204, or 304. TODO: Should
+   # also not try reading the body if the request was a HEAD
+   def read_body?( response )
+     ! (response.nil? || [204,304].include?(response.code) || (100..199).include?(response.code))
+   end
+
+
+   # Return a quoted string of the given value if it's not a number
+   def quoted(value)
+     Numeric === value ? value : %Q|"#{value.gsub('"','\"')}"|
+   end
+
+
+   # Escape tag key, tag value, or field key
+   def escaped(value)
+     value.gsub(/[ ,=]/, ' ' => '\ ', ',' => '\,', '=' => '\=')
+   end
+
+
+   # Escape measurements note they don't need to worry about the '=' case
+   def escaped_measurement(value)
+     value.gsub(/[ ,]/, ' ' => '\ ', ',' => '\,')
+   end
+
  end # class LogStash::Outputs::InfluxDB
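The helpers added above implement InfluxDB 0.9's line protocol. A self-contained sketch of how one buffered event is serialized, mirroring `escaped`, `escaped_measurement`, and `quoted` from the diff; the sample values are illustrative, not from the diff:

```ruby
# Standalone mirror of the serialization helpers above.
def escaped(value)
  value.gsub(/[ ,=]/, ' ' => '\ ', ',' => '\,', '=' => '\=')
end

def quoted(value)
  Numeric === value ? value : %Q|"#{value.gsub('"', '\"')}"|
end

event = {
  "measurement" => "cpu load",
  "tags"        => { "host" => "server01" },
  "fields"      => { "value" => 0.64, "note" => "idle state" },
  "time"        => 1434055562000   # ms precision, the new default
}

line = event["measurement"].gsub(/[ ,]/, ' ' => '\ ', ',' => '\,')  # escaped_measurement
line << "," << event["tags"].map { |k, v| "#{escaped(k)}=#{escaped(v)}" }.join(",")
line << " " << event["fields"].map { |k, v| "#{escaped(k)}=#{quoted(v)}" }.join(",")
line << " #{event["time"]}"
puts line
# => cpu\ load,host=server01 value=0.64,note="idle state" 1434055562000
```

Note that numeric field values pass through unquoted while everything else is double-quoted, which is exactly what the coercion spec at the bottom of this diff asserts.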
logstash-output-influxdb.gemspec CHANGED
@@ -1,7 +1,7 @@
  Gem::Specification.new do |s|

    s.name = 'logstash-output-influxdb'
-   s.version = '2.0.2'
+   s.version = '3.0.0'
    s.licenses = ['Apache License (2.0)']
    s.summary = "This output lets you output Metrics to InfluxDB"
    s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
@@ -23,7 +23,7 @@ Gem::Specification.new do |s|
    s.add_runtime_dependency "logstash-core", ">= 2.0.0.beta2", "< 3.0.0"

    s.add_runtime_dependency 'stud'
-   s.add_runtime_dependency 'ftw', ['~> 0.0.40']
+   s.add_runtime_dependency 'manticore'

    s.add_development_dependency 'logstash-devutils'
    s.add_development_dependency 'logstash-input-generator'
data/spec/outputs/influxdb_spec.rb CHANGED
@@ -1,5 +1,6 @@
  require "logstash/devutils/rspec/spec_helper"
  require "logstash/outputs/influxdb"
+ require "manticore"

  describe LogStash::Outputs::InfluxDB do

@@ -25,7 +26,7 @@ describe LogStash::Outputs::InfluxDB do

    before do
      subject.register
-     allow(subject).to receive(:post).with(json_result)
+     allow(subject).to receive(:post).with(result)

      2.times do
        subject.receive(LogStash::Event.new("foo" => "1", "bar" => "2", "time" => "3", "type" => "generator"))
@@ -35,11 +36,322 @@ describe LogStash::Outputs::InfluxDB do
      subject.close
    end

-   let(:json_result) { "[{\"name\":\"logstash\",\"columns\":[\"foo\",\"bar\",\"time\"],\"points\":[[\"1\",\"2\",\"3\"],[\"1\",\"2\",\"3\"]]}]" }
+   let(:result) { "logstash foo=\"1\",bar=\"2\" 3\nlogstash foo=\"1\",bar=\"2\" 3" }

    it "should receive 2 events, flush and call post with 2 items json array" do
-     expect(subject).to have_received(:post).with(json_result)
+     expect(subject).to have_received(:post).with(result)
    end

  end
+
+   context "using event fields as data points" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 bar=2 time=3"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv { }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my_series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my_series foo="1",bar="2" 3' }
+
+     it "should use the event fields as the data points, excluding @version and @timestamp by default as well as any fields configured by exclude_fields" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+   context "sending some fields as Influxdb tags" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 bar=2 baz=3 time=4"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv { }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my_series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+           send_as_tags => ["bar", "baz", "qux"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my_series,bar=2,baz=3 foo="1" 4' }
+
+     it "should send the specified fields as tags" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+   context "Escapeing space characters" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 bar=2 baz=3 time=4"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv {
+           add_field => {
+             "test1" => "yellow cat"
+             "test space" => "making life hard"
+             "feild space" => "pink dog"
+           }
+         }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+           send_as_tags => ["bar", "baz", "test1", "test space"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my\ series,bar=2,baz=3,test1=yellow\ cat,test\ space=making\ life\ hard foo="1",feild\ space="pink dog" 4' }
+
+     it "should send the specified fields as tags" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+   context "Escapeing comma characters" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 bar=2 baz=3 time=4"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv {
+           add_field => {
+             "test1" => "yellow, cat"
+             "test, space" => "making, life, hard"
+             "feild, space" => "pink, dog"
+           }
+         }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my, series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+           send_as_tags => ["bar", "baz", "test1", "test, space"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my\,\ series,bar=2,baz=3,test1=yellow\,\ cat,test\,\ space=making\,\ life\,\ hard foo="1",feild\,\ space="pink, dog" 4' }
+
+     it "should send the specified fields as tags" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+   context "Escapeing equal characters" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 bar=2 baz=3 time=4"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv {
+           add_field => {
+             "test1" => "yellow=cat"
+             "test=space" => "making= life=hard"
+             "feild= space" => "pink= dog"
+           }
+         }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my=series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+           send_as_tags => ["bar", "baz", "test1", "test=space"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my=series,bar=2,baz=3,test1=yellow\=cat,test\=space=making\=\ life\=hard foo="1",feild\=\ space="pink= dog" 4' }
+
+     it "should send the specified fields as tags" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+   context "testing backslash characters" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => 'foo\\=1 bar=2 baz=3 time=4'
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv {
+           add_field => {
+             "test1" => "yellow=cat"
+             "test=space" => "making=, life=hard"
+             "feildspace" => 'C:\\Griffo'
+           }
+         }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => 'my\\series'
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+           send_as_tags => ['bar', "baz", "test1", "test=space"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my\series,bar=2,baz=3,test1=yellow\=cat,test\=space=making\=\,\ life\=hard foo\="1",feildspace="C:\Griffo" 4' }
+
+     it "should send the specified fields as tags" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+
+   context "when fields data contains a list of tags" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 time=2"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv { add_tag => [ "tagged" ] }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my_series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my_series,tagged=true foo="1" 2' }
+
+     it "should move them to the tags data" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
+
+   context "when fields are coerced to numerics" do
+     let(:config) do <<-CONFIG
+       input {
+         generator {
+           message => "foo=1 bar=2 baz=\\\"quotes\\\" time=3"
+           count => 1
+           type => "generator"
+         }
+       }
+
+       filter {
+         kv { }
+       }
+
+       output {
+         influxdb {
+           host => "localhost"
+           measurement => "my_series"
+           allow_time_override => true
+           use_event_fields_for_data_points => true
+           exclude_fields => ["@version", "@timestamp", "sequence", "message", "type", "host"]
+           coerce_values => { "foo" => "integer" "bar" => "float" }
+         }
+       }
+     CONFIG
+     end
+
+     let(:expected_url) { 'http://localhost:8086/write?db=statistics&rp=default&precision=ms&u=&p=' }
+     let(:expected_body) { 'my_series foo=1,bar=2.0,baz="\\\"quotes\\\"" 3' } # We want the backslash and the escaped-quote in the request body
+
+     it "should quote all other values (and escaping double quotes)" do
+       expect_any_instance_of(Manticore::Client).to receive(:post!).with(expected_url, body: expected_body)
+       pipeline.run
+     end
+   end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-influxdb
  version: !ruby/object:Gem::Version
-   version: 2.0.2
+   version: 3.0.0
  platform: ruby
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-10-14 00:00:00.000000000 Z
+ date: 2016-02-28 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
@@ -47,17 +47,17 @@ dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
      requirements:
-     - - ~>
+     - - '>='
        - !ruby/object:Gem::Version
-         version: 0.0.40
-   name: ftw
+         version: '0'
+   name: manticore
    prerelease: false
    type: :runtime
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
-     - - ~>
+     - - '>='
        - !ruby/object:Gem::Version
-         version: 0.0.40
+         version: '0'
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
      requirements:
@@ -137,7 +137,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
      version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.4.8
+ rubygems_version: 2.4.5
  signing_key:
  specification_version: 4
  summary: This output lets you output Metrics to InfluxDB