logstash-output-splunk 0.0.1
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +2 -0
- data/CONTRIBUTORS +16 -0
- data/Gemfile +11 -0
- data/LICENSE +13 -0
- data/NOTICE.TXT +5 -0
- data/README.md +93 -0
- data/docs/index.asciidoc +392 -0
- data/lib/logstash/outputs/splunk.rb +344 -0
- data/logstash-output-splunk.gemspec +29 -0
- data/spec/outputs/splunk_spec.rb +364 -0
- data/spec/supports/compressed_requests.rb +38 -0
- metadata +142 -0

data/lib/logstash/outputs/splunk.rb
@@ -0,0 +1,344 @@

```ruby
# encoding: utf-8
require "logstash/outputs/base"
require "logstash/namespace"
require "logstash/json"
require "uri"
require "logstash/plugin_mixins/http_client"
require "zlib"

class LogStash::Outputs::Splunk < LogStash::Outputs::Base
  include LogStash::PluginMixins::HttpClient

  concurrency :shared

  attr_accessor :is_batch

  RETRYABLE_MANTICORE_EXCEPTIONS = [
    ::Manticore::Timeout,
    ::Manticore::SocketException,
    ::Manticore::ClientProtocolException,
    ::Manticore::ResolutionFailure,
    ::Manticore::SocketTimeout
  ]

  # This output lets you send events to a
  # generic HTTP(S) endpoint
  #
  # This output will execute up to 'pool_max' requests in parallel for performance.
  # Consider this when tuning this plugin for performance.
  #
  # Additionally, note that when parallel execution is used strict ordering of events is not
  # guaranteed!
  #
  # Beware, this gem does not yet support codecs. Please use the 'format' option for now.

  config_name "splunk"

  # URL to use
  config :url, :validate => :string, :required => :true

  # Custom headers to use
  # format is `headers => ["X-My-Header", "%{host}"]`
  config :headers, :validate => :hash, :default => {}

  # Splunk HTTP Event Collector tokens to use
  config :token, :validate => :string, :required => :true

  # Content type
  #
  # If not specified, this defaults to the following:
  #
  config :content_type, :validate => :string

  # Set this to false if you don't want this output to retry failed requests
  config :retry_failed, :validate => :boolean, :default => true

  # If encountered as response codes this plugin will retry these requests
  config :retryable_codes, :validate => :number, :list => true, :default => [429, 500, 502, 503, 504]

  # If you would like to consider some non-2xx codes to be successes
  # enumerate them here. Responses returning these codes will be considered successes
  config :ignorable_codes, :validate => :number, :list => true

  # Set this to false if you don't want batch
  config :is_batch, :validate => :boolean, :default => true

  # This lets you choose the structure and parts of the event that are sent.
  #
  #
  # For example:
  # [source,ruby]
  #    mapping => {"foo" => "%{host}"
  #                "bar" => "%{type}"}
  config :mapping, :validate => :hash

  # Set the format of the http body.
  #
  # If form, then the body will be the mapping (or whole event) converted
  # into a query parameter string, e.g. `foo=bar&baz=fizz...`
  #
  # If message, then the body will be the result of formatting the event according to message
  #
  # Set this to true if you want to enable gzip compression for your http requests
  config :http_compression, :validate => :boolean, :default => false

  config :message, :validate => :string

  def register
    # We count outstanding requests with this queue
    # This queue tracks the requests to create backpressure
    # When this queue is empty no new requests may be sent,
    # tokens must be added back by the client on success
    @request_tokens = SizedQueue.new(@pool_max)
    @pool_max.times {|t| @request_tokens << true }
    @requests = Array.new
    @content_type = "application/json"
    @is_batch = @is_batch
    @headers["Content-Type"] = @content_type

    # Splunk HEC token
    @headers["Authorization"] = "Splunk " + @token

    # Run named Timer as daemon thread
    @timer = java.util.Timer.new("Splunk Output #{self.params['id']}", true)
  end # def register
```
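
The comments in `register` describe a token-queue backpressure scheme: the `SizedQueue` holds one token per request the pool may have in flight, a sender takes a token before issuing a request and returns it when the request completes. A minimal, standalone illustration of that idea (plain Ruby, not code from this gem):

```ruby
require "thread"

pool_max = 2
tokens = SizedQueue.new(pool_max)
pool_max.times { tokens << true }

workers = 5.times.map do |i|
  Thread.new do
    tokens.pop              # blocks while pool_max requests are already in flight
    begin
      sleep 0.1             # stand-in for the HTTP call
      puts "request #{i} finished"
    ensure
      tokens << true        # hand the token back so another request may start
    end
  end
end
workers.each(&:join)
```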

data/lib/logstash/outputs/splunk.rb (continued):

```ruby
  def multi_receive(events)
    return if events.empty?
    send_events(events)
  end

  class RetryTimerTask < java.util.TimerTask
    def initialize(pending, event, attempt)
      @pending = pending
      @event = event
      @attempt = attempt
      super()
    end

    def run
      @pending << [@event, @attempt]
    end
  end

  def log_retryable_response(response)
    if (response.code == 429)
      @logger.debug? && @logger.debug("Encountered a 429 response, will retry. This is not serious, just flow control via HTTP")
    else
      @logger.warn("Encountered a retryable HTTP request in HTTP output, will retry", :code => response.code, :body => response.body)
    end
  end

  def log_error_response(response, url, event)
    log_failure(
      "Encountered non-2xx HTTP code #{response.code}",
      :response_code => response.code,
      :url => url,
      :event => event
    )
  end

  def send_events(events)
    successes = java.util.concurrent.atomic.AtomicInteger.new(0)
    failures = java.util.concurrent.atomic.AtomicInteger.new(0)
    retries = java.util.concurrent.atomic.AtomicInteger.new(0)
    event_count = @is_batch ? 1 : events.size

    pending = Queue.new
    if @is_batch
      pending << [events, 0]
    else
      events.each {|e| pending << [e, 0]}
    end

    while popped = pending.pop
      break if popped == :done

      event, attempt = popped

      action, event, attempt = send_event(event, attempt)
      begin
        action = :failure if action == :retry && !@retry_failed

        case action
        when :success
          successes.incrementAndGet
        when :retry
          retries.incrementAndGet

          next_attempt = attempt+1
          sleep_for = sleep_for_attempt(next_attempt)
          @logger.info("Retrying http request, will sleep for #{sleep_for} seconds")
          timer_task = RetryTimerTask.new(pending, event, next_attempt)
          @timer.schedule(timer_task, sleep_for*1000)
        when :failure
          failures.incrementAndGet
        else
          raise "Unknown action #{action}"
        end

        if action == :success || action == :failure
          if successes.get+failures.get == event_count
            pending << :done
          end
        end
      rescue => e
        # This should never happen unless there's a flat out bug in the code
        @logger.error("Error sending HTTP Request",
          :class => e.class.name,
          :message => e.message,
          :backtrace => e.backtrace)
        failures.incrementAndGet
        raise e
      end
    end
  rescue => e
    @logger.error("Error in http output loop",
      :class => e.class.name,
      :message => e.message,
      :backtrace => e.backtrace)
    raise e
  end

  def sleep_for_attempt(attempt)
    sleep_for = attempt**2
    sleep_for = sleep_for <= 60 ? sleep_for : 60
    (sleep_for/2) + (rand(0..sleep_for)/2)
  end
```
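
To make the retry pacing concrete, here is a small standalone sketch (not part of the gem) of the same backoff rule used by `sleep_for_attempt`: the delay grows quadratically with the attempt number, is capped at 60 seconds, and integer-arithmetic jitter places it between roughly half the capped value and the full value.

```ruby
def backoff(attempt)
  base = [attempt**2, 60].min          # quadratic growth, capped at 60s
  (base / 2) + (rand(0..base) / 2)     # jitter: roughly base/2 .. base seconds
end

(1..10).each { |n| puts "attempt #{n}: #{backoff(n)}s" }
# From attempt 8 onward the base is pinned at 60, so delays stay in the ~30..60s range.
```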

data/lib/logstash/outputs/splunk.rb (continued):

```ruby
  def send_event(event, attempt)
    body = event_body(event)

    # Send the request
    url = @is_batch ? @url : event.sprintf(@url)
    headers = @is_batch ? @headers : event_headers(event)

    # Compress the body and add appropriate header
    if @http_compression == true
      headers["Content-Encoding"] = "gzip"
      body = gzip(body)
    end

    # Create an async request
    response = client.send(:post, url, :body => body, :headers => headers).call

    if !response_success?(response)
      if retryable_response?(response)
        log_retryable_response(response)
        return :retry, event, attempt
      else
        log_error_response(response, url, event)
        return :failure, event, attempt
      end
    else
      return :success, event, attempt
    end

  rescue => exception
    will_retry = retryable_exception?(exception)
    log_failure("Could not fetch URL",
      :url => url,
      :body => body,
      :headers => headers,
      :message => exception.message,
      :class => exception.class.name,
      :backtrace => exception.backtrace,
      :will_retry => will_retry
    )

    if will_retry
      return :retry, event, attempt
    else
      return :failure, event, attempt
    end
  end

  def close
    @timer.cancel
    client.close
  end

  private

  def response_success?(response)
    code = response.code
    return true if @ignorable_codes && @ignorable_codes.include?(code)
    return code >= 200 && code <= 299
  end

  def retryable_response?(response)
    @retryable_codes && @retryable_codes.include?(response.code)
  end

  def retryable_exception?(exception)
    RETRYABLE_MANTICORE_EXCEPTIONS.any? {|me| exception.is_a?(me) }
  end

  # This is split into a separate method mostly to help testing
  def log_failure(message, opts)
    @logger.error("[HTTP Output Failure] #{message}", opts)
  end

  # Format the HTTP body
  def event_body(event)
    # TODO: Create an HTTP post data codec, use that here
    if @is_batch
      event.map {|e| LogStash::Json.dump(map_event(e)) }.join("\n")
    else
      LogStash::Json.dump(map_event(event))
    end
  end
```
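
In batch mode, `event_body` serializes each event separately and joins the documents with newlines. A quick standalone sketch, with plain hashes and the stdlib `json` standing in for Logstash events and `LogStash::Json`:

```ruby
require "json"

events = [{ "message" => "hi" }, { "message" => "there" }]
body = events.map { |e| JSON.dump(e) }.join("\n")
puts body
# {"message":"hi"}
# {"message":"there"}
```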

data/lib/logstash/outputs/splunk.rb (continued):

```ruby
  # gzip data
  def gzip(data)
    gz = StringIO.new
    gz.set_encoding("BINARY")
    z = Zlib::GzipWriter.new(gz)
    z.write(data)
    z.close
    gz.string
  end
```
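
A quick round-trip check (not part of the gem, assumes Ruby >= 2.4 for `Zlib.gzip`/`Zlib.gunzip`): a body compressed the way the helper above does it can be unpacked with the matching stdlib reader, so an endpoint that honours `Content-Encoding: gzip` sees the original JSON.

```ruby
require "zlib"

body = '{"message":"hi"}'
compressed = Zlib.gzip(body)            # equivalent in effect to the gzip helper above
puts Zlib.gunzip(compressed) == body    # => true
```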

data/lib/logstash/outputs/splunk.rb (continued):

```ruby
  def convert_mapping(mapping, event)
    if mapping.is_a?(Hash)
      mapping.reduce({}) do |acc, kv|
        k, v = kv
        acc[k] = convert_mapping(v, event)
        acc
      end
    elsif mapping.is_a?(Array)
      mapping.map { |elem| convert_mapping(elem, event) }
    else
      event.sprintf(mapping)
    end
  end
```
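
`convert_mapping` walks the `mapping` hash recursively and runs every leaf string through `event.sprintf`. A standalone sketch of the same walk, with a simple `%{field}` substitution standing in for `event.sprintf` (illustrative only, not code from the gem):

```ruby
def convert_mapping(mapping, fields)
  case mapping
  when Hash  then mapping.each_with_object({}) { |(k, v), acc| acc[k] = convert_mapping(v, fields) }
  when Array then mapping.map { |elem| convert_mapping(elem, fields) }
  else            mapping.gsub(/%\{(\w+)\}/) { fields[Regexp.last_match(1)].to_s }
  end
end

fields  = { "foo" => "bar", "user" => "McBest" }
mapping = { "host" => "X %{foo}", "event" => { "user" => "Y %{user}" } }
p convert_mapping(mapping, fields)
# => {"host"=>"X bar", "event"=>{"user"=>"Y McBest"}}
```

This mirrors the "sending a mapped, nested event" expectation in the bundled spec further down.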

data/lib/logstash/outputs/splunk.rb (continued):

```ruby
  def map_event(event)
    if @mapping
      convert_mapping(@mapping, event)
    else
      event.to_hash
    end
  end

  def event_headers(event)
    custom_headers(event) || {}
  end

  def custom_headers(event)
    return nil unless @headers

    @headers.reduce({}) do |acc,kv|
      k,v = kv
      acc[k] = event.sprintf(v)
      acc
    end
  end

  #TODO Extract this to a codec
  def encode(hash)
    return hash.collect do |key, value|
      CGI.escape(key) + "=" + CGI.escape(value.to_s)
    end.join("&")
  end
end
```
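
For orientation, a sketch of how this output can be driven directly, mirroring the way the bundled specs below exercise it (`new`, `register`, `multi_receive`, `close`). It assumes a JRuby/Logstash environment with the plugin's dependencies on the load path (for example `bundle exec` inside the plugin source tree); the URL and token are placeholders, not values from the package, and `multi_receive` performs a real POST.

```ruby
require "logstash/outputs/splunk"
require "logstash/event"

output = LogStash::Outputs::Splunk.new(
  "url"   => "https://splunk.example.com:8088/services/collector",   # placeholder HEC endpoint
  "token" => "00000000-0000-0000-0000-000000000000"                   # placeholder HEC token
)
output.register                                                 # builds Content-Type and Authorization headers
output.multi_receive([LogStash::Event.new("message" => "hi")])  # POSTs one newline-delimited JSON batch
output.close
```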

data/logstash-output-splunk.gemspec
@@ -0,0 +1,29 @@

```ruby
Gem::Specification.new do |s|
  s.name = 'logstash-output-splunk'
  s.version = '0.0.1'
  s.licenses = ['Apache License (2.0)']
  s.summary = "Sends events to a Splunk HTTP Event Collector REST API endpoints"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
  s.authors = ["Jian Chen"]
  s.email = 'jianchen2580@gmail.com'
  # TODO
  s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
  s.require_paths = ["lib"]

  # Files
  s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]

  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "output" }

  # Gem dependencies
  s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
  s.add_runtime_dependency "logstash-mixin-http_client", ">= 6.0.0", "< 8.0.0"

  s.add_development_dependency 'logstash-devutils'
  s.add_development_dependency 'sinatra'
  s.add_development_dependency 'webrick'
end
```
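
If you have an unpacked copy of the gem source, the gemspec can be loaded back to confirm the metadata flag that marks it as a Logstash output plugin (a small check of my own, not part of the package):

```ruby
require "rubygems"

spec = Gem::Specification.load("logstash-output-splunk.gemspec")
puts spec.name                         # => "logstash-output-splunk"
puts spec.version                      # => "0.0.1"
puts spec.metadata["logstash_plugin"]  # => "true"
puts spec.metadata["logstash_group"]   # => "output"
```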

data/spec/outputs/splunk_spec.rb
@@ -0,0 +1,364 @@

```ruby
require "logstash/devutils/rspec/spec_helper"
require "logstash/outputs/http"
require "logstash/codecs/plain"
require "thread"
require "sinatra"
require_relative "../supports/compressed_requests"

PORT = rand(65535-1024) + 1025

class LogStash::Outputs::Splunk
  attr_writer :agent
  attr_reader :request_tokens
end

# note that Sinatra startup and shutdown messages are directly logged to stderr so
# it is not really possible to disable them without reopening stderr which is not advisable.
#
# == Sinatra (v1.4.6) has taken the stage on 51572 for development with backup from WEBrick
# == Sinatra has ended his set (crowd applauds)
#
class TestApp < Sinatra::Base
  # on the fly uncompress gzip content
  use CompressedRequests

  # disable WEBrick logging
  def self.server_settings
    { :AccessLog => [], :Logger => WEBrick::BasicLog::new(nil, WEBrick::BasicLog::FATAL) }
  end

  def self.multiroute(methods, path, &block)
    methods.each do |method|
      method.to_sym
      self.send method, path, &block
    end
  end

  def self.last_request=(request)
    @last_request = request
  end

  def self.last_request
    @last_request
  end

  def self.retry_fail_count=(count)
    @retry_fail_count = count
  end

  def self.retry_fail_count()
    @retry_fail_count || 2
  end

  multiroute(%w(get post put patch delete), "/good") do
    self.class.last_request = request
    [200, "YUP"]
  end

  multiroute(%w(get post put patch delete), "/bad") do
    self.class.last_request = request
    [400, "YUP"]
  end

  multiroute(%w(get post put patch delete), "/retry") do
    self.class.last_request = request

    if self.class.retry_fail_count > 0
      self.class.retry_fail_count -= 1
      [429, "Will succeed in #{self.class.retry_fail_count}"]
    else
      [200, "Done Retrying"]
    end
  end
end
```
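
The `/retry` route above counts down a class-level `retry_fail_count` (default 2), so the first two hits return 429 and the third returns 200. A tiny simulation of that countdown (not part of the spec), which is why the retry examples later expect two retryable responses and three requests in total:

```ruby
retry_fail_count = 2
responses = 3.times.map do
  if retry_fail_count > 0
    retry_fail_count -= 1
    429   # "try again later", treated as retryable by the output
  else
    200   # success once the countdown reaches zero
  end
end
p responses  # => [429, 429, 200]
```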

data/spec/outputs/splunk_spec.rb (continued):

```ruby
RSpec.configure do |config|
  #http://stackoverflow.com/questions/6557079/start-and-call-ruby-http-server-in-the-same-script
  def sinatra_run_wait(app, opts)
    queue = Queue.new

    t = java.lang.Thread.new(
      proc do
        begin
          app.run!(opts) do |server|
            queue.push("started")
          end
        rescue => e
          puts "Error in webserver thread #{e}"
          # ignore
        end
      end
    )
    t.daemon = true
    t.start
    queue.pop # blocks until the run! callback runs
  end

  config.before(:suite) do
    sinatra_run_wait(TestApp, :port => PORT, :server => 'webrick')
    puts "Test webserver on port #{PORT}"
  end
end
```
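
`sinatra_run_wait` starts the test server on a background thread and blocks on a `Queue` until Sinatra's `run!` callback signals readiness. The same "wait until started" pattern in plain Ruby threads (illustrative, not code from the spec):

```ruby
require "thread"

started = Queue.new
server_thread = Thread.new do
  # ... boot the server here ...
  started.push("started")   # signal readiness from the server's startup callback
  sleep                     # keep the thread alive to serve requests
end

started.pop                 # blocks until the background thread signals readiness
puts "server is up"
server_thread.kill
```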

data/spec/outputs/splunk_spec.rb (continued):

```ruby
describe LogStash::Outputs::Splunk do
  # Wait for the async request to finish in this spinlock
  # Requires pool_max to be 1

  let(:port) { PORT }
  let(:event) {
    LogStash::Event.new({"message" => "hi"})
  }
  let(:url) { "http://localhost:#{port}/good" }
  let(:method) { "post" }

  shared_examples("verb behavior") do |method|
    let(:verb_behavior_config) { {"url" => url, "http_method" => method, "pool_max" => 1} }
    subject { LogStash::Outputs::Splunk.new(verb_behavior_config) }

    let(:expected_method) { method.clone.to_sym }
    let(:client) { subject.client }

    before do
      subject.register
      allow(client).to receive(:send).
        with(expected_method, url, anything).
        and_call_original
      allow(subject).to receive(:log_failure).with(any_args)
      allow(subject).to receive(:log_retryable_response).with(any_args)
    end

    context 'sending no events' do
      it 'should not block the pipeline' do
        subject.multi_receive([])
      end
    end

    context "performing a get" do
      describe "invoking the request" do
        before do
          subject.multi_receive([event])
        end

        it "should execute the request" do
          expect(client).to have_received(:send).
            with(expected_method, url, anything)
        end
      end

      context "with passing requests" do
        before do
          subject.multi_receive([event])
        end

        it "should not log a failure" do
          expect(subject).not_to have_received(:log_failure).with(any_args)
        end
      end

      context "with failing requests" do
        let(:url) { "http://localhost:#{port}/bad"}

        before do
          subject.multi_receive([event])
        end

        it "should log a failure" do
          expect(subject).to have_received(:log_failure).with(any_args)
        end
      end

      context "with ignorable failing requests" do
        let(:url) { "http://localhost:#{port}/bad"}
        let(:verb_behavior_config) { super.merge("ignorable_codes" => [400]) }

        before do
          subject.multi_receive([event])
        end

        it "should log a failure" do
          expect(subject).not_to have_received(:log_failure).with(any_args)
        end
      end

      context "with retryable failing requests" do
        let(:url) { "http://localhost:#{port}/retry"}

        before do
          TestApp.retry_fail_count=2
          allow(subject).to receive(:send_event).and_call_original
          subject.multi_receive([event])
        end

        it "should log a retryable response 2 times" do
          expect(subject).to have_received(:log_retryable_response).with(any_args).twice
        end

        it "should make three total requests" do
          expect(subject).to have_received(:send_event).exactly(3).times
        end
      end

    end
  end

  LogStash::Outputs::Splunk::VALID_METHODS.each do |method|
    context "when using '#{method}'" do
      include_examples("verb behavior", method)
    end
  end

  shared_examples("a received event") do
    before do
      TestApp.last_request = nil
    end

    let(:events) { [event] }

    describe "with a good code" do
      before do
        subject.multi_receive(events)
      end

      let(:last_request) { TestApp.last_request }
      let(:body) { last_request.body.read }
      let(:content_type) { last_request.env["CONTENT_TYPE"] }

      it "should receive the request" do
        expect(last_request).to be_truthy
      end

      it "should receive the event as a hash" do
        expect(body).to eql(expected_body)
      end

      it "should have the correct content type" do
        expect(content_type).to eql(expected_content_type)
      end
    end

    describe "a retryable code" do
      let(:url) { "http://localhost:#{port}/retry" }

      before do
        TestApp.retry_fail_count=2
        allow(subject).to receive(:send_event).and_call_original
        allow(subject).to receive(:log_retryable_response)
        subject.multi_receive(events)
      end

      it "should retry" do
        expect(subject).to have_received(:log_retryable_response).with(any_args).twice
      end
    end
  end

  shared_examples "integration tests" do
    let(:base_config) { {} }
    let(:url) { "http://localhost:#{port}/good" }
    let(:event) {
      LogStash::Event.new("foo" => "bar", "baz" => "bot", "user" => "McBest")
    }

    subject { LogStash::Outputs::Http.new(config) }

    before do
      subject.register
    end

    describe "sending with the default (JSON) config" do
      let(:config) {
        base_config.merge({"url" => url, "http_method" => "post", "pool_max" => 1})
      }
      let(:expected_body) { LogStash::Json.dump(event) }
      let(:expected_content_type) { "application/json" }

      include_examples("a received event")
    end

    describe "sending the batch as JSON" do
      let(:config) do
        base_config.merge({"url" => url, "http_method" => "post", "format" => "json_batch"})
      end

      let(:expected_body) { ::LogStash::Json.dump events }
      let(:events) { [::LogStash::Event.new("a" => 1), ::LogStash::Event.new("b" => 2)]}
      let(:expected_content_type) { "application/json" }

      include_examples("a received event")

    end

    describe "sending the event as a form" do
      let(:config) {
        base_config.merge({"url" => url, "http_method" => "post", "pool_max" => 1, "format" => "form"})
      }
      let(:expected_body) { subject.send(:encode, event.to_hash) }
      let(:expected_content_type) { "application/x-www-form-urlencoded" }

      include_examples("a received event")
    end

    describe "sending the event as a message" do
      let(:config) {
        base_config.merge({"url" => url, "http_method" => "post", "pool_max" => 1, "format" => "message", "message" => "%{foo} AND %{baz}"})
      }
      let(:expected_body) { "#{event.get("foo")} AND #{event.get("baz")}" }
      let(:expected_content_type) { "text/plain" }

      include_examples("a received event")
    end

    describe "sending a mapped event" do
      let(:config) {
        base_config.merge({"url" => url, "http_method" => "post", "pool_max" => 1, "mapping" => {"blah" => "X %{foo}"} })
      }
      let(:expected_body) { LogStash::Json.dump("blah" => "X #{event.get("foo")}") }
      let(:expected_content_type) { "application/json" }

      include_examples("a received event")
    end

    describe "sending a mapped, nested event" do
      let(:config) {
        base_config.merge({
          "url" => url,
          "http_method" => "post",
          "pool_max" => 1,
          "mapping" => {
            "host" => "X %{foo}",
            "event" => {
              "user" => "Y %{user}"
            },
            "arrayevent" => [{
              "user" => "Z %{user}"
            }]
          }
        })
      }
      let(:expected_body) {
        LogStash::Json.dump({
          "host" => "X #{event.get("foo")}",
          "event" => {
            "user" => "Y #{event.get("user")}"
          },
          "arrayevent" => [{
            "user" => "Z #{event.get("user")}"
          }]
        })
      }
      let(:expected_content_type) { "application/json" }

      include_examples("a received event")
    end
  end

  describe "integration test without gzip compression" do
    include_examples("integration tests")
  end

  describe "integration test with gzip compression" do
    include_examples("integration tests") do
      let(:base_config) { { "http_compression" => true } }
    end
  end
end
```