logstash-input-okta_enterprise 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +2 -0
- data/CONTRIBUTORS +10 -0
- data/DEVELOPER.md +2 -0
- data/Gemfile +3 -0
- data/LICENSE +11 -0
- data/README.md +86 -0
- data/lib/logstash/inputs/okta_enterprise.rb +619 -0
- data/logstash-input-okta_enterprise.gemspec +33 -0
- data/spec/inputs/okta_enterprise_spec.rb +498 -0
- metadata +207 -0
# Gem specification for the logstash-input-okta_enterprise plugin.
# RubyGems evaluates this file; the Gem::Specification instance is the
# file's resulting value.
Gem::Specification.new do |s|
  s.name          = 'logstash-input-okta_enterprise'
  s.version       = '0.1.0'
  s.licenses      = ['Apache License (2.0)']
  s.summary       = 'This plugin fetches log events from Okta'
  s.description   = 'This plugin fetches log events from Okta'
  s.homepage      = 'https://github.com/SecurityRiskAdvisors/logstash-input-okta_enterprise'
  s.authors       = ['Security Risk Advisors']
  s.email         = 'security@securityriskadvisors.com'
  s.require_paths = ['lib']

  # Files
  s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']

  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "input" }

  # Gem dependencies
  #s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
  s.add_runtime_dependency "logstash-core", ">= 2.0.0", "< 3.0.0"
  s.add_runtime_dependency 'logstash-codec-plain'
  s.add_runtime_dependency 'stud', '>= 0.0.22'
  s.add_runtime_dependency 'logstash-mixin-http_client', ">=2.2.4", "< 5.0.0"
  s.add_runtime_dependency 'manticore', ">=0.6.1"
  s.add_runtime_dependency 'rufus-scheduler', "~>3.0.9"

  s.add_development_dependency 'logstash-devutils'
  s.add_development_dependency 'logstash-codec-json'
  s.add_development_dependency 'flores'
  s.add_development_dependency 'timecop'
end
# Spec suite for LogStash::Inputs::OktaEnterprise.
# Covers: configuration validation, scheduler behavior, event/metadata
# handling (success and failure paths), interruptibility, and the
# base64-encoded state-file persistence of the pagination URL.
require "logstash/devutils/rspec/spec_helper"
require 'logstash/inputs/okta_enterprise'
require 'flores/random'
require "timecop"
require "base64"
require "rspec/wait"

describe LogStash::Inputs::OktaEnterprise do
  let(:queue) { Queue.new }
  let(:default_schedule) {
    { "every" => "30s" }
  }
  let(:default_chunk_size) { 1000 }
  let(:default_auth_token_env) { "asdflkjasdflkjasdf932r098-asdf" }
  let(:default_url) { "http://localhost:38432/" }
  let(:metadata_target) { "_http_poller_metadata" }

  let(:default_opts) {
    {
      "schedule" => default_schedule,
      "chunk_size" => default_chunk_size,
      "url" => default_url,
      "auth_token_env" => default_auth_token_env,
      "metadata_target" => metadata_target,
      "codec" => "json"
    }
  }
  let(:klass) { LogStash::Inputs::OktaEnterprise }

  describe "config" do
    shared_examples "configuration errors" do
      it "raises an exception" do
        expect {subject.register}.to raise_exception(LogStash::ConfigurationError)
      end
    end

    # NOTE(review): the original used `before(:each) { subject = klass.new(opts) }`,
    # which only assigns a hook-local variable and is discarded — examples would
    # have used RSpec's implicit subject (described_class.new with no opts).
    # An explicit subject ensures each context's `opts` are actually exercised.
    subject { klass.new(opts) }

    context "The start date is not in the correct format" do
      let(:opts) { default_opts.merge({"start_date" => "1234567890"}) }
      include_examples("configuration errors")
    end

    context "Both start date and filter are provided" do
      let(:opts) { default_opts.merge({"start_date" => "2013-01-01T12:00:00.000-07:00","filter" => "this is a filter"}) }
      include_examples("configuration errors")
    end

    context "auth_token management" do
      # Options with the token supplied via file instead of env var.
      let(:auth_file_opts) {
        auth_file_opts = default_opts.merge({"auth_token_file" => "/dev/null"}).clone
        auth_file_opts.delete("auth_token_env")
        auth_file_opts
      }

      context "both auth_token env and file are provided" do
        let(:opts) {default_opts.merge({"auth_token_file" => "/dev/null"})}
        include_examples("configuration errors")
      end

      context "neither auth_token env nor file are provided" do
        let(:opts) {
          opts = default_opts.clone
          opts.delete("auth_token_env")
          opts
        }
        include_examples("configuration errors")
      end

      context "auth_token_file is too large" do
        let(:opts) {auth_file_opts}
        # 2 KiB reported size trips the plugin's size sanity check.
        before {allow(File).to receive(:size).with(opts["auth_token_file"]) { 1 * 2**11 }}
        include_examples("configuration errors")
      end

      context "auth_token_file could not be read" do
        let(:opts) {auth_file_opts}
        before {allow(File).to receive(:read).with(opts["auth_token_file"]) { raise IOError }}
        include_examples("configuration errors")
      end

      context "auth_token_env with invalid characters" do
        let(:opts) {default_opts.merge({"auth_token_env" => "%$%$%$%$%$"})}
        include_examples("configuration errors")
      end
    end
  end

  describe "instances" do
    subject { klass.new(default_opts) }

    before do
      subject.register
    end

    describe "#run" do
      it "should setup a scheduler" do
        runner = Thread.new do
          subject.run(double("queue"))
          expect(subject.instance_variable_get("@scheduler")).to be_a_kind_of(Rufus::Scheduler)
        end
        runner.kill
        runner.join
      end
    end

    describe "#run_once" do
      it "should issue an async request for each url" do
        expect(subject).to receive(:request_async).with(queue).once

        subject.send(:run_once, queue) # :run_once is a private method
      end
    end
  end

  describe "scheduler configuration" do
    before do
      instance.register
    end

    context "given 'cron' expression" do
      let(:opts) { default_opts.merge("schedule" => {"cron" => "* * * * * UTC"}) }
      let(:instance) { klass.new(opts) }
      it "should run at the schedule" do
        # Accelerate wall-clock 60x so a per-minute cron fires twice in ~3s.
        Timecop.travel(Time.new(2000,1,1,0,0,0,'+00:00'))
        Timecop.scale(60)
        queue = Queue.new
        runner = Thread.new do
          instance.run(queue)
        end
        sleep 3
        instance.stop
        runner.kill
        runner.join
        expect(queue.size).to eq(2)
        Timecop.return
      end
    end

    context "given 'at' expression" do
      let(:opts) { default_opts.merge("schedule" => {"at" => "2000-01-01 00:05:00 +0000"}) }
      let(:instance) { klass.new(opts) }
      it "should run at the schedule" do
        Timecop.travel(Time.new(2000,1,1,0,0,0,'+00:00'))
        Timecop.scale(60 * 5)
        queue = Queue.new
        runner = Thread.new do
          instance.run(queue)
        end
        sleep 2
        instance.stop
        runner.kill
        runner.join
        expect(queue.size).to eq(1)
        Timecop.return
      end
    end

    context "given 'every' expression" do
      let(:opts) { default_opts.merge("schedule" => {"every" => "2s"}) }
      let(:instance) { klass.new(opts) }
      it "should run at the schedule" do
        queue = Queue.new
        runner = Thread.new do
          instance.run(queue)
        end
        #T       0123456
        #events  x x x x
        #expects 3 events at T=5
        sleep 5
        instance.stop
        runner.kill
        runner.join
        expect(queue.size).to eq(3)
      end
    end

    context "given 'in' expression" do
      let(:opts) { default_opts.merge("schedule" => {"in" => "2s"}) }
      let(:instance) { klass.new(opts) }
      it "should run at the schedule" do
        queue = Queue.new
        runner = Thread.new do
          instance.run(queue)
        end
        sleep 3
        instance.stop
        runner.kill
        runner.join
        expect(queue.size).to eq(1)
      end
    end
  end

  describe "events" do
    shared_examples("matching metadata") {
      let(:metadata) { event.get(metadata_target) }
      let(:options) { defined?(settings) ? settings : opts }
      # The URL gets modified b/c of the limit that is placed on the API
      let(:metadata_url) { "#{options["url"]}?limit=#{options["chunk_size"]}" }
      it "should have the correct request url" do
        expect(metadata["url"].to_s).to eql(metadata_url)
      end

      it "should have the correct code" do
        expect(metadata["code"]).to eql(code)
      end
    }

    shared_examples "unprocessable_requests" do
      let(:poller) { LogStash::Inputs::OktaEnterprise.new(settings) }
      subject(:event) {
        poller.send(:run_once, queue)
        queue.pop(true)
      }

      before do
        poller.register
        allow(poller).to receive(:handle_failure).and_call_original
        allow(poller).to receive(:handle_success)
        event # materialize the subject
      end

      it "should enqueue a message" do
        expect(event).to be_a(LogStash::Event)
      end

      it "should enqueue a message with 'http_request_failure' set" do
        expect(event.get("http_request_failure")).to be_a(Hash)
      end

      it "should tag the event with '_http_request_failure'" do
        expect(event.get("tags")).to include('_http_request_failure')
      end

      it "should invoke handle failure exactly once" do
        expect(poller).to have_received(:handle_failure).once
      end

      it "should not invoke handle success at all" do
        expect(poller).not_to have_received(:handle_success)
      end

      include_examples("matching metadata")
    end

    context "with a non responsive server" do
      context "due to a non-existent host" do # Fail with handlers
        let(:url) { "http://thouetnhoeu89ueoueohtueohtneuohn" }
        let(:code) { nil } # no response expected

        let(:settings) { default_opts.merge("url" => url) }

        include_examples("unprocessable_requests")
      end

      context "due to a bogus port number" do # fail with return?
        let(:invalid_port) { Flores::Random.integer(65536..1000000) }

        let(:url) { "http://127.0.0.1:#{invalid_port}" }
        let(:settings) { default_opts.merge("url" => url) }
        let(:code) { nil } # No response expected

        include_examples("unprocessable_requests")
      end
    end

    describe "a valid request and decoded response" do
      let(:payload) {{"a" => 2, "hello" => ["a", "b", "c"]}}
      let(:response_body) { LogStash::Json.dump(payload) }
      let(:code) { 200 }
      let(:url) { default_url }

      let(:opts) { default_opts }
      let(:instance) {
        klass.new(opts)
      }

      subject(:event) {
        queue.pop(true)
      }

      before do
        instance.register
        allow(instance).to receive(:decorate)
        # Stub every request under the base url with the canned response.
        instance.client.stub(%r{#{url}.*},
                             :body => response_body,
                             :code => code
        )

        instance.send(:run_once, queue)
      end

      it "should have a matching message" do
        expect(event.to_hash).to include(payload)
      end

      it "should decorate the event" do
        expect(instance).to have_received(:decorate).once
      end

      include_examples("matching metadata")

      context "with an empty body" do
        let(:response_body) { "" }
        it "should return an empty event" do
          instance.send(:run_once, queue)
          expect(event.get("[_http_poller_metadata][response_headers][content-length]")).to eql("0")
        end
      end

      context "with metadata omitted" do
        let(:opts) {
          opts = default_opts.clone
          opts.delete("metadata_target")
          opts
        }

        it "should not have any metadata on the event" do
          instance.send(:run_once, queue)
          expect(event.get(metadata_target)).to be_nil
        end
      end

      context "with a specified target" do
        let(:target) { "mytarget" }
        let(:opts) { default_opts.merge("target" => target) }

        it "should store the event info in the target" do
          # When events go through the pipeline they are java-ified
          # this normalizes the payload to java types
          payload_normalized = LogStash::Json.load(LogStash::Json.dump(payload))
          expect(event.get(target)).to include(payload_normalized)
        end
      end

      context "with non-200 HTTP response codes" do
        # Each example supplies its status code via example metadata.
        let(:code) { |example| example.metadata[:http_code] }
        let(:response_body) { "{}" }

        it "responds to a 500 code", :http_code => 500 do
          instance.send(:run_once, queue)
          expect(event.to_hash).to include({"HTTP-Code" => 500})
          expect(event.get("tags")).to include('_okta_response_error')
        end
        it "responds to a 401/Unauthorized code", :http_code => 401 do
          instance.send(:run_once, queue)
          expect(event.to_hash).to include({"HTTP-Code" => 401})
          expect(event.get("tags")).to include('_okta_response_error')
        end
        it "responds to a 400 code", :http_code => 400 do
          instance.send(:run_once, queue)
          expect(event.to_hash).to include({"HTTP-Code" => 400})
          expect(event.get("tags")).to include('_okta_response_error')
        end
        context "specific okta errors" do
          # E0000031 is Okta's "invalid filter" error code.
          let(:payload) { {:okta_error => "E0000031" } }
          let(:response_body) { LogStash::Json.dump(payload) }

          it "responds to a filter string error", :http_code => 400 do
            expect(event.to_hash).to include({"HTTP-Code" => 400})
            expect(event.to_hash).to include({"Okta-Plugin-Status" => "Filter string was not valid."})
            expect(event.get("tags")).to include('_okta_response_error')
          end
        end
      end
    end
  end

  describe "stopping" do
    let(:config) { default_opts }
    it_behaves_like "an interruptible input plugin"
  end

  describe "state file" do
    context "when being setup" do

      let(:opts) { default_opts.merge({'state_file_base' => "/tmp/okta_test_"}) }
      subject { klass.new(opts) }

      # The plugin persists the next-page URL as a urlsafe-base64 suffix
      # on the state file name.
      let(:state_file_url) { "http://localhost:38432/?limit=1000&after=asdfasdf" }
      let(:state_file_url_b64) { Base64.urlsafe_encode64(state_file_url) }
      let(:test_url) { "#{opts["url"]}?limit=#{opts["chunk_size"]}" }
      let(:state_file_url_changed) { "http://example.com/?limit=1000" }
      let(:state_file_url_changed_b64) { Base64.urlsafe_encode64(state_file_url_changed) }

      it "creates the file correctly" do
        expect(File).to receive(:open).with("#{opts['state_file_base']}start","w") {}
        subject.register
      end

      it "checks the file checks are running" do
        #expect(File).to receive(:readable?).with(File.dirname(opts['state_file_base']))
        allow(File).to receive(:readable?).with(File.dirname(opts['state_file_base'])) { false }
        allow(File).to receive(:executable?).with(File.dirname(opts['state_file_base'])) { false }
        allow(File).to receive(:writable?).with(File.dirname(opts['state_file_base'])) { false }
        expect {subject.register}.to raise_exception(LogStash::ConfigurationError)
      end

      it "raises an error on file creation" do
        allow(File).to receive(:open).with("#{opts['state_file_base']}start","w") { raise IOError }
        expect {subject.register}.to raise_exception(LogStash::ConfigurationError)
      end

      it "raises exception when there is more than one file" do
        allow(File).to receive(:open).with("#{opts['state_file_base']}start","w") {}
        allow(Dir).to receive(:[]) { ["#{opts['state_file_base']}1","#{opts['state_file_base']}2"] }
        expect {subject.register}.to raise_exception(LogStash::ConfigurationError)
      end

      it "creates a url based on the state file" do
        allow(Dir).to receive(:[]) { [opts['state_file_base'] + state_file_url_b64] }
        subject.register
        expect(subject.instance_variable_get("@url")).to eql(state_file_url)
      end

      it "uses the URL from options when state file is in a start state" do
        allow(Dir).to receive(:[]) { [opts['state_file_base'] + "start"] }
        subject.register
        expect(subject.instance_variable_get("@url").to_s).to eql(test_url)
      end

      it "raises an error when the config url is not part of the saved state" do
        allow(Dir).to receive(:[]) { [opts['state_file_base'] + state_file_url_changed_b64] }
        expect {subject.register}.to raise_exception(LogStash::ConfigurationError)
      end
    end

    context "when running" do
      let(:opts) { default_opts.merge({'state_file_base' => "/tmp/okta_test_"}) }
      let(:instance) { klass.new(opts) }

      # NOTE(review): the trailing ']}]' in this fixture looks unbalanced;
      # preserved verbatim from the published version since it is opaque
      # payload data, not parsed by the test.
      let(:payload) { '[{"eventId":"tevIMARaEyiSzm3sm1gvfn8cA1479235809000"}]}]' }
      let(:response_body) { LogStash::Json.dump(payload) }

      let(:url_initial) { "http://localhost:38432/events?after=1" }
      let(:url_initial_b64) { Base64.urlsafe_encode64(url_initial) }
      let(:url_final) { "http://localhost:38432/events?after=2" }
      let(:url_final_b64) { Base64.urlsafe_encode64(url_final) }
      # RFC 5988 style Link headers: "next" drives pagination.
      let(:headers) { {"link" => ["<#{url_initial}>; rel=\"self\"", "<#{url_final}>; rel=\"next\""]} }
      let(:code) { 200 }

      before(:each) do |example|
        allow(Dir).to receive(:[]) { [opts['state_file_base'] + url_initial_b64] }

        instance.register
        instance.client.stub( url_initial,
                              :headers => headers,
                              :body => response_body,
                              :code => code )

        # Stop the pagination loop on any failure instead of retrying.
        allow(instance).to receive(:handle_failure) { instance.instance_variable_set(:@continue,false) }
      end

      it "updates the state file after data is fetched" do
        expect(File).to receive(:rename).with(opts['state_file_base'] + url_initial_b64, opts['state_file_base'] + url_final_b64) { 0 }
        instance.client.stub( url_final,
                              :headers => {:link => "<#{url_final}>; rel=\"self\""},
                              :body => "{}",
                              :code => code )
        instance.send(:run_once, queue)
      end

      it "updates the state file after a failure" do
        expect(File).to receive(:rename).with(opts['state_file_base'] + url_initial_b64, opts['state_file_base'] + url_final_b64) { 0 }
        instance.send(:run_once, queue)
      end

      context "when stop is called" do
        it "saves the state in the file" do
          # We are still testing the same condition, file renaming.
          expect(File).to receive(:rename).with(opts['state_file_base'] + url_initial_b64, opts['state_file_base'] + url_final_b64) { 0 }

          # Force a sleep to make the thread hang in the failure condition.
          allow(instance).to receive(:handle_failure) {
            instance.instance_variable_set(:@continue,false)
            sleep(30)
          }

          plugin_thread = Thread.new(instance,queue) { |subject, queue| instance.send(:run, queue) }

          # Sleep for a bit to make sure things are started.
          sleep 0.5
          expect(plugin_thread).to be_alive

          instance.do_stop

          # As they say in the logstash thread, why 3?
          # Because 2 is too short, and 4 is too long.
          wait(3).for { plugin_thread }.to_not be_alive
        end
      end
    end
  end
end