logstash-integration-aws 7.1.1-java
- checksums.yaml +7 -0
- data/CHANGELOG.PRE.MERGE.md +658 -0
- data/CHANGELOG.md +33 -0
- data/CONTRIBUTORS +40 -0
- data/Gemfile +11 -0
- data/LICENSE +202 -0
- data/NOTICE.TXT +5 -0
- data/README.md +205 -0
- data/VERSION +1 -0
- data/docs/codec-cloudfront.asciidoc +53 -0
- data/docs/codec-cloudtrail.asciidoc +45 -0
- data/docs/index.asciidoc +36 -0
- data/docs/input-cloudwatch.asciidoc +320 -0
- data/docs/input-s3.asciidoc +346 -0
- data/docs/input-sqs.asciidoc +287 -0
- data/docs/output-cloudwatch.asciidoc +321 -0
- data/docs/output-s3.asciidoc +442 -0
- data/docs/output-sns.asciidoc +166 -0
- data/docs/output-sqs.asciidoc +242 -0
- data/lib/logstash/codecs/cloudfront.rb +84 -0
- data/lib/logstash/codecs/cloudtrail.rb +47 -0
- data/lib/logstash/inputs/cloudwatch.rb +338 -0
- data/lib/logstash/inputs/s3.rb +466 -0
- data/lib/logstash/inputs/sqs.rb +196 -0
- data/lib/logstash/outputs/cloudwatch.rb +346 -0
- data/lib/logstash/outputs/s3/file_repository.rb +193 -0
- data/lib/logstash/outputs/s3/path_validator.rb +18 -0
- data/lib/logstash/outputs/s3/size_and_time_rotation_policy.rb +24 -0
- data/lib/logstash/outputs/s3/size_rotation_policy.rb +26 -0
- data/lib/logstash/outputs/s3/temporary_file.rb +114 -0
- data/lib/logstash/outputs/s3/temporary_file_factory.rb +126 -0
- data/lib/logstash/outputs/s3/time_rotation_policy.rb +26 -0
- data/lib/logstash/outputs/s3/uploader.rb +76 -0
- data/lib/logstash/outputs/s3/writable_directory_validator.rb +17 -0
- data/lib/logstash/outputs/s3/write_bucket_permission_validator.rb +60 -0
- data/lib/logstash/outputs/s3.rb +442 -0
- data/lib/logstash/outputs/sns.rb +133 -0
- data/lib/logstash/outputs/sqs.rb +167 -0
- data/lib/logstash/plugin_mixins/aws_config/generic.rb +54 -0
- data/lib/logstash/plugin_mixins/aws_config/v2.rb +93 -0
- data/lib/logstash/plugin_mixins/aws_config.rb +8 -0
- data/lib/logstash-integration-aws_jars.rb +4 -0
- data/lib/tasks/build.rake +15 -0
- data/logstash-integration-aws.gemspec +55 -0
- data/spec/codecs/cloudfront_spec.rb +92 -0
- data/spec/codecs/cloudtrail_spec.rb +56 -0
- data/spec/fixtures/aws_credentials_file_sample_test.yml +2 -0
- data/spec/fixtures/aws_temporary_credentials_file_sample_test.yml +3 -0
- data/spec/fixtures/cloudfront.log +4 -0
- data/spec/fixtures/compressed.log.gee.zip +0 -0
- data/spec/fixtures/compressed.log.gz +0 -0
- data/spec/fixtures/compressed.log.gzip +0 -0
- data/spec/fixtures/invalid_utf8.gbk.log +2 -0
- data/spec/fixtures/json.log +2 -0
- data/spec/fixtures/json_with_message.log +2 -0
- data/spec/fixtures/multiline.log +6 -0
- data/spec/fixtures/multiple_compressed_streams.gz +0 -0
- data/spec/fixtures/uncompressed.log +2 -0
- data/spec/inputs/cloudwatch_spec.rb +85 -0
- data/spec/inputs/s3_spec.rb +610 -0
- data/spec/inputs/sincedb_spec.rb +17 -0
- data/spec/inputs/sqs_spec.rb +324 -0
- data/spec/integration/cloudwatch_spec.rb +25 -0
- data/spec/integration/dynamic_prefix_spec.rb +92 -0
- data/spec/integration/gzip_file_spec.rb +62 -0
- data/spec/integration/gzip_size_rotation_spec.rb +63 -0
- data/spec/integration/outputs/sqs_spec.rb +98 -0
- data/spec/integration/restore_from_crash_spec.rb +133 -0
- data/spec/integration/s3_spec.rb +66 -0
- data/spec/integration/size_rotation_spec.rb +59 -0
- data/spec/integration/sqs_spec.rb +110 -0
- data/spec/integration/stress_test_spec.rb +60 -0
- data/spec/integration/time_based_rotation_with_constant_write_spec.rb +60 -0
- data/spec/integration/time_based_rotation_with_stale_write_spec.rb +64 -0
- data/spec/integration/upload_current_file_on_shutdown_spec.rb +51 -0
- data/spec/outputs/cloudwatch_spec.rb +38 -0
- data/spec/outputs/s3/file_repository_spec.rb +143 -0
- data/spec/outputs/s3/size_and_time_rotation_policy_spec.rb +77 -0
- data/spec/outputs/s3/size_rotation_policy_spec.rb +41 -0
- data/spec/outputs/s3/temporary_file_factory_spec.rb +89 -0
- data/spec/outputs/s3/temporary_file_spec.rb +47 -0
- data/spec/outputs/s3/time_rotation_policy_spec.rb +60 -0
- data/spec/outputs/s3/uploader_spec.rb +69 -0
- data/spec/outputs/s3/writable_directory_validator_spec.rb +40 -0
- data/spec/outputs/s3/write_bucket_permission_validator_spec.rb +49 -0
- data/spec/outputs/s3_spec.rb +232 -0
- data/spec/outputs/sns_spec.rb +160 -0
- data/spec/plugin_mixin/aws_config_spec.rb +217 -0
- data/spec/spec_helper.rb +8 -0
- data/spec/support/helpers.rb +121 -0
- data/spec/unit/outputs/sqs_spec.rb +247 -0
- data/vendor/jar-dependencies/org/logstash/plugins/integration/aws/logstash-integration-aws/7.1.1/logstash-integration-aws-7.1.1.jar +0 -0
- metadata +472 -0
data/spec/inputs/sincedb_spec.rb
@@ -0,0 +1,17 @@
# encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "logstash/inputs/s3"
require "stud/temporary"
require "fileutils"

describe LogStash::Inputs::S3::SinceDB::File do
  let(:file) { Stud::Temporary.file.path }
  subject { LogStash::Inputs::S3::SinceDB::File.new(file) }

  before do
    FileUtils.touch(file)
  end

  it "doesnt raise an exception if the file is empty" do
    expect { subject.read }.not_to raise_error
  end
end
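
For context on what this spec guards: the sincedb is the input's bookkeeping file, and a fresh (empty) one must not blow up on first read. The following is a hypothetical sketch, not the plugin's actual `SinceDB::File` implementation; it assumes the sincedb persists a single timestamp and falls back to the epoch when the file is empty, which is the behaviour the spec relies on.

# Hypothetical sincedb-like bookkeeper (illustrative only).
require "time"

class SinceDBSketch
  def initialize(path)
    @path = path
  end

  # Return the stored timestamp, or a very old time when nothing was written yet,
  # so that an empty file never raises.
  def read
    content = ::File.read(@path).chomp
    content.empty? ? Time.at(0) : Time.parse(content)
  end

  def write(time = Time.now)
    ::File.write(@path, time.iso8601)
  end
end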
data/spec/inputs/sqs_spec.rb
@@ -0,0 +1,324 @@
# encoding: utf-8
require "spec_helper"
require "logstash/devutils/rspec/shared_examples"
require "logstash/inputs/sqs"
require "logstash/errors"
require "logstash/event"
require "logstash/json"
require "aws-sdk-sqs"
require "ostruct"

describe LogStash::Inputs::SQS do
  let(:queue_name) { "the-infinite-pandora-box" }
  let(:queue_url) { "https://sqs.test.local/#{queue_name}" }
  let(:config) do
    {
      "region" => "us-east-1",
      "access_key_id" => "123",
      "secret_access_key" => "secret",
      "queue" => queue_name
    }
  end

  let(:input) { LogStash::Inputs::SQS.new(config) }
  let(:decoded_message) { { "bonjour" => "awesome" } }
  let(:encoded_message) { double("sqs_message", :body => LogStash::Json::dump(decoded_message)) }

  subject { input }

  let(:mock_sqs) { Aws::SQS::Client.new({ :stub_responses => true }) }

  context "with invalid credentials" do
    before do
      expect(Aws::SQS::Client).to receive(:new).and_return(mock_sqs)
      expect(mock_sqs).to receive(:get_queue_url).with({ :queue_name => queue_name }) { raise Aws::SQS::Errors::ServiceError.new("bad-something", "bad token") }
    end

    it "raises a Configuration error if the credentials are bad" do
      expect { subject.register }.to raise_error(LogStash::ConfigurationError)
    end
  end

  context "valid credentials" do
    let(:queue) { [] }

    it "doesn't raise an error with valid credentials" do
      expect(Aws::SQS::Client).to receive(:new).and_return(mock_sqs)
      expect(mock_sqs).to receive(:get_queue_url).with({ :queue_name => queue_name }).and_return({ :queue_url => queue_url })
      expect { subject.register }.not_to raise_error
    end

    context "when queue_aws_account_id option is specified" do
      let(:queue_account_id) { "123456789012" }
      let(:config) do
        {
          "region" => "us-east-1",
          "access_key_id" => "123",
          "secret_access_key" => "secret",
          "queue" => queue_name,
          "queue_owner_aws_account_id" => queue_account_id
        }
      end

      it "passes the option to sqs client" do
        expect(Aws::SQS::Client).to receive(:new).and_return(mock_sqs)
        expect(mock_sqs).to receive(:get_queue_url).with({ :queue_name => queue_name, :queue_owner_aws_account_id => queue_account_id }).and_return({ :queue_url => queue_url })
        expect { subject.register }.not_to raise_error
      end
    end

    describe "additional_settings" do
      context "supported settings" do
        let(:config) {
          {
            "additional_settings" => { "force_path_style" => 'true', "ssl_verify_peer" => 'false', "profile" => 'logstash' },
            "queue" => queue_name
          }
        }

        it 'should instantiate Aws::SQS clients with force_path_style set' do
          expect(Aws::SQS::Client).to receive(:new).and_return(mock_sqs)
          # mock a remote call to retrieve the queue URL
          expect(mock_sqs).to receive(:get_queue_url).with({ :queue_name => queue_name }).and_return({ :queue_url => queue_url })

          expect(subject.aws_options_hash).to include({ :force_path_style => true, :ssl_verify_peer => false, :profile => 'logstash' })

          expect { subject.register }.not_to raise_error
        end
      end

      context "unsupported settings" do
        let(:config) {
          {
            "additional_settings" => { "stub_responses" => 'true', "invalid_option" => "invalid" },
            "queue" => queue_name
          }
        }

        it 'must fail with ArgumentError' do
          expect { subject.register }.to raise_error(ArgumentError, /invalid_option/)
        end
      end
    end

    context "when interrupting the plugin" do
      before do
        expect(Aws::SQS::Client).to receive(:new).and_return(mock_sqs)
        expect(mock_sqs).to receive(:get_queue_url).with({ :queue_name => queue_name }).and_return({ :queue_url => queue_url })
        expect(subject).to receive(:poller).and_return(mock_sqs).at_least(:once)

        # We have to make sure we create a bunch of events
        # so we actually really try to stop the plugin.
        #
        # rspec's `and_yield` lets you define a fixed amount of possible
        # yielded values and doesn't allow you to create an infinite loop.
        # And since we are actually creating a thread we need to make sure
        # we have enough work to keep the thread busy until we kill it.
        #
        # I haven't found a way to make it rspec friendly
        mock_sqs.instance_eval do
          def poll(polling_options = {})
            loop do
              yield [OpenStruct.new(:body => LogStash::Json::dump({ "message" => "hello world" }))], OpenStruct.new
            end
          end
        end
      end

      it_behaves_like "an interruptible input plugin"
    end

    context "enrich event" do
      let(:event) { LogStash::Event.new }

      let(:message_id) { "123" }
      let(:md5_of_body) { "dr strange" }
      let(:sent_timestamp) { LogStash::Timestamp.new }
      let(:epoch_timestamp) { (sent_timestamp.utc.to_f * 1000).to_i }

      let(:id_field) { "my_id_field" }
      let(:md5_field) { "my_md5_field" }
      let(:sent_timestamp_field) { "my_sent_timestamp_field" }

      let(:message) do
        double("message", :message_id => message_id, :md5_of_body => md5_of_body, :attributes => { LogStash::Inputs::SQS::SENT_TIMESTAMP => epoch_timestamp })
      end

      subject { input.add_sqs_data(event, message) }

      context "when the option is specified" do
        let(:config) do
          {
            "region" => "us-east-1",
            "access_key_id" => "123",
            "secret_access_key" => "secret",
            "queue" => queue_name,
            "id_field" => id_field,
            "md5_field" => md5_field,
            "sent_timestamp_field" => sent_timestamp_field
          }
        end

        it "add the `message_id`" do
          expect(subject.get(id_field)).to eq(message_id)
        end

        it "add the `md5_of_body`" do
          expect(subject.get(md5_field)).to eq(md5_of_body)
        end

        it "add the `sent_timestamp`" do
          expect(subject.get(sent_timestamp_field).to_i).to eq(sent_timestamp.to_i)
        end
      end

      context "when the option isn't specified" do
        it "doesnt add the `message_id`" do
          expect(subject).not_to include(id_field)
        end

        it "doesnt add the `md5_of_body`" do
          expect(subject).not_to include(md5_field)
        end

        it "doesnt add the `sent_timestamp`" do
          expect(subject).not_to include(sent_timestamp_field)
        end
      end
    end

    context "when decoding body" do
      subject { LogStash::Inputs::SQS::new(config.merge({ "codec" => "json" })) }

      it "uses the specified codec" do
        subject.handle_message(encoded_message, queue)
        expect(queue.pop.get("bonjour")).to eq(decoded_message["bonjour"])
      end
    end

    context "receiving messages" do
      before do
        expect(subject).to receive(:poller).and_return(mock_sqs).at_least(:once)
      end

      it "creates logstash event" do
        expect(mock_sqs).to receive(:poll).with(anything()).and_yield([encoded_message], double("stats"))
        subject.run(queue)
        expect(queue.pop.get("bonjour")).to eq(decoded_message["bonjour"])
      end

      context 'can create multiple events' do
        require "logstash/codecs/json_lines"
        let(:config) { super().merge({ "codec" => "json_lines" }) }
        let(:first_message) { { "sequence" => "first" } }
        let(:second_message) { { "sequence" => "second" } }
        let(:encoded_message) { double("sqs_message", :body => "#{LogStash::Json::dump(first_message)}\n#{LogStash::Json::dump(second_message)}\n") }

        it 'creates multiple events' do
          expect(mock_sqs).to receive(:poll).with(anything()).and_yield([encoded_message], double("stats"))
          subject.run(queue)
          events = queue.map { |e| e.get('sequence') }
          expect(events).to match_array([first_message['sequence'], second_message['sequence']])
        end
      end
    end

    context "on errors" do
      let(:payload) { "Hello world" }

      before do
        expect(subject).to receive(:poller).and_return(mock_sqs).at_least(:once)
      end

      context "SQS error" do
        it "retry to fetch messages" do
          # change the poller implementation to raise SQS errors.
          had_error = false

          # set the expectation of `#sleep` on the plugin instance itself
          expect(subject).to receive(:sleep).with(LogStash::Inputs::SQS::BACKOFF_SLEEP_TIME)
          expect(mock_sqs).to receive(:poll).with(anything()).at_most(2) do
            unless had_error
              had_error = true
              raise Aws::SQS::Errors::ServiceError.new("testing", "testing exception")
            end

            queue << payload
          end

          subject.run(queue)

          expect(queue.size).to eq(1)
          expect(queue.pop).to eq(payload)
        end
      end

      context "SQS error (retries)" do
        it "retry to fetch messages" do
          sleep_time = LogStash::Inputs::SQS::BACKOFF_SLEEP_TIME
          expect(subject).to receive(:sleep).with(sleep_time)
          expect(subject).to receive(:sleep).with(sleep_time * 2)
          expect(subject).to receive(:sleep).with(sleep_time * 4)

          error_count = 0
          expect(mock_sqs).to receive(:poll).with(anything()).at_most(4) do
            error_count += 1
            if error_count <= 3
              raise Aws::SQS::Errors::QueueDoesNotExist.new("testing", "testing exception (#{error_count})")
            end

            queue << payload
          end

          subject.run(queue)

          expect(queue.size).to eq(1)
          expect(queue.pop).to eq(payload)
        end
      end

      context "networking error" do
        before(:all) { require 'seahorse/client/networking_error' }

        it "retry to fetch messages" do
          sleep_time = LogStash::Inputs::SQS::BACKOFF_SLEEP_TIME
          expect(subject).to receive(:sleep).with(sleep_time).twice

          error_count = 0
          expect(mock_sqs).to receive(:poll).with(anything()).at_most(5) do
            error_count += 1
            if error_count == 1
              raise Seahorse::Client::NetworkingError.new(Net::OpenTimeout.new, 'timeout')
            end
            if error_count == 3
              raise Seahorse::Client::NetworkingError.new(SocketError.new('spec-error'))
            end

            queue << payload
          end

          subject.run(queue)
          subject.run(queue)

          expect(queue.size).to eq(2)
          expect(queue.pop).to eq(payload)
        end
      end

      context "other error" do
        it "stops executing the code and raise the exception" do
          expect(mock_sqs).to receive(:poll).with(anything()).at_most(2) do
            raise RuntimeError
          end

          expect { subject.run(queue) }.to raise_error(RuntimeError)
        end
      end
    end
  end
end
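
The retry contexts above pin down an exponential backoff contract: after consecutive failures the plugin sleeps `BACKOFF_SLEEP_TIME`, then twice that, then four times that, and the delay resets after a successful poll (hence `sleep(sleep_time).twice` for two errors separated by a success). `BACKOFF_SLEEP_TIME` is the plugin's real constant; the method name, `BACKOFF_FACTOR`, and `max_time` below are illustrative assumptions, not the shipped implementation.

# Hedged sketch of the backoff behaviour the specs exercise.
BACKOFF_SLEEP_TIME = 2
BACKOFF_FACTOR = 2

def run_with_backoff(max_time: 64)
  next_sleep = BACKOFF_SLEEP_TIME
  begin
    yield                            # poll SQS (the plugin's poller loops internally)
    next_sleep = BACKOFF_SLEEP_TIME  # reset the delay after a successful poll
  rescue StandardError => e
    warn "SQS error, retrying in #{next_sleep}s: #{e.message}"
    sleep(next_sleep)
    next_sleep = [next_sleep * BACKOFF_FACTOR, max_time].min
    retry
  end
end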
data/spec/integration/cloudwatch_spec.rb
@@ -0,0 +1,25 @@
require "logstash/devutils/rspec/spec_helper"
require "logstash/inputs/cloudwatch"

describe LogStash::Inputs::CloudWatch, :integration => true do

  let(:settings) { { "access_key_id" => ENV['AWS_ACCESS_KEY_ID'],
                     "secret_access_key" => LogStash::Util::Password.new(ENV['AWS_SECRET_ACCESS_KEY']),
                     "region" => ENV["AWS_REGION"] || "us-east-1",
                     "namespace" => "AWS/S3",
                     'filters' => { "BucketName" => "*" },
                     'metrics' => ["BucketSizeBytes", "NumberOfObjects"]
                   } }

  def metrics_for(settings)
    cw = LogStash::Inputs::CloudWatch.new(settings)
    cw.register
    cw.send('metrics_for', settings['namespace'])
  end

  it "should not raise a type error when using a password" do
    expect { metrics_for(settings) }.not_to raise_error
  end
end
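
The point of this spec is that secrets reach the plugin wrapped in `LogStash::Util::Password` (so they are masked in logs and debug output), and the AWS client must receive the unwrapped string rather than choking on the wrapper with a `TypeError`. A hedged illustration, assuming logstash-core is on the load path:

require "logstash/util/password"

secret = LogStash::Util::Password.new("s3cr3t")
puts secret.to_s   # => "<password>"  (safe to log)
puts secret.value  # => "s3cr3t"      (the raw string the AWS SDK needs)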
data/spec/integration/dynamic_prefix_spec.rb
@@ -0,0 +1,92 @@
# encoding: utf-8
require_relative "../spec_helper"
require "logstash/outputs/s3"
require "logstash/codecs/line"
require "stud/temporary"

describe "Dynamic Prefix", :integration => true do
  include_context "setup plugin"

  let(:options) { main_options.merge({ "rotation_strategy" => "size" }) }
  let(:sandbox) { "test" }

  before do
    clean_remote_files(sandbox)
    subject.register
    subject.multi_receive_encoded(batch)
    subject.close
  end

  context "With field string" do
    let(:prefix) { "/#{sandbox}/%{server}/%{language}" }
    let(:batch) do
      b = {}
      e1 = LogStash::Event.new({ "server" => "es1", "language" => "ruby" })
      b[e1] = "es1-ruby"
      e2 = LogStash::Event.new({ "server" => "es2", "language" => "java" })
      b[e2] = "es2-ruby"
      b
    end

    it "creates a specific quantity of files" do
      expect(bucket_resource.objects(:prefix => sandbox).count).to eq(batch.size)
    end

    it "creates specific keys" do
      re = Regexp.union(/^es1\/ruby\/ls.s3.sashimi/, /^es2\/java\/ls.s3.sashimi/)

      bucket_resource.objects(:prefix => sandbox) do |obj|
        expect(obj.key).to match(re)
      end
    end

    it "Persists all events" do
      download_directory = Stud::Temporary.pathname

      FileUtils.rm_rf(download_directory)
      FileUtils.mkdir_p(download_directory)

      counter = 0
      bucket_resource.objects(:prefix => sandbox).each do |object|
        target = File.join(download_directory, "#{counter}.txt")
        object.get(:response_target => target)
        counter += 1
      end
      expect(Dir.glob(File.join(download_directory, "**", "*.txt")).inject(0) { |sum, f| sum + IO.readlines(f).size }).to eq(batch.size)
    end
  end

  context "with unsupported char" do
    let(:prefix) { "/#{sandbox}/%{server}/%{language}" }
    let(:batch) do
      b = {}
      e1 = LogStash::Event.new({ "server" => "e>s1", "language" => "ruby" })
      b[e1] = "es2-ruby"
      b
    end

    it "convert them to underscore" do
      re = Regexp.union(/^e_s1\/ruby\/ls.s3.sashimi/)

      bucket_resource.objects(:prefix => sandbox) do |obj|
        expect(obj.key).to match(re)
      end
    end
  end

  context "with dates" do
    let(:prefix) { "/#{sandbox}/%{+YYYY-MM-d}" }

    let(:batch) do
      b = {}
      e1 = LogStash::Event.new({ "server" => "e>s1", "language" => "ruby" })
      b[e1] = "es2-ruby"
      b
    end

    it "creates dated path" do
      re = /^#{sandbox}\/\d{4}-\d{2}-\d{1,2}\/ls\.s3\./
      expect(bucket_resource.objects(:prefix => sandbox).first.key).to match(re)
    end
  end
end
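
What "dynamic prefix" means in these specs: the configured prefix is interpolated per event with Logstash's `%{field}` / `%{+date}` sprintf syntax, and characters that are awkward in S3 keys (like the `>` in `e>s1`) are replaced with underscores. A hedged sketch of that flow, runnable inside a Logstash/JRuby environment; the exact character set to sanitize is defined by the plugin, so only `>` and `<` are shown here for illustration:

require "logstash/event"

event = LogStash::Event.new("server" => "e>s1", "language" => "ruby")
raw_prefix  = event.sprintf("/test/%{server}/%{language}")  # => "/test/e>s1/ruby"
safe_prefix = raw_prefix.gsub(/[><]/, "_")                  # => "/test/e_s1/ruby"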
data/spec/integration/gzip_file_spec.rb
@@ -0,0 +1,62 @@
# encoding: utf-8
require_relative "../spec_helper"
require "logstash/outputs/s3"
require "logstash/codecs/line"
require "stud/temporary"

describe "Gzip File Time rotation with constant write", :integration => true do
  include_context "setup plugin"

  let(:time_file) { 0.004 }
  let(:options) { main_options.merge({ "encoding" => "gzip",
                                       "rotation_strategy" => "time" }) }
  let(:number_of_events) { 5000 }
  let(:batch_size) { 125 }
  let(:event_encoded) { "Hello world" }
  let(:batch) do
    b = {}
    number_of_events.times do
      event = LogStash::Event.new({ "message" => event_encoded })
      b[event] = "#{event_encoded}\n"
    end
    b
  end
  let(:minimum_number_of_time_rotation) { 3 }
  let(:batch_step) { (number_of_events / minimum_number_of_time_rotation).ceil }

  before do
    clean_remote_files(prefix)
    subject.register

    # simulate batch read/write
    batch.each_slice(batch_step) do |batch_time|
      batch_time.each_slice(batch_size) do |smaller_batch|
        subject.multi_receive_encoded(smaller_batch)
      end
      sleep(1)
    end

    subject.close
  end

  it "creates multiples files" do
    # using close will upload the current file
    expect(bucket_resource.objects(:prefix => prefix).count).to be_between(minimum_number_of_time_rotation, minimum_number_of_time_rotation + 1).inclusive
  end

  it "Persists all events" do
    download_directory = Stud::Temporary.pathname

    FileUtils.rm_rf(download_directory)
    FileUtils.mkdir_p(download_directory)

    counter = 0
    bucket_resource.objects(:prefix => prefix).each do |object|
      target = File.join(download_directory, "#{counter}.gz")
      object.get(:response_target => target)
      counter += 1
    end

    expect(Dir.glob(File.join(download_directory, "**", "*.gz")).inject(0) { |sum, f| sum + Zlib::GzipReader.new(File.open(f)).readlines.size }).to eq(number_of_events)
  end
end
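
Time-based rotation, as exercised here, uploads and reopens the current file once it has been open longer than `time_file` (minutes) and contains data; the gem ships this logic in data/lib/logstash/outputs/s3/time_rotation_policy.rb. A hedged sketch of the decision, not the shipped class; `file.ctime`/`file.size` stand in for whatever creation-time and size tracking the writer actually exposes:

# Illustrative time-rotation check (assumed interface, not the plugin's code).
class TimeRotationSketch
  def initialize(time_file_minutes)
    @time_file = time_file_minutes * 60 # threshold in seconds
  end

  # Rotate only when the file has content and has outlived the threshold.
  def rotate?(file)
    file.size > 0 && (Time.now - file.ctime) >= @time_file
  end
end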
data/spec/integration/gzip_size_rotation_spec.rb
@@ -0,0 +1,63 @@
# encoding: utf-8
require_relative "../spec_helper"
require "logstash/outputs/s3"
require "logstash/codecs/line"
require "stud/temporary"

describe "Gzip Size rotation", :integration => true do
  include_context "setup plugin"

  let(:document_size) { 20 * 1024 } # in bytes

  let(:options) do
    main_options.merge({
      "encoding" => "gzip",
      "size_file" => document_size,
      "rotation_strategy" => "size" })
  end

  let(:number_of_events) { 1_000_000 }
  let(:batch_size) { 125 }
  let(:event_encoded) { "Hello world" * 20 }
  let(:batch) do
    b = {}
    batch_size.times do
      event = LogStash::Event.new({ "message" => event_encoded })
      b[event] = "#{event_encoded}\n"
    end
    b
  end
  let(:number_of_files) { number_of_events / 50000 }

  before do
    clean_remote_files(prefix)
    subject.register
    (number_of_events / batch_size).times do
      subject.multi_receive_encoded(batch)
    end
    subject.close
  end

  it "Rotates the files based on size" do
    f = bucket_resource.objects(:prefix => prefix).first
    expect(f.size).to be_between(document_size, document_size * 2).inclusive
  end

  it "Persists all events" do
    download_directory = Stud::Temporary.pathname

    FileUtils.rm_rf(download_directory)
    FileUtils.mkdir_p(download_directory)

    counter = 0
    bucket_resource.objects(:prefix => prefix).each do |object|
      target = File.join(download_directory, "#{counter}.txt.gz")
      object.get(:response_target => target)
      counter += 1
    end

    expect(Dir.glob(File.join(download_directory, "**", "*.gz")).inject(0) do |sum, f|
      sum + Zlib::GzipReader.new(File.open(f)).readlines.size
    end).to eq(number_of_events)
  end
end
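
The size-based counterpart (cf. data/lib/logstash/outputs/s3/size_rotation_policy.rb) rotates once the file on disk reaches `size_file` bytes. Because the check runs between writes, a file can overshoot the threshold before it is rotated, which is why the spec accepts anything between `document_size` and `document_size * 2`. A hedged sketch of the check, again with an assumed file interface:

# Illustrative size-rotation check (assumed interface, not the plugin's code).
class SizeRotationSketch
  def initialize(size_file_bytes)
    @size_file = size_file_bytes
  end

  # Rotate as soon as the bytes written reach the configured threshold.
  def rotate?(file)
    file.size >= @size_file
  end
end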
data/spec/integration/outputs/sqs_spec.rb
@@ -0,0 +1,98 @@
# encoding: utf-8

require_relative '../../spec_helper'
require 'logstash/event'
require 'logstash/json'
require 'securerandom'

describe LogStash::Outputs::SQS, :integration => true do
  let(:config) do
    {
      'queue' => @queue_name,
    }
  end
  subject { described_class.new(config) }

  # Create an SQS queue with a random name.
  before(:all) do
    @sqs = Aws::SQS::Client.new
    @queue_name = "logstash-output-sqs-#{SecureRandom.hex}"
    @queue_url = @sqs.create_queue(:queue_name => @queue_name)[:queue_url]
  end

  # Destroy the SQS queue which was created in `before(:all)`.
  after(:all) do
    @sqs.delete_queue(:queue_url => @queue_url)
  end

  describe '#register' do
    context 'with invalid credentials' do
      let(:config) do
        super().merge({
          'access_key_id' => 'bad_access',
          'secret_access_key' => 'bad_secret_key',
        })
      end

      it 'raises a configuration error' do
        expect { subject.register }.to raise_error(LogStash::ConfigurationError)
      end
    end

    context 'with a nonexistent queue' do
      let(:config) { super().merge('queue' => 'nonexistent-queue') }

      it 'raises a configuration error' do
        expect { subject.register }.to raise_error(LogStash::ConfigurationError)
      end
    end

    context 'with a nonpermissive account id' do
      let(:config) { super().merge('queue_owner_aws_account_id' => '123456789012') }

      it 'raises a configuration error' do
        expect { subject.register }.to raise_error(LogStash::ConfigurationError)
      end
    end

    context 'with valid configuration' do
      it 'does not raise an error' do
        expect { subject.register }.not_to raise_error
      end
    end
  end

  describe '#multi_receive_encoded' do
    let(:sample_count) { 10 }
    let(:sample_event) { LogStash::Event.new('message' => 'This is a message') }
    let(:sample_events) do
      sample_count.times.map do
        [sample_event, LogStash::Json.dump(sample_event)]
      end
    end

    before do
      subject.register
    end

    after do
      subject.close
    end

    context 'with batching disabled' do
      let(:config) { super().merge('batch_events' => 1) }

      it 'publishes to SQS' do
        subject.multi_receive_encoded(sample_events)
        expect(receive_all_messages(@queue_url).count).to eq(sample_events.count)
      end
    end

    context 'with batching enabled (default)' do
      it 'publishes to SQS' do
        subject.multi_receive_encoded(sample_events)
        expect(receive_all_messages(@queue_url).count).to eq(sample_events.count)
      end
    end
  end
end
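
`receive_all_messages` is a test helper (presumably from data/spec/support/helpers.rb, which the gemspec ships). A hedged sketch of what such a helper might look like, draining the queue with the real aws-sdk-sqs client until a receive call comes back empty; the signature and retry behaviour here are assumptions, not the gem's actual helper:

# Illustrative drain helper using the real Aws::SQS::Client API.
def receive_all_messages(queue_url, client: Aws::SQS::Client.new)
  messages = []
  loop do
    resp = client.receive_message(:queue_url => queue_url, :max_number_of_messages => 10)
    break if resp.messages.empty?
    messages.concat(resp.messages)
  end
  messages
end

Because SQS delivery is eventually consistent, a production-grade version would likely tolerate a few consecutive empty receives (or use long polling via :wait_time_seconds) before concluding the queue is drained.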