logstash-output-s3 4.2.0 → 4.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 890ff03d14bda56d56fe801c3432bd7a4da11643fd160c52a2b7caf238a4fb8d
-   data.tar.gz: ea0fb6181b73b91d8d1c18a89ed92329abae4c23d5f55e85d06a8789f2d05667
+   metadata.gz: 1d9a7a272abc73ba56936d68d273ec4042421d0082fc3e92e5e27dda3e93af21
+   data.tar.gz: 2a583ab3d5d69aef1ce03c98ec24e5f621d64e56517ed552fdc0495adecee24b
  SHA512:
-   metadata.gz: 1134e3aa8531cbb29716f4db08513ed15fa6a0013efda15cda54e8dd3b2d5fa885148988662494119d680fd1ff597c232f48f67816bde953e35b3e93e5b0ee4e
-   data.tar.gz: 21f03d765f73b4bbb52779a00a6879dd3bb99714b67dfefc042e3d683692ddefeb299a7c182d297a218a9dbe7c93813728205677a3006aa87c9f12b55644800b
+   metadata.gz: dcae487d4c4a129aa7d7c53883f680807b2bbcf472566099fbca39316a685e9ae965aecb1f880f60227815097b13f1a773f0a1bd6d1f89bd236c809502141a6f
+   data.tar.gz: 9bac6b7e4bad12b8d3766dbf82ab6f8f1cbd3a366dee87b93ab12df21f380b458d233f4dbc87dae3cd787f5cb968468eea004389eee36ffb0d8806ab70fe36c5
CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
+ ## 4.3.0
+ - Feat: Added retry_count and retry_delay config [#218](https://github.com/logstash-plugins/logstash-output-s3/pull/218)
+
  ## 4.2.0
  - Added ability to specify [ONEZONE_IA](https://aws.amazon.com/s3/storage-classes/#__) as storage_class

docs/index.asciidoc CHANGED
@@ -90,6 +90,8 @@ This plugin supports the following configuration options plus the <<plugins-{typ
  | <<plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-region>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-restore>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-retry_count>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-retry_delay>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-role_arn>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-role_session_name>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-rotation_strategy>> |<<string,string>>, one of `["size_and_time", "size", "time"]`|No
@@ -241,6 +243,22 @@ The AWS Region
  Used to enable recovery after crash/abnormal termination.
  Temporary log files will be recovered and uploaded.

+ [id="plugins-{type}s-{plugin}-retry_count"]
+ ===== `retry_count`
+
+ * Value type is <<number,number>>
+ * Default value is `Infinity`
+
+ Allows limiting the number of retries when an S3 upload fails.
+
+ [id="plugins-{type}s-{plugin}-retry_delay"]
+ ===== `retry_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+ Delay (in seconds) to wait between consecutive retries on upload failures.
+
  [id="plugins-{type}s-{plugin}-role_arn"]
  ===== `role_arn`

@@ -315,7 +333,7 @@ specified here
  * Value type is <<number,number>>
  * Default value is `5242880`

- Set the size of file in bytes, this means that files on bucket when have dimension > file_size, they are stored in two or more file.
+ Set the file size in bytes; when a file on the bucket reaches this size, the data is split across two or more files.
  If you have tags, then a file of the specified size is generated for every tag.

  [id="plugins-{type}s-{plugin}-ssekms_key_id"]
@@ -390,7 +408,7 @@ Specify how many workers to use to upload the files to S3
  * Default value is `true`

  The common use case is to define permission on the root bucket and give Logstash full access to write its logs.
- In some circumstances you need finer grained permission on subfolder, this allow you to disable the check at startup.
+ In some circumstances you need finer-grained permission on a subfolder; this option allows you to disable the check at startup.

  [id="plugins-{type}s-{plugin}-common-options"]
  include::{include_path}/{type}.asciidoc[]
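
To make the two new options concrete, here is a hedged sketch of setting them when constructing the output programmatically, the way the plugin's specs do; the bucket name and the values chosen are illustrative, not part of this diff:

```ruby
require "logstash/outputs/s3"

# Hypothetical values; equivalent to `retry_count => 5` and `retry_delay => 2`
# inside an `s3 { ... }` output block in a pipeline config.
output = LogStash::Outputs::S3.new(
  "bucket"      => "my-logs-bucket",  # illustrative bucket name
  "retry_count" => 5,                 # cap retries instead of the Infinity default
  "retry_delay" => 2                  # seconds to sleep between attempts
)
# output.register would then validate credentials and bucket access at startup.
```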
lib/logstash/outputs/s3.rb CHANGED
@@ -188,6 +188,12 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  # In some circumstances you need finer grained permission on a subfolder; this allows you to disable the check at startup.
  config :validate_credentials_on_root_bucket, :validate => :boolean, :default => true

+ # The number of times to retry a failed S3 upload.
+ config :retry_count, :validate => :number, :default => Float::INFINITY
+
+ # The amount of time to wait in seconds before attempting to retry a failed upload.
+ config :retry_delay, :validate => :number, :default => 1
+
  def register
    # I've moved the validation of the items into custom classes
    # to prepare for the new config validation that will be part of the core so the core can
@@ -219,7 +225,7 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
    :max_queue => @upload_queue_size,
    :fallback_policy => :caller_runs })

- @uploader = Uploader.new(bucket_resource, @logger, executor)
+ @uploader = Uploader.new(bucket_resource, @logger, executor, retry_count: @retry_count, retry_delay: @retry_delay)

  # Restoring from crash will use a new threadpool to slowly recover
  # New events should have more priority.
@@ -326,10 +332,6 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
    Aws::S3::Bucket.new(@bucket, full_options)
  end

- def aws_service_endpoint(region)
-   { :s3_endpoint => region == 'us-east-1' ? 's3.amazonaws.com' : "s3-#{region}.amazonaws.com"}
- end
-
  def rotate_if_needed(prefixes)
    prefixes.each do |prefix|
      # Each file access is thread safe,
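
The deleted aws_service_endpoint helper hard-coded `s3-{region}` hostnames; current aws-sdk clients derive the regional endpoint themselves, which the new specs near the end of this diff assert. A quick illustration (stubbed responses, so no real credentials are needed):

```ruby
require "aws-sdk"

# The SDK resolves the regional endpoint on its own; no manual mapping needed.
client = Aws::S3::Client.new(region: "ap-east-1", stub_responses: true)
puts client.config.endpoint  # => https://s3.ap-east-1.amazonaws.com
```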
lib/logstash/outputs/s3/uploader.rb CHANGED
@@ -6,7 +6,7 @@ module LogStash
  module Outputs
    class S3
      class Uploader
-       TIME_BEFORE_RETRYING_SECONDS = 1
+
        DEFAULT_THREADPOOL = Concurrent::ThreadPoolExecutor.new({
          :min_threads => 1,
          :max_threads => 8,
@@ -14,13 +14,14 @@ module LogStash
          :fallback_policy => :caller_runs
        })

-
        attr_reader :bucket, :upload_options, :logger

-       def initialize(bucket, logger, threadpool = DEFAULT_THREADPOOL)
+       def initialize(bucket, logger, threadpool = DEFAULT_THREADPOOL, retry_count: Float::INFINITY, retry_delay: 1)
          @bucket = bucket
          @workers_pool = threadpool
          @logger = logger
+         @retry_count = retry_count
+         @retry_delay = retry_delay
        end

        def upload_async(file, options = {})
@@ -33,6 +34,7 @@ module LogStash
        def upload(file, options = {})
          upload_options = options.fetch(:upload_options, {})

+         tries = 0
          begin
            obj = bucket.object(file.key)
            obj.upload_file(file.path, upload_options)
@@ -44,15 +46,22 @@ module LogStash
          #
          # Thread might be stuck here, but I think it's better than losing anything
          # it's either a transient error or something bad really happened.
-         logger.error("Uploading failed, retrying.", :exception => e.class, :message => e.message, :path => file.path, :backtrace => e.backtrace)
-         sleep TIME_BEFORE_RETRYING_SECONDS
-         retry
+         if tries < @retry_count
+           tries += 1
+           logger.warn("Uploading failed, retrying (##{tries} of #{@retry_count})", :exception => e.class, :message => e.message, :path => file.path, :backtrace => e.backtrace)
+           sleep @retry_delay
+           retry
+         else
+           logger.error("Failed to upload file (retried #{@retry_count} times).", :exception => e.class, :message => e.message, :path => file.path, :backtrace => e.backtrace)
+         end
        end

-       options[:on_complete].call(file) unless options[:on_complete].nil?
-     rescue => e
-       logger.error("An error occured in the `on_complete` uploader", :exception => e.class, :message => e.message, :path => file.path, :backtrace => e.backtrace)
-       raise e # reraise it since we don't deal with it now
+       begin
+         options[:on_complete].call(file) unless options[:on_complete].nil?
+       rescue => e
+         logger.error("An error occurred in the `on_complete` uploader", :exception => e.class, :message => e.message, :path => file.path, :backtrace => e.backtrace)
+         raise e # reraise it since we don't deal with it now
+       end
      end

      def stop
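
Distilled from the hunk above, the new control flow is a bounded retry loop: rescue, sleep, retry while a counter stays under the budget, then log and give up. A minimal self-contained sketch of that pattern under illustrative names (this is not the plugin's code; the real logic lives in Uploader#upload):

```ruby
require "logger"

# Retry a block up to retry_count times, sleeping retry_delay seconds between
# attempts; once the budget is exhausted, log the error instead of raising,
# mirroring the behaviour Uploader#upload adopts in this release.
def with_retries(retry_count: Float::INFINITY, retry_delay: 1, logger: Logger.new($stdout))
  tries = 0
  begin
    yield
  rescue => e
    if tries < retry_count
      tries += 1
      logger.warn("attempt ##{tries} of #{retry_count} failed: #{e.message}")
      sleep retry_delay
      retry
    else
      logger.error("giving up after #{retry_count} retries: #{e.message}")
    end
  end
end

# Example: the block succeeds on its third attempt.
attempts = 0
with_retries(retry_count: 5, retry_delay: 0.1) do
  attempts += 1
  raise "transient failure" if attempts < 3
end
```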
logstash-output-s3.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
    s.name = 'logstash-output-s3'
-   s.version = '4.2.0'
+   s.version = '4.3.0'
    s.licenses = ['Apache-2.0']
    s.summary = "Sends Logstash events to the Amazon Simple Storage Service"
    s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
spec/outputs/s3/uploader_spec.rb CHANGED
@@ -6,7 +6,7 @@ require "aws-sdk"
  require "stud/temporary"

  describe LogStash::Outputs::S3::Uploader do
-   let(:logger) { spy(:logger ) }
+   let(:logger) { spy(:logger) }
    let(:max_upload_workers) { 1 }
    let(:bucket_name) { "foobar-bucket" }
    let(:client) { Aws::S3::Client.new(stub_responses: true) }
@@ -14,7 +14,6 @@ describe LogStash::Outputs::S3::Uploader do
    let(:temporary_directory) { Stud::Temporary.pathname }
    let(:temporary_file) { Stud::Temporary.file }
    let(:key) { "foobar" }
-   let(:upload_options) { {} }
    let(:threadpool) do
      Concurrent::ThreadPoolExecutor.new({
        :min_threads => 1,
@@ -31,27 +30,41 @@ describe LogStash::Outputs::S3::Uploader do
    f
  end

- subject { described_class.new(bucket, logger, threadpool) }
+ subject { described_class.new(bucket, logger, threadpool, retry_delay: 0.01) }

  it "upload file to the s3 bucket" do
    expect { subject.upload(file) }.not_to raise_error
  end

  it "execute a callback when the upload is complete" do
-   callback = proc { |f| }
+   callback = proc { |_| }

    expect(callback).to receive(:call).with(file)
-   subject.upload(file, { :on_complete => callback })
+   subject.upload(file, :on_complete => callback)
  end

- it "retries errors indefinitively" do
+ it "retries errors indefinitely" do
    s3 = double("s3").as_null_object

-   expect(logger).to receive(:error).with(any_args).once
-   expect(bucket).to receive(:object).with(file.key).and_return(s3).twice
-   expect(s3).to receive(:upload_file).with(any_args).and_raise(StandardError)
+   allow(bucket).to receive(:object).with(file.key).and_return(s3)
+
+   expect(logger).to receive(:warn).with(any_args)
+   expect(s3).to receive(:upload_file).with(any_args).and_raise(RuntimeError.new('UPLOAD FAILED')).exactly(5).times
    expect(s3).to receive(:upload_file).with(any_args).and_return(true)

    subject.upload(file)
  end
+
+ it "retries errors specified times" do
+   subject = described_class.new(bucket, logger, threadpool, retry_count: 3, retry_delay: 0.01)
+   s3 = double("s3").as_null_object
+
+   allow(bucket).to receive(:object).with(file.key).and_return(s3)
+
+   expect(logger).to receive(:warn).with(any_args).exactly(3).times
+   expect(logger).to receive(:error).with(any_args).once
+   expect(s3).to receive(:upload_file).with(file.path, {}).and_raise(RuntimeError).at_least(1).times
+
+   subject.upload(file)
+ end
end
spec/outputs/s3_spec.rb CHANGED
@@ -204,4 +204,27 @@ describe LogStash::Outputs::S3 do
      subject.multi_receive_encoded(events_and_encoded)
    end
  end
+
+ describe "aws service" do
+   context 'us-east-1' do
+     let(:region) { 'us-east-1' }
+     it "sets endpoint" do
+       expect( subject.send(:bucket_resource).client.config.endpoint.to_s ).to eql 'https://s3.us-east-1.amazonaws.com'
+     end
+   end
+
+   context 'ap-east-1' do
+     let(:region) { 'ap-east-1' }
+     it "sets endpoint" do
+       expect( subject.send(:bucket_resource).client.config.endpoint.to_s ).to eql 'https://s3.ap-east-1.amazonaws.com'
+     end
+   end
+
+   context 'cn-northwest-1' do
+     let(:region) { 'cn-northwest-1' }
+     it "sets endpoint" do
+       expect( subject.send(:bucket_resource).client.config.endpoint.to_s ).to eql 'https://s3.cn-northwest-1.amazonaws.com.cn'
+     end
+   end
+ end
end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-s3
  version: !ruby/object:Gem::Version
-   version: 4.2.0
+   version: 4.3.0
  platform: ruby
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-11-18 00:00:00.000000000 Z
+ date: 2020-02-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement