logstash-output-s3 4.3.7 → 4.4.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 238e7be91fe40e4fcb80736f3e8d62e76b6f8108be35e5a0c054cb19bb239428
- data.tar.gz: eb0c70181aa21d20b794cd05d2e458b322b10f6e343ae41d96a1e6f49cf85858
+ metadata.gz: 1915e4499e1950b269e287e9bbec3d88efca2f274a390f99949c27af5e2da105
+ data.tar.gz: 6a464adee35655b6f06f5fdf8dea9461af550b51912c4edbe46fb0d7c656774c
  SHA512:
- metadata.gz: 1194c4ee3defe1104fcc1c68d914c20e2ca2548ba6405960c75d4b398a7cd9949287c3410ead913ff82b5183164c4bd5bf858fe836f94f63654c582efab78ab5
- data.tar.gz: cb135c3b28297db0f5ad90a4f7ab4ba6b8c112574815e38bdccdec685f5caa28ed7636f68569586f71205d651118dad7cc7dd0194786efb59bfb8113fa7d0afb
+ metadata.gz: 31ff8fa96f60dffc824f4881ec162c2db15a707ddc43dee4ff81a043ad23dd9296dd5bafaedcef26a27e053f6db8156c918da4c68ec95b4dd496783218595cbe
+ data.tar.gz: 67e830c249b6a5042b442b89b0b0e0b5b7d88dd6064119d0b9816a680aff1d1ae975addcc02b6f5cbf8a36fc5faf2d5e9b6af164e7a471f08f8f8ecd010188a8
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
+ ## 4.4.1
+ - Fixes several closely-related race conditions that could cause plugin crashes or data-loss [#252](https://github.com/logstash-plugins/logstash-output-s3/pull/252)
+   - race condition in initializing a prefix could cause one or more local temp files to be abandoned and only recovered after next pipeline start
+   - race condition in stale watcher could cause the plugin to crash when working with a stale (empty) file that had been deleted
+   - race condition in stale watcher could cause a non-empty file to be deleted if bytes were written to it after it was detected as stale
+
+ ## 4.4.0
+ - Logstash recovers corrupted gzip and uploads to S3 [#249](https://github.com/logstash-plugins/logstash-output-s3/pull/249)
+
  ## 4.3.7
  - Refactor: avoid usage of CHM (JRuby 9.3.4 work-around) [#248](https://github.com/logstash-plugins/logstash-output-s3/pull/248)

data/README.md CHANGED
@@ -19,7 +19,7 @@ Need help? Try #logstash on freenode IRC or the https://discuss.elastic.co/c/log

  ## Developing

- ### 1. Plugin Developement and Testing
+ ### 1. Plugin Development and Testing

  #### Code
  - To get started, you'll need JRuby with the Bundler gem installed.
data/VERSION ADDED
@@ -0,0 +1 @@
+ 4.4.0
data/lib/logstash/outputs/s3/file_repository.rb CHANGED
@@ -15,8 +15,9 @@ module LogStash
  class PrefixedValue
  def initialize(file_factory, stale_time)
  @file_factory = file_factory
- @lock = Mutex.new
+ @lock = Monitor.new
  @stale_time = stale_time
+ @is_deleted = false
  end

  def with_lock
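The Mutex → Monitor change above matters because the new code paths re-acquire the lock from the thread that already holds it (e.g. `deleted?` or `delete!` invoked while a `with_lock` block is active). A minimal standalone sketch of the difference, not taken from the plugin:

    require 'monitor'

    # A Monitor is re-entrant: the owning thread may lock it again.
    mon = Monitor.new
    mon.synchronize { mon.synchronize { puts "re-entrant lock OK" } }

    # A Mutex is not: re-locking from the same thread raises ThreadError.
    mutex = Mutex.new
    begin
      mutex.synchronize { mutex.synchronize {} }
    rescue ThreadError => e
      puts "Mutex refused: #{e.message}"
    end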
@@ -34,7 +35,14 @@ module LogStash
  end

  def delete!
- with_lock{ |factory| factory.current.delete! }
+ with_lock do |factory|
+ factory.current.delete!
+ @is_deleted = true
+ end
+ end
+
+ def deleted?
+ with_lock { |_| @is_deleted }
  end
  end

@@ -72,17 +80,52 @@ module LogStash
  end

  def each_files
- @prefixed_factories.values.each do |prefixed_file|
- prefixed_file.with_lock { |factory| yield factory.current }
+ each_factory(keys) do |factory|
+ yield factory.current
  end
  end

- # Return the file factory
+ ##
+ # Yields the file factory while the current thread has exclusive access to it, creating a new
+ # one if one does not exist or if the current one is being reaped by the stale watcher.
+ # @param prefix_key [String]: the prefix key
+ # @yieldparam factory [TemporaryFileFactory]: a temporary file factory that this thread has exclusive access to
+ # @yieldreturn [Object]: a value to return; should NOT be the factory, which should be contained by the exclusive access scope.
+ # @return [Object]: the value returned by the provided block
  def get_factory(prefix_key)
- prefix_val = @prefixed_factories.fetch_or_store(prefix_key) { @factory_initializer.create_value(prefix_key) }
+
+ # fast-path: if factory exists and is not deleted, yield it with exclusive access and return
+ prefix_val = @prefixed_factories.get(prefix_key)
+ prefix_val&.with_lock do |factory|
+ # intentional local-jump to ensure deletion detection
+ # is done inside the exclusive access.
+ return yield(factory) unless prefix_val.deleted?
+ end
+
+ # slow-path:
+ # the Concurrent::Map#get operation is lock-free, but may have returned an entry that was being deleted by
+ # another thread (such as via stale detection). If we failed to retrieve a value, or retrieved one that had
+ # been marked deleted, use the atomic Concurrent::Map#compute to retrieve a non-deleted entry.
+ prefix_val = @prefixed_factories.compute(prefix_key) do |existing|
+ existing && !existing.deleted? ? existing : @factory_initializer.create_value(prefix_key)
+ end
  prefix_val.with_lock { |factory| yield factory }
  end

+ ##
+ # Yields each non-deleted file factory while the current thread has exclusive access to it.
+ # @param prefixes [Array<String>]: the prefix keys
+ # @yieldparam factory [TemporaryFileFactory]
+ # @return [void]
+ def each_factory(prefixes)
+ prefixes.each do |prefix_key|
+ prefix_val = @prefixed_factories.get(prefix_key)
+ prefix_val&.with_lock do |factory|
+ yield factory unless prefix_val.deleted?
+ end
+ end
+ end
+
  def get_file(prefix_key)
  get_factory(prefix_key) { |factory| yield factory.current }
  end
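For reference, `Concurrent::Map#get` is a lock-free read, while `#compute` runs its block atomically for the key, so the slow path above cannot hand back an entry that a concurrent sweep has already tombstoned. A self-contained sketch of the pattern, with `Entry` as a hypothetical stand-in for `PrefixedValue`:

    require 'concurrent'

    Entry = Struct.new(:deleted) do
      def deleted?
        deleted
      end
    end

    map = Concurrent::Map.new
    map.put_if_absent("prefix", Entry.new(true)) # simulate a reaped (tombstoned) entry

    # Atomically reuse a live entry or replace a deleted one.
    entry = map.compute("prefix") do |existing|
      existing && !existing.deleted? ? existing : Entry.new(false)
    end
    puts entry.deleted? # => false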
@@ -95,10 +138,21 @@ module LogStash
  @prefixed_factories.size
  end

- def remove_stale(k, v)
- if v.stale?
- @prefixed_factories.delete_pair(k, v)
- v.delete!
+ def remove_if_stale(prefix_key)
+ # we use the ATOMIC `Concurrent::Map#compute_if_present` to atomically
+ # detect the staleness, mark a stale prefixed factory as deleted, and delete from the map.
+ @prefixed_factories.compute_if_present(prefix_key) do |prefixed_factory|
+ # once we have retrieved an instance, we acquire exclusive access to it
+ # for stale detection, marking it as deleted before releasing the lock
+ # and causing it to become deleted from the map.
+ prefixed_factory.with_lock do |_|
+ if prefixed_factory.stale?
+ prefixed_factory.delete! # mark deleted to prevent reuse
+ nil # cause deletion
+ else
+ prefixed_factory # keep existing
+ end
+ end
  end
  end
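`Concurrent::Map#compute_if_present` runs its block only when the key exists and removes the pair when the block returns nil, which is what lets staleness detection, tombstoning, and map removal happen as a single atomic step. A tiny illustration, separate from the plugin code:

    require 'concurrent'

    map = Concurrent::Map.new
    map.put_if_absent(:prefix, :factory)

    stale = true
    # Returning nil deletes the pair; returning a value keeps it.
    map.compute_if_present(:prefix) { |value| stale ? nil : value }

    puts map.key?(:prefix) # => false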
 
@@ -106,7 +160,9 @@ module LogStash
  @stale_sweeper = Concurrent::TimerTask.new(:execution_interval => @sweeper_interval) do
  LogStash::Util.set_thread_name("S3, Stale factory sweeper")

- @prefixed_factories.each { |k, v| remove_stale(k,v) }
+ @prefixed_factories.keys.each do |prefix|
+ remove_if_stale(prefix)
+ end
  end

  @stale_sweeper.execute
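`Concurrent::TimerTask` runs its block on a background thread at the configured interval; the sweep now snapshots `keys` and re-checks each prefix atomically rather than iterating live map entries. A minimal sketch of the scheduling primitive on its own:

    require 'concurrent'

    sweeper = Concurrent::TimerTask.new(:execution_interval => 2) do
      puts "sweep tick at #{Time.now}"
    end
    sweeper.execute   # start the background timer
    sleep 5           # the block fires roughly every 2 seconds
    sweeper.shutdown  # stop it; the plugin does the equivalent from #close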
data/lib/logstash/outputs/s3/size_rotation_policy.rb CHANGED
@@ -7,7 +7,7 @@ module LogStash

  def initialize(size_file)
  if size_file <= 0
- raise LogStash::ConfigurationError, "`size_file` need to be greather than 0"
+ raise LogStash::ConfigurationError, "`size_file` need to be greater than 0"
  end

  @size_file = size_file
data/lib/logstash/outputs/s3/temporary_file.rb CHANGED
@@ -2,15 +2,23 @@
  require "thread"
  require "forwardable"
  require "fileutils"
+ require "logstash-output-s3_jars"

  module LogStash
  module Outputs
  class S3
- # Wrap the actual file descriptor into an utility classe
- # It make it more OOP and easier to reason with the paths.
+
+ java_import 'org.logstash.plugins.outputs.s3.GzipUtil'
+
+ # Wrap the actual file descriptor into an utility class
+ # Make it more OOP and easier to reason with the paths.
  class TemporaryFile
  extend Forwardable

+ GZIP_EXTENSION = "txt.gz"
+ TXT_EXTENSION = "txt"
+ RECOVERED_FILE_NAME_TAG = "-recovered"
+
  def_delegators :@fd, :path, :write, :close, :fsync

  attr_reader :fd
@@ -33,8 +41,10 @@ module LogStash
  def size
  # Use the fd size to get the accurate result,
  # so we dont have to deal with fsync
- # if the file is close we will use the File::size
+ # if the file is close, fd.size raises an IO exception so we use the File::size
  begin
+ # fd is nil when LS tries to recover gzip file but fails
+ return 0 unless @fd != nil
  @fd.size
  rescue IOError
  ::File.size(path)
@@ -45,7 +55,7 @@ module LogStash
  @key.gsub(/^\//, "")
  end

- # Each temporary file is made inside a directory named with an UUID,
+ # Each temporary file is created inside a directory named with an UUID,
  # instead of deleting the file directly and having the risk of deleting other files
  # we delete the root of the UUID, using a UUID also remove the risk of deleting unwanted file, it acts as
  # a sandbox.
@@ -58,13 +68,46 @@ module LogStash
  size == 0
  end

+ # only to cover the case where LS cannot restore corrupted file, file is not exist
+ def recoverable?
+ !@fd.nil?
+ end
+
  def self.create_from_existing_file(file_path, temporary_folder)
  key_parts = Pathname.new(file_path).relative_path_from(temporary_folder).to_s.split(::File::SEPARATOR)

+ # recover gzip file and compress back before uploading to S3
+ if file_path.end_with?("." + GZIP_EXTENSION)
+ file_path = self.recover(file_path)
+ end
  TemporaryFile.new(key_parts.slice(1, key_parts.size).join("/"),
- ::File.open(file_path, "r"),
+ ::File.exist?(file_path) ? ::File.open(file_path, "r") : nil, # for the nil case, file size will be 0 and upload will be ignored.
  ::File.join(temporary_folder, key_parts.slice(0, 1)))
  end
+
+ def self.gzip_extension
+ GZIP_EXTENSION
+ end
+
+ def self.text_extension
+ TXT_EXTENSION
+ end
+
+ def self.recovery_file_name_tag
+ RECOVERED_FILE_NAME_TAG
+ end
+
+ private
+ def self.recover(file_path)
+ full_gzip_extension = "." + GZIP_EXTENSION
+ recovered_txt_file_path = file_path.gsub(full_gzip_extension, RECOVERED_FILE_NAME_TAG + "." + TXT_EXTENSION)
+ recovered_gzip_file_path = file_path.gsub(full_gzip_extension, RECOVERED_FILE_NAME_TAG + full_gzip_extension)
+ GzipUtil.recover(file_path, recovered_txt_file_path)
+ if ::File.exist?(recovered_txt_file_path) && !::File.zero?(recovered_txt_file_path)
+ GzipUtil.compress(recovered_txt_file_path, recovered_gzip_file_path)
+ end
+ recovered_gzip_file_path
+ end
  end
  end
  end
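The actual recovery is delegated to the vendored Java `GzipUtil` (its source is not part of this diff). As a rough Ruby analogue of what recovering a truncated gzip means — salvage the bytes that still decompress cleanly and drop the corrupted tail — one could sketch:

    require 'zlib'

    # Illustrative only; the plugin uses org.logstash.plugins.outputs.s3.GzipUtil.
    def recover_gzip(corrupted_gzip_path, recovered_txt_path)
      File.open(recovered_txt_path, 'wb') do |out|
        Zlib::GzipReader.open(corrupted_gzip_path) do |gz|
          out.write(gz.readpartial(16 * 1024)) while true
        end
      end
    rescue EOFError
      # clean end of the salvageable stream
    rescue Zlib::GzipFile::Error, Zlib::Error
      # truncated or corrupted tail: keep whatever was already written
    end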
data/lib/logstash/outputs/s3/temporary_file_factory.rb CHANGED
@@ -19,9 +19,6 @@ module LogStash
  # I do not have to mess around to check if the other directory have file in it before destroying them.
  class TemporaryFileFactory
  FILE_MODE = "a"
- GZIP_ENCODING = "gzip"
- GZIP_EXTENSION = "txt.gz"
- TXT_EXTENSION = "txt"
  STRFTIME = "%Y-%m-%dT%H.%M"

  attr_accessor :counter, :tags, :prefix, :encoding, :temporary_directory, :current
@@ -48,7 +45,7 @@ module LogStash

  private
  def extension
- gzip? ? GZIP_EXTENSION : TXT_EXTENSION
+ gzip? ? TemporaryFile.gzip_extension : TemporaryFile.text_extension
  end

  def gzip?
data/lib/logstash/outputs/s3/uploader.rb CHANGED
@@ -31,6 +31,7 @@ module LogStash
  end
  end

+ # uploads a TemporaryFile to S3
  def upload(file, options = {})
  upload_options = options.fetch(:upload_options, {})

@@ -68,6 +69,7 @@ module LogStash
  @workers_pool.shutdown
  @workers_pool.wait_for_termination(nil) # block until its done
  end
+
  end
  end
  end
data/lib/logstash/outputs/s3.rb CHANGED
@@ -97,6 +97,7 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  :fallback_policy => :caller_runs
  })

+ GZIP_ENCODING = "gzip"

  config_name "s3"
  default :codec, "line"
@@ -181,7 +182,7 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  config :tags, :validate => :array, :default => []

  # Specify the content encoding. Supports ("gzip"). Defaults to "none"
- config :encoding, :validate => ["none", "gzip"], :default => "none"
+ config :encoding, :validate => ["none", GZIP_ENCODING], :default => "none"

  # Define the strategy to use to decide when we need to rotate the file and push it to S3,
  # The default strategy is to check for both size and time, the first one to match will rotate the file.
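User-facing configuration is unchanged by the constant extraction; `encoding` still accepts only "none" or "gzip". A minimal sketch of instantiating the output with gzip encoding, in the style of the integration specs further down (bucket and region values are placeholders):

    options = {
      "bucket"   => "my-bucket",   # placeholder
      "region"   => "us-east-1",   # placeholder
      "encoding" => "gzip",        # validated against ["none", GZIP_ENCODING]
      "restore"  => true           # recover leftover temp files on register
    }
    output = LogStash::Outputs::S3.new(options)
    output.register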
@@ -265,6 +266,8 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base

  @logger.debug("Uploading current workspace")

+ @file_repository.shutdown # stop stale sweeps
+
  # The plugin has stopped receiving new events, but we still have
  # data on disk, lets make sure it get to S3.
  # If Logstash get interrupted, the `restore_from_crash` (when set to true) method will pickup
@@ -274,8 +277,6 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  upload_file(file)
  end

- @file_repository.shutdown
-
  @uploader.stop # wait until all the current upload are complete
  @crash_uploader.stop if @restore # we might have still work to do for recovery so wait until we are done
  end
@@ -315,7 +316,7 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  :server_side_encryption => @server_side_encryption ? @server_side_encryption_algorithm : nil,
  :ssekms_key_id => @server_side_encryption_algorithm == "aws:kms" ? @ssekms_key_id : nil,
  :storage_class => @storage_class,
- :content_encoding => @encoding == "gzip" ? "gzip" : nil,
+ :content_encoding => @encoding == GZIP_ENCODING ? GZIP_ENCODING : nil,
  :multipart_threshold => @upload_multipart_threshold
  }
  end
@@ -343,22 +344,22 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  end

  def rotate_if_needed(prefixes)
- prefixes.each do |prefix|
- # Each file access is thread safe,
- # until the rotation is done then only
- # one thread has access to the resource.
- @file_repository.get_factory(prefix) do |factory|
- temp_file = factory.current
-
- if @rotation.rotate?(temp_file)
- @logger.debug? && @logger.debug("Rotate file",
- :key => temp_file.key,
- :path => temp_file.path,
- :strategy => @rotation.class.name)
-
- upload_file(temp_file)
- factory.rotate!
- end
+ # Each file access is thread safe,
+ # until the rotation is done then only
+ # one thread has access to the resource.
+ @file_repository.each_factory(prefixes) do |factory|
+ # we have exclusive access to the one-and-only
+ # prefix WRAPPER for this factory.
+ temp_file = factory.current
+
+ if @rotation.rotate?(temp_file)
+ @logger.debug? && @logger.debug("Rotate file",
+ :key => temp_file.key,
+ :path => temp_file.path,
+ :strategy => @rotation.class.name)
+
+ upload_file(temp_file) # may be async or blocking
+ factory.rotate!
  end
  end
  end
@@ -397,16 +398,48 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
  @crash_uploader = Uploader.new(bucket_resource, @logger, CRASH_RECOVERY_THREADPOOL)

  temp_folder_path = Pathname.new(@temporary_directory)
- Dir.glob(::File.join(@temporary_directory, "**/*"))
- .select { |file| ::File.file?(file) }
- .each do |file|
- temp_file = TemporaryFile.create_from_existing_file(file, temp_folder_path)
- if temp_file.size > 0
- @logger.debug? && @logger.debug("Recovering from crash and uploading", :path => temp_file.path)
- @crash_uploader.upload_async(temp_file, :on_complete => method(:clean_temporary_file), :upload_options => upload_options)
+ files = Dir.glob(::File.join(@temporary_directory, "**/*"))
+ .select { |file_path| ::File.file?(file_path) }
+ under_recovery_files = get_under_recovery_files(files)
+
+ files.each do |file_path|
+ # when encoding is GZIP, if file is already recovering or recovered and uploading to S3, log and skip
+ if under_recovery_files.include?(file_path)
+ unless file_path.include?(TemporaryFile.gzip_extension)
+ @logger.warn("The #{file_path} file either under recover process or failed to recover before.")
+ end
  else
- clean_temporary_file(temp_file)
+ temp_file = TemporaryFile.create_from_existing_file(file_path, temp_folder_path)
+ # do not remove or upload if Logstash tries to recover file but fails
+ if temp_file.recoverable?
+ if temp_file.size > 0
+ @logger.debug? && @logger.debug("Recovering from crash and uploading", :path => temp_file.path)
+ @crash_uploader.upload_async(temp_file,
+ :on_complete => method(:clean_temporary_file),
+ :upload_options => upload_options)
+ else
+ clean_temporary_file(temp_file)
+ end
+ end
+ end
+ end
+ end
+
+ # figures out the recovering files and
+ # creates a skip list to ignore for the rest of processes
+ def get_under_recovery_files(files)
+ skip_files = Set.new
+ return skip_files unless @encoding == GZIP_ENCODING
+
+ files.each do |file_path|
+ if file_path.include?(TemporaryFile.recovery_file_name_tag)
+ skip_files << file_path
+ if file_path.include?(TemporaryFile.gzip_extension)
+ # also include the original corrupted gzip file
+ skip_files << file_path.gsub(TemporaryFile.recovery_file_name_tag, "")
  end
  end
+ skip_files
  end
  end
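To make the naming scheme concrete, here is a hypothetical walkthrough of the skip list built above when `encoding` is gzip (file names invented for illustration):

    # Files found in the temporary directory after a crash:
    files = [
      "uuid/ls.s3.example.part0.txt.gz",           # original, possibly corrupted gzip
      "uuid/ls.s3.example.part0-recovered.txt",    # intermediate recovered plain text
      "uuid/ls.s3.example.part0-recovered.txt.gz"  # re-compressed recovery output
    ]
    # get_under_recovery_files returns all three: both "-recovered" artifacts
    # match the recovery tag, and the recovered .txt.gz also pulls in the
    # original it was derived from ("...part0.txt.gz").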
data/lib/logstash-output-s3_jars.rb ADDED
@@ -0,0 +1,4 @@
+ # AUTOGENERATED BY THE GRADLE SCRIPT. DO NOT EDIT.
+
+ require 'jar_dependencies'
+ require_jar('org.logstash.plugins.outputs.s3', 'logstash-output-s3', '4.4.0')
data/lib/tasks/build.rake ADDED
@@ -0,0 +1,15 @@
+ # encoding: utf-8
+ require "jars/installer"
+ require "fileutils"
+
+ task :vendor do
+ exit(1) unless system './gradlew vendor'
+ version = File.read("VERSION").strip
+ end
+
+ desc "clean"
+ task :clean do
+ ["build", "vendor/jar-dependencies", "Gemfile.lock"].each do |p|
+ FileUtils.rm_rf(p)
+ end
+ end
data/logstash-output-s3.gemspec CHANGED
@@ -1,13 +1,13 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-output-s3'
- s.version = '4.3.7'
+ s.version = '4.4.1'
  s.licenses = ['Apache-2.0']
  s.summary = "Sends Logstash events to the Amazon Simple Storage Service"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
  s.authors = ["Elastic"]
  s.email = 'info@elastic.co'
  s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
- s.require_paths = ["lib"]
+ s.require_paths = ["lib", "vendor/jar-dependencies"]

  # Files
  s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]
data/spec/integration/restore_from_crash_spec.rb CHANGED
@@ -7,18 +7,17 @@ require "stud/temporary"
  describe "Restore from crash", :integration => true do
  include_context "setup plugin"

- let(:options) { main_options.merge({ "restore" => true, "canned_acl" => "public-read-write" }) }
-
  let(:number_of_files) { 5 }
  let(:dummy_content) { "foobar\n" * 100 }
- let(:factory) { LogStash::Outputs::S3::TemporaryFileFactory.new(prefix, tags, "none", temporary_directory)}

  before do
  clean_remote_files(prefix)
  end

-
  context 'with a non-empty tempfile' do
+ let(:options) { main_options.merge({ "restore" => true, "canned_acl" => "public-read-write" }) }
+ let(:factory) { LogStash::Outputs::S3::TemporaryFileFactory.new(prefix, tags, "none", temporary_directory)}
+
  before do
  # Creating a factory always create a file
  factory.current.write(dummy_content)
@@ -41,6 +40,9 @@ describe "Restore from crash", :integration => true do
  end

  context 'with an empty tempfile' do
+ let(:options) { main_options.merge({ "restore" => true, "canned_acl" => "public-read-write" }) }
+ let(:factory) { LogStash::Outputs::S3::TemporaryFileFactory.new(prefix, tags, "none", temporary_directory)}
+
  before do
  factory.current
  factory.rotate!
@@ -63,5 +65,68 @@ describe "Restore from crash", :integration => true do
  expect(bucket_resource.objects(:prefix => prefix).count).to eq(0)
  end
  end
+
+ context "#gzip encoding" do
+ let(:options) { main_options.merge({ "restore" => true, "canned_acl" => "public-read-write", "encoding" => "gzip" }) }
+ let(:factory) { LogStash::Outputs::S3::TemporaryFileFactory.new(prefix, tags, "gzip", temporary_directory)}
+ describe "with empty recovered file" do
+ before do
+ # Creating a factory always create a file
+ factory.current.write('')
+ factory.current.fsync
+ factory.current.close
+ end
+
+ it 'should not upload and not remove temp file' do
+ subject.register
+ try(20) do
+ expect(bucket_resource.objects(:prefix => prefix).count).to eq(0)
+ expect(Dir.glob(File.join(temporary_directory, "*")).size).to eq(1)
+ end
+ end
+ end
+
+ describe "with healthy recovered, size is greater than zero file" do
+ before do
+ # Creating a factory always create a file
+ factory.current.write(dummy_content)
+ factory.current.fsync
+ factory.current.close
+
+ (number_of_files - 1).times do
+ factory.rotate!
+ factory.current.write(dummy_content)
+ factory.current.fsync
+ factory.current.close
+ end
+ end
+
+ it 'should recover, upload to S3 and remove temp file' do
+ subject.register
+ try(20) do
+ expect(bucket_resource.objects(:prefix => prefix).count).to eq(number_of_files)
+ expect(Dir.glob(File.join(temporary_directory, "*")).size).to eq(0)
+ expect(bucket_resource.objects(:prefix => prefix).first.acl.grants.collect(&:permission)).to include("READ", "WRITE")
+ end
+ end
+ end
+
+ describe "with failure when recovering" do
+ before do
+ # Creating a factory always create a file
+ factory.current.write(dummy_content)
+ factory.current.fsync
+ end
+
+ it 'should not upload to S3 and not remove temp file' do
+ subject.register
+ try(20) do
+ expect(bucket_resource.objects(:prefix => prefix).count).to eq(0)
+ expect(Dir.glob(File.join(temporary_directory, "*")).size).to eq(1)
+ end
+ end
+ end
+ end
+
  end

data/spec/outputs/s3/size_rotation_policy_spec.rb CHANGED
@@ -25,11 +25,11 @@ describe LogStash::Outputs::S3::SizeRotationPolicy do
  end

  it "raises an exception if the `size_file` is 0" do
- expect { described_class.new(0) }.to raise_error(LogStash::ConfigurationError, /need to be greather than 0/)
+ expect { described_class.new(0) }.to raise_error(LogStash::ConfigurationError, /need to be greater than 0/)
  end

  it "raises an exception if the `size_file` is < 0" do
- expect { described_class.new(-100) }.to raise_error(LogStash::ConfigurationError, /need to be greather than 0/)
+ expect { described_class.new(-100) }.to raise_error(LogStash::ConfigurationError, /need to be greater than 0/)
  end

  context "#needs_periodic?" do
@@ -5,6 +5,7 @@ shared_context "setup plugin" do
  let(:bucket) { ENV["AWS_LOGSTASH_TEST_BUCKET"] }
  let(:access_key_id) { ENV["AWS_ACCESS_KEY_ID"] }
  let(:secret_access_key) { ENV["AWS_SECRET_ACCESS_KEY"] }
+ let(:session_token) { ENV["AWS_SESSION_TOKEN"] }
  let(:size_file) { 100 }
  let(:time_file) { 100 }
  let(:tags) { [] }
@@ -18,6 +19,7 @@ shared_context "setup plugin" do
  "temporary_directory" => temporary_directory,
  "access_key_id" => access_key_id,
  "secret_access_key" => secret_access_key,
+ "session_token" => session_token,
  "size_file" => size_file,
  "time_file" => time_file,
  "region" => region,
@@ -25,7 +27,7 @@ shared_context "setup plugin" do
  }
  end

- let(:client_credentials) { Aws::Credentials.new(access_key_id, secret_access_key) }
+ let(:client_credentials) { Aws::Credentials.new(access_key_id, secret_access_key, session_token) }
  let(:bucket_resource) { Aws::S3::Bucket.new(bucket, { :credentials => client_credentials, :region => region }) }

  subject { LogStash::Outputs::S3.new(options) }
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-s3
  version: !ruby/object:Gem::Version
- version: 4.3.7
+ version: 4.4.1
  platform: ruby
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-06-16 00:00:00.000000000 Z
+ date: 2022-12-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -129,7 +129,9 @@ files:
  - LICENSE
  - NOTICE.TXT
  - README.md
+ - VERSION
  - docs/index.asciidoc
+ - lib/logstash-output-s3_jars.rb
  - lib/logstash/outputs/s3.rb
  - lib/logstash/outputs/s3/file_repository.rb
  - lib/logstash/outputs/s3/patch.rb
@@ -142,6 +144,7 @@ files:
  - lib/logstash/outputs/s3/uploader.rb
  - lib/logstash/outputs/s3/writable_directory_validator.rb
  - lib/logstash/outputs/s3/write_bucket_permission_validator.rb
+ - lib/tasks/build.rake
  - logstash-output-s3.gemspec
  - spec/integration/dynamic_prefix_spec.rb
  - spec/integration/gzip_file_spec.rb
@@ -164,6 +167,7 @@ files:
  - spec/outputs/s3_spec.rb
  - spec/spec_helper.rb
  - spec/supports/helpers.rb
+ - vendor/jar-dependencies/org/logstash/plugins/outputs/s3/logstash-output-s3/4.4.0/logstash-output-s3-4.4.0.jar
  homepage: http://www.elastic.co/guide/en/logstash/current/index.html
  licenses:
  - Apache-2.0
@@ -174,6 +178,7 @@ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
+ - vendor/jar-dependencies
  required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="