logstash-output-s3 4.4.0 → 4.4.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/lib/logstash/outputs/s3/file_repository.rb +67 -11
- data/lib/logstash/outputs/s3.rb +18 -18
- data/logstash-output-s3.gemspec +1 -1
- data/vendor/jar-dependencies/org/logstash/plugins/outputs/s3/logstash-output-s3/4.4.0/logstash-output-s3-4.4.0.jar +0 -0
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 1915e4499e1950b269e287e9bbec3d88efca2f274a390f99949c27af5e2da105
|
4
|
+
data.tar.gz: 6a464adee35655b6f06f5fdf8dea9461af550b51912c4edbe46fb0d7c656774c
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 31ff8fa96f60dffc824f4881ec162c2db15a707ddc43dee4ff81a043ad23dd9296dd5bafaedcef26a27e053f6db8156c918da4c68ec95b4dd496783218595cbe
|
7
|
+
data.tar.gz: 67e830c249b6a5042b442b89b0b0e0b5b7d88dd6064119d0b9816a680aff1d1ae975addcc02b6f5cbf8a36fc5faf2d5e9b6af164e7a471f08f8f8ecd010188a8
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,9 @@
|
|
1
|
+
## 4.4.1
|
2
|
+
- Fixes several closely-related race conditions that could cause plugin crashes or data-loss [#252](https://github.com/logstash-plugins/logstash-output-s3/pull/252)
|
3
|
+
- race condition in initializing a prefix could cause one or more local temp files to be abandoned and only recovered after next pipeline start
|
4
|
+
- race condition in stale watcher could cause the plugin to crash when working with a stale (empty) file that had been deleted
|
5
|
+
- race condition in stale watcher could cause a non-empty file to be deleted if bytes were written to it after it was detected as stale
|
6
|
+
|
1
7
|
## 4.4.0
|
2
8
|
- Logstash recovers corrupted gzip and uploads to S3 [#249](https://github.com/logstash-plugins/logstash-output-s3/pull/249)
|
3
9
|
|
@@ -15,8 +15,9 @@ module LogStash
|
|
15
15
|
class PrefixedValue
|
16
16
|
def initialize(file_factory, stale_time)
|
17
17
|
@file_factory = file_factory
|
18
|
-
@lock = Mutex.new
|
18
|
+
@lock = Monitor.new
|
19
19
|
@stale_time = stale_time
|
20
|
+
@is_deleted = false
|
20
21
|
end
|
21
22
|
|
22
23
|
def with_lock
|
@@ -34,7 +35,14 @@ module LogStash
|
|
34
35
|
end
|
35
36
|
|
36
37
|
def delete!
|
37
|
-
with_lock { |factory| factory.current.delete! }
|
38
|
+
with_lock do |factory|
|
39
|
+
factory.current.delete!
|
40
|
+
@is_deleted = true
|
41
|
+
end
|
42
|
+
end
|
43
|
+
|
44
|
+
def deleted?
|
45
|
+
with_lock { |_| @is_deleted }
|
38
46
|
end
|
39
47
|
end
|
40
48
|
|
@@ -72,17 +80,52 @@ module LogStash
|
|
72
80
|
end
|
73
81
|
|
74
82
|
def each_files
|
75
|
-
|
76
|
-
|
83
|
+
each_factory(keys) do |factory|
|
84
|
+
yield factory.current
|
77
85
|
end
|
78
86
|
end
|
79
87
|
|
80
|
-
|
88
|
+
##
|
89
|
+
# Yields the file factory while the current thread has exclusive access to it, creating a new
|
90
|
+
# one if one does not exist or if the current one is being reaped by the stale watcher.
|
91
|
+
# @param prefix_key [String]: the prefix key
|
92
|
+
# @yieldparam factory [TemporaryFileFactory]: a temporary file factory that this thread has exclusive access to
|
93
|
+
# @yieldreturn [Object]: a value to return; should NOT be the factory, which should be contained by the exclusive access scope.
|
94
|
+
# @return [Object]: the value returned by the provided block
|
81
95
|
def get_factory(prefix_key)
|
82
|
-
|
96
|
+
|
97
|
+
# fast-path: if factory exists and is not deleted, yield it with exclusive access and return
|
98
|
+
prefix_val = @prefixed_factories.get(prefix_key)
|
99
|
+
prefix_val&.with_lock do |factory|
|
100
|
+
# intentional local-jump to ensure deletion detection
|
101
|
+
# is done inside the exclusive access.
|
102
|
+
return yield(factory) unless prefix_val.deleted?
|
103
|
+
end
|
104
|
+
|
105
|
+
# slow-path:
|
106
|
+
# the Concurrent::Map#get operation is lock-free, but may have returned an entry that was being deleted by
|
107
|
+
# another thread (such as via stale detection). If we failed to retrieve a value, or retrieved one that had
|
108
|
+
# been marked deleted, use the atomic Concurrent::Map#compute to retrieve a non-deleted entry.
|
109
|
+
prefix_val = @prefixed_factories.compute(prefix_key) do |existing|
|
110
|
+
existing && !existing.deleted? ? existing : @factory_initializer.create_value(prefix_key)
|
111
|
+
end
|
83
112
|
prefix_val.with_lock { |factory| yield factory }
|
84
113
|
end
|
85
114
|
|
115
|
+
##
|
116
|
+
# Yields each non-deleted file factory while the current thread has exclusive access to it.
|
117
|
+
# @param prefixes [Array<String>]: the prefix keys
|
118
|
+
# @yieldparam factory [TemporaryFileFactory]
|
119
|
+
# @return [void]
|
120
|
+
def each_factory(prefixes)
|
121
|
+
prefixes.each do |prefix_key|
|
122
|
+
prefix_val = @prefixed_factories.get(prefix_key)
|
123
|
+
prefix_val&.with_lock do |factory|
|
124
|
+
yield factory unless prefix_val.deleted?
|
125
|
+
end
|
126
|
+
end
|
127
|
+
end
|
128
|
+
|
86
129
|
def get_file(prefix_key)
|
87
130
|
get_factory(prefix_key) { |factory| yield factory.current }
|
88
131
|
end
|
@@ -95,10 +138,21 @@ module LogStash
|
|
95
138
|
@prefixed_factories.size
|
96
139
|
end
|
97
140
|
|
98
|
-
def
|
99
|
-
|
100
|
-
|
101
|
-
|
141
|
+
def remove_if_stale(prefix_key)
|
142
|
+
# we use the ATOMIC `Concurrent::Map#compute_if_present` to atomically
|
143
|
+
# detect the staleness, mark a stale prefixed factory as deleted, and delete from the map.
|
144
|
+
@prefixed_factories.compute_if_present(prefix_key) do |prefixed_factory|
|
145
|
+
# once we have retrieved an instance, we acquire exclusive access to it
|
146
|
+
# for stale detection, marking it as deleted before releasing the lock
|
147
|
+
# and causing it to become deleted from the map.
|
148
|
+
prefixed_factory.with_lock do |_|
|
149
|
+
if prefixed_factory.stale?
|
150
|
+
prefixed_factory.delete! # mark deleted to prevent reuse
|
151
|
+
nil # cause deletion
|
152
|
+
else
|
153
|
+
prefixed_factory # keep existing
|
154
|
+
end
|
155
|
+
end
|
102
156
|
end
|
103
157
|
end
|
104
158
|
|
@@ -106,7 +160,9 @@ module LogStash
|
|
106
160
|
@stale_sweeper = Concurrent::TimerTask.new(:execution_interval => @sweeper_interval) do
|
107
161
|
LogStash::Util.set_thread_name("S3, Stale factory sweeper")
|
108
162
|
|
109
|
-
@prefixed_factories.each
|
163
|
+
@prefixed_factories.keys.each do |prefix|
|
164
|
+
remove_if_stale(prefix)
|
165
|
+
end
|
110
166
|
end
|
111
167
|
|
112
168
|
@stale_sweeper.execute
|
data/lib/logstash/outputs/s3.rb
CHANGED
@@ -266,6 +266,8 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
|
|
266
266
|
|
267
267
|
@logger.debug("Uploading current workspace")
|
268
268
|
|
269
|
+
@file_repository.shutdown # stop stale sweeps
|
270
|
+
|
269
271
|
# The plugin has stopped receiving new events, but we still have
|
270
272
|
# data on disk, lets make sure it get to S3.
|
271
273
|
# If Logstash get interrupted, the `restore_from_crash` (when set to true) method will pickup
|
@@ -275,8 +277,6 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
|
|
275
277
|
upload_file(file)
|
276
278
|
end
|
277
279
|
|
278
|
-
@file_repository.shutdown
|
279
|
-
|
280
280
|
@uploader.stop # wait until all the current upload are complete
|
281
281
|
@crash_uploader.stop if @restore # we might have still work to do for recovery so wait until we are done
|
282
282
|
end
|
@@ -344,22 +344,22 @@ class LogStash::Outputs::S3 < LogStash::Outputs::Base
|
|
344
344
|
end
|
345
345
|
|
346
346
|
def rotate_if_needed(prefixes)
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
|
357
|
-
|
358
|
-
|
359
|
-
|
360
|
-
|
361
|
-
|
362
|
-
|
347
|
+
# Each file access is thread safe,
|
348
|
+
# until the rotation is done then only
|
349
|
+
# one thread has access to the resource.
|
350
|
+
@file_repository.each_factory(prefixes) do |factory|
|
351
|
+
# we have exclusive access to the one-and-only
|
352
|
+
# prefix WRAPPER for this factory.
|
353
|
+
temp_file = factory.current
|
354
|
+
|
355
|
+
if @rotation.rotate?(temp_file)
|
356
|
+
@logger.debug? && @logger.debug("Rotate file",
|
357
|
+
:key => temp_file.key,
|
358
|
+
:path => temp_file.path,
|
359
|
+
:strategy => @rotation.class.name)
|
360
|
+
|
361
|
+
upload_file(temp_file) # may be async or blocking
|
362
|
+
factory.rotate!
|
363
363
|
end
|
364
364
|
end
|
365
365
|
end
|
data/logstash-output-s3.gemspec
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
Gem::Specification.new do |s|
|
2
2
|
s.name = 'logstash-output-s3'
|
3
|
-
s.version = '4.4.0'
|
3
|
+
s.version = '4.4.1'
|
4
4
|
s.licenses = ['Apache-2.0']
|
5
5
|
s.summary = "Sends Logstash events to the Amazon Simple Storage Service"
|
6
6
|
s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
|
Binary file
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: logstash-output-s3
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 4.4.0
|
4
|
+
version: 4.4.1
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Elastic
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2022-
|
11
|
+
date: 2022-12-22 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
requirement: !ruby/object:Gem::Requirement
|