logstash-input-s3-sns-sqs 2.0.9 → 2.1.0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f7484934d1a9826d562d1a1949bef243f398ead37282b5f78a6d274765b891d0
+  data.tar.gz: c7595e2a337910dabfc862bd0588aa206f0039a1a174512e3aae961536bcb1f2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5e48df7a8f5240acfae4e84aa9995130448440279e706f734e1867ed18ce543c003b5ff046b937278b1b8956c93c1432a333b95c7b055bfd0e6b1b504ab555e2
+  data.tar.gz: 50c0b7bf135e0e347c6045e1b60f749348687a3848829c3a8b767e9caa2695953c0746f504bfc52e3be5f75feb1b61f59d3a2a2182918aedb2af0dd767d16ad0
data/CHANGELOG.md
CHANGED
@@ -9,6 +9,7 @@ class S3Downloader
     @stopped = stop_semaphore
     @factory = options[:s3_client_factory]
     @delete_on_success = options[:delete_on_success]
+    @include_object_properties = options[:include_object_properties]
   end

   def copy_s3object_to_disk(record)
@@ -21,6 +22,7 @@ class S3Downloader
         key: record[:key],
         response_target: record[:local_file]
       )
+      record[:s3_data] = response.to_h.keep_if { |key| @include_object_properties.include?(key) }
     end
   rescue Aws::S3::Errors::ServiceError => e
     @logger.error("Unable to download file. Requeuing the message", :error => e, :record => record)
@@ -161,6 +161,10 @@ class LogStash::Inputs::S3SNSSQS < LogStash::Inputs::Threadable
   # Session name to use when assuming an IAM role
   config :s3_role_session_name, :validate => :string, :default => "logstash"
   config :delete_on_success, :validate => :boolean, :default => false
+  # Whether or not to include the S3 object's properties (last_modified, content_type, metadata)
+  # into each Event at [@metadata][s3]. Regardless of this setting, [@metadata][s3][key] will always
+  # be present.
+  config :include_object_properties, :validate => :array, :default => [:last_modified, :content_type, :metadata]

   ### sqs
   # Name of the SQS Queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
@@ -169,9 +173,10 @@ class LogStash::Inputs::S3SNSSQS < LogStash::Inputs::Threadable
   # Whether the event is processed through an SNS to SQS. (S3>SNS>SQS = true | S3>SQS = false)
   config :from_sns, :validate => :boolean, :default => true
   config :sqs_skip_delete, :validate => :boolean, :default => false
+  config :sqs_delete_on_failure, :validate => :boolean, :default => true
+
   config :visibility_timeout, :validate => :number, :default => 120
   config :max_processing_time, :validate => :number, :default => 8000
-
   ### system
   config :temporary_directory, :validate => :string, :default => File.join(Dir.tmpdir, "logstash")
   # To run in multiple threads use this
@@ -248,7 +253,8 @@ class LogStash::Inputs::S3SNSSQS < LogStash::Inputs::Threadable
       sqs_queue: @queue,
       queue_owner_aws_account_id: @queue_owner_aws_account_id,
       from_sns: @from_sns,
-      max_processing_time: @max_processing_time
+      max_processing_time: @max_processing_time,
+      sqs_delete_on_failure: @sqs_delete_on_failure
     },
     aws_options_hash)
     @s3_client_factory = S3ClientFactory.new(@logger, {
@@ -259,7 +265,8 @@ class LogStash::Inputs::S3SNSSQS < LogStash::Inputs::Threadable
     }, aws_options_hash)
     @s3_downloader = S3Downloader.new(@logger, @received_stop, {
       s3_client_factory: @s3_client_factory,
-      delete_on_success: @delete_on_success
+      delete_on_success: @delete_on_success,
+      include_object_properties: @include_object_properties
     })
     @codec_factory = CodecFactory.new(@logger, {
       default_codec: @codec,
@@ -273,18 +280,21 @@ class LogStash::Inputs::S3SNSSQS < LogStash::Inputs::Threadable

   # startup
   def run(logstash_event_queue)
-
-
-
-
-
-
-
-
-
+    @control_threads = @consumer_threads.times.map do |thread_id|
+      Thread.new do
+        restart_count = 0
+        while not stop?
+          # make thread start async to prevent polling the same message from sqs
+          sleep 0.5
+          worker_thread = run_worker_thread(logstash_event_queue, thread_id)
+          worker_thread.join
+          restart_count += 1
+          thread_id = "#{thread_id}_#{restart_count}"
+          @logger.info("[control_thread] restarting a thread #{thread_id}... ", :thread => worker_thread.inspect)
+        end
+      end
     end
-
-    @worker_threads.each { |t| t.join }
+    @control_threads.each { |t| t.join }
   end

   # shutdown
@@ -311,7 +321,6 @@ class LogStash::Inputs::S3SNSSQS < LogStash::Inputs::Threadable
   # --- END plugin interface ------------------------------------------#

   private
-
   def run_worker_thread(queue, thread_id)
     Thread.new do
       LogStash::Util.set_thread_name("Worker #{@id}/#{thread_id}")
@@ -31,7 +31,7 @@ module LogProcessor
     # Decoding a line must not last longer than a few seconds. Otherwise, the file is probably corrupt.
     codec.decode(line) do |event|
       event_count += 1
-      decorate_event(event, metadata, type, record[:key], record[:bucket])
+      decorate_event(event, metadata, type, record[:key], record[:bucket], record[:s3_data])
       #event_time = Time.now #PROFILING
       #event.set("[@metadata][progress][begin]", start_time)
       #event.set("[@metadata][progress][index_time]", event_time)
@@ -45,7 +45,7 @@ module LogProcessor
     # ensure any stateful codecs (such as multi-line) are flushed to the queue
     codec.flush do |event|
       event_count += 1
-      decorate_event(event, metadata, type, record[:key], record[:bucket])
+      decorate_event(event, metadata, type, record[:key], record[:bucket], record[:s3_data])
       @logger.debug("[#{Thread.current[:name]}] Flushing an incomplete event", :event => event.to_s)
       logstash_event_queue << event
     end
@@ -55,7 +55,7 @@ module LogProcessor

   private

-  def decorate_event(event, metadata, type, key, bucket)
+  def decorate_event(event, metadata, type, key, bucket, s3_data)
     if event_is_metadata?(event)
       @logger.debug('Updating the current cloudfront metadata', :event => event)
       update_metadata(metadata, event)
@@ -67,9 +67,11 @@ module LogProcessor
       event.set("cloudfront_version", metadata[:cloudfront_version]) unless metadata[:cloudfront_version].nil?
       event.set("cloudfront_fields", metadata[:cloudfront_fields]) unless metadata[:cloudfront_fields].nil?

+      event.set("[@metadata][s3]", s3_data)
       event.set("[@metadata][s3][object_key]", key)
       event.set("[@metadata][s3][bucket_name]", bucket)
       event.set("[@metadata][s3][object_folder]", get_object_folder(key))
+
     end
   end

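Taken together, the decoration order matters: the whole s3_data hash is written to [@metadata][s3] first, and object_key, bucket_name and object_folder are set afterwards, which is why those fields are present regardless of include_object_properties. A rough plain-Ruby sketch of the resulting subtree (field values are invented, and the real code operates on a LogStash::Event rather than a hash):

# Illustrative only: approximating the event's [@metadata][s3] subtree with a plain hash.
s3_data = { last_modified: Time.utc(2020, 1, 2), content_type: "application/x-gzip" }

metadata_s3 = {}
metadata_s3.merge!(s3_data)                                  # event.set("[@metadata][s3]", s3_data)
metadata_s3[:object_key]    = "logs/app/2020/01/02/foo.gz"   # always set, from the SQS record
metadata_s3[:bucket_name]   = "my-log-bucket"                # always set
metadata_s3[:object_folder] = "logs/app/2020/01/02"          # always set, via get_object_folder(key)

# metadata_s3 now holds both the optional object properties and the
# unconditional key/bucket/folder fields.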
@@ -41,6 +41,7 @@ class SqsPoller
     @queue = client_options[:sqs_queue]
     @from_sns = client_options[:from_sns]
     @max_processing_time = client_options[:max_processing_time]
+    @sqs_delete_on_failure = client_options[:sqs_delete_on_failure]
     @options = DEFAULT_OPTIONS.merge(poller_options)
     begin
       @logger.info("Registering SQS input", :queue => @queue)
@@ -86,6 +87,7 @@ class SqsPoller
       poller_thread = Thread.current
       extender = Thread.new do
         while new_visibility < @max_processing_time do
+
           sleep message_backoff
           begin
             @poller.change_message_visibility_timeout(message, new_visibility)
@@ -98,8 +100,8 @@ class SqsPoller
           end
         end
         @logger.error("[#{Thread.current[:name]}] Maximum visibility reached! We will delete this message from queue!")
-        @poller.delete_message(message)
-        poller_thread.
+        @poller.delete_message(message) if @sqs_delete_on_failure
+        poller_thread.kill
       end
       extender[:name] = "#{Thread.current[:name]}/extender" #PROFILING
       failed = false
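The behavioural change in this hunk: once a message exhausts max_processing_time, the poller used to delete it unconditionally; with sqs_delete_on_failure (default true, per the config above) the delete is now optional, and leaving the message in place means SQS will redeliver it after its visibility timeout expires. A tiny sketch of that decision, using a hypothetical helper name rather than the plugin's actual method:

# Hypothetical helper illustrating the conditional delete on processing timeout.
def handle_visibility_exhausted(poller, message, sqs_delete_on_failure)
  if sqs_delete_on_failure
    poller.delete_message(message)  # give up on the message for good
  end
  # otherwise keep it; SQS makes it visible again after the visibility timeout
end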
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-input-s3-sns-sqs'
-  s.version = '2.0.9'
+  s.version = '2.1.0'
   s.licenses = ['Apache-2.0']
   s.summary = "Get logs from AWS s3 buckets as issued by an object-created event via sns -> sqs."
   s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-input-s3-sns-sqs
 version: !ruby/object:Gem::Version
-  version: 2.0.9
+  version: 2.1.0
 platform: ruby
 authors:
 - Christian Herweg
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2020-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement