logstash-output-s3 0.1.1 → 0.1.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 91981ac4b90d4e167ab69cb1567b86aa6563face
- data.tar.gz: 27a65f27a4bce8bec09b7509d1d83a48d0315b08
+ metadata.gz: c7a88994c7ff548d4ebacbaf908bc786cccfb129
+ data.tar.gz: b87149529c6a84c903c5f87bb831d1b3db6623f2
  SHA512:
- metadata.gz: 5118bb281147a135758c5790795b9928d787a384eda6fb75e14ced937257155e5e675f77e618bde8d455900eb4162a15f79e6188469fe97e05fc40dd8b6f67e7
- data.tar.gz: 6fa3c51b82eb6d95f4f60c74ef6a5d38ef38581f9a5217b5e15739b210c722de5ba61c547dcb40ecf989e8d8fb1694fb75921990e7712069dcb6edc58db15be9
+ metadata.gz: 1a99e7218cd168e2030fc6d9fe6a6c7920ef9c76e5057f333c6f6992e39306b441f230498d02311802a4dc8978012a24d0ee065541d26e63b52617a620d6e757
+ data.tar.gz: 1e9b592b3203375975a97e3c76a2f2bf4f9804529ce87c2d1ec2663e67028f988702f0404738c681afceac9f90ca91158b1bf5a4e276b5f1baeb15d81ffddd73
data/README ADDED
@@ -0,0 +1,15 @@
+ [Missing the other part of the readme]
+
+ ## Running the tests
+
+ ```
+ bundle install
+ bundle exec rspec
+ ```
+
+ If you want to run the integration tests against a real bucket you need to pass
+ your AWS credentials to the test runner or declare them in your environment.
+
+ ```
+ AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=123 AWS_SECRET_ACCESS_KEY=secret AWS_LOGSTASH_TEST_BUCKET=mytest bundle exec rspec spec/integration/s3_spec.rb --tag integration
+ ```
lib/logstash/outputs/s3.rb CHANGED
@@ -1,11 +1,14 @@
  # encoding: utf-8
  require "logstash/outputs/base"
  require "logstash/namespace"
+ require "logstash/plugin_mixins/aws_config"
+ require "stud/temporary"
  require "socket" # for Socket.gethostname
+ require "thread"
+ require "tmpdir"
+ require "fileutils"
+

- # TODO integrate aws_config in the future
- #require "logstash/plugin_mixins/aws_config"
- #
  # INFORMATION:
  #
  # This plugin was created to store logstash's events into Amazon Simple Storage Service (Amazon S3).
@@ -34,7 +37,6 @@ require "socket" # for Socket.gethostname
  #
  ## If you specify both size_file and time_file, a file is created for each tag (if specified); when time_file expires
  ## or a file's size exceeds size_file, the files are pushed to the S3 bucket and deleted from the local disk.
- #
  ## If you specify time_file but not size_file, only one file is created for each tag (if specified).
  ## When time_file expires, the files are pushed to the S3 bucket and deleted from the local disk.
  #
@@ -44,22 +46,8 @@ require "socket" # for Socket.gethostname
  ## If you specify neither size_file nor time_file you get a curious mode: only one file is created for each tag (if specified).
  ## The file then rests in the temporary directory and is not pushed to the bucket until logstash restarts.
  #
- # INFORMATION ABOUT CLASS:
- #
- # I tried to comment the class at best i could do.
- # I think there are much thing to improve, but if you want some points to develop here a list:
- #
- # TODO Integrate aws_config in the future
- # TODO Find a method to push them all files when logtstash close the session.
- # TODO Integrate @field on the path file
- # TODO Permanent connection or on demand? For now on demand, but isn't a good implementation.
- # Use a while or a thread to try the connection before break a time_out and signal an error.
- # TODO If you have bugs report or helpful advice contact me, but remember that this code is much mine as much as yours,
- # try to work on it if you want :)
- #
- #
- # USAGE:
  #
+ # #### Usage:
  # This is an example of logstash config:
  # [source,ruby]
  # output {
@@ -73,285 +61,359 @@ require "socket" # for Socket.gethostname
  # format => "plain" (optional)
  # canned_acl => "private" (optional. Options are "private", "public_read", "public_read_write", "authenticated_read". Defaults to "private" )
  # }
- # }
- #
- # We analize this:
- #
- # access_key_id => "crazy_key"
- # Amazon will give you the key for use their service if you buy it or try it. (not very much open source anyway)
- #
- # secret_access_key => "monkey_access_key"
- # Amazon will give you the secret_access_key for use their service if you buy it or try it . (not very much open source anyway).
- #
- # endpoint_region => "eu-west-1"
- # When you make a contract with Amazon, you should know where the services you use.
- #
- # bucket => "boss_please_open_your_bucket"
- # Be careful you have the permission to write on bucket and know the name.
- #
- # size_file => 2048
- # Means the size, in KB, of files who can store on temporary directory before you will be pushed on bucket.
- # Is useful if you have a little server with poor space on disk and you don't want blow up the server with unnecessary temporary log files.
- #
- # time_file => 5
- # Means, in minutes, the time before the files will be pushed on bucket. Is useful if you want to push the files every specific time.
- #
- # format => "plain"
- # Means the format of events you want to store in the files
- #
- # canned_acl => "private"
- # The S3 canned ACL to use when putting the file. Defaults to "private".
- #
- # LET'S ROCK AND ROLL ON THE CODE!
  #
  class LogStash::Outputs::S3 < LogStash::Outputs::Base
- #TODO integrate aws_config in the future
- # include LogStash::PluginMixins::AwsConfig
-
- config_name "s3"
- milestone 1
+ include LogStash::PluginMixins::AwsConfig
+
+ TEMPFILE_EXTENSION = "txt"
+ S3_INVALID_CHARACTERS = /[\^`><]/
+
+ config_name "s3"
+ milestone 1
+ default :codec, 'line'
+
+ # S3 bucket
+ config :bucket, :validate => :string
+
+ # AWS endpoint_region
+ config :endpoint_region, :validate => ["us-east-1", "us-west-1", "us-west-2",
+ "eu-west-1", "ap-southeast-1", "ap-southeast-2",
+ "ap-northeast-1", "sa-east-1", "us-gov-west-1"], :deprecated => 'Deprecated, use region instead.'
+
+ # Set the size of the file in bytes. When the temporary file grows past size_file it is rotated,
+ # so the events end up split across two or more files on the bucket.
+ # If you have tags, a separate file of that size is generated for every tag set.
+ ## NOTE: defining a file size is the better option, because the plugin generates a local temporary file on disk and then puts it in the bucket.
+ config :size_file, :validate => :number, :default => 0
+
+ # Set the time, in minutes, to close the current sub_time_section of the bucket.
+ # If you also define size_file you get a number of files per time section and per tag.
+ # 0 means stay on the listener indefinitely. Beware: if both time_file and size_file are 0 the file is never put on the bucket;
+ # for now the only thing this plugin can do in that case is to push the file when logstash restarts.
+ config :time_file, :validate => :number, :default => 0
+
+ ## IMPORTANT: if you use multiple instances of this s3 output, specify "restore => true" on one of them and "restore => false" on the others.
+ ## This is a hack to avoid destroying new files after restoring the initial files.
+ ## If you do not specify "restore => true", files left behind when logstash crashes or is restarted are not sent to the bucket,
+ ## for example if you have a single instance.
+ config :restore, :validate => :boolean, :default => false
+
+ # The S3 canned ACL to use when putting the file. Defaults to "private".
+ config :canned_acl, :validate => ["private", "public_read", "public_read_write", "authenticated_read"],
+ :default => "private"
+
+ # Set the directory where logstash will store the temporary files before sending them to S3.
+ # Defaults to the OS temporary directory plus "logstash", e.g. /tmp/logstash on Linux.
+ config :temporary_directory, :validate => :string, :default => File.join(Dir.tmpdir, "logstash")
+
+ # Specify a prefix for the uploaded filename; this can simulate directories on S3.
+ config :prefix, :validate => :string, :default => ''
+
+ # Specify how many workers to use to upload the files to S3
+ config :upload_workers_count, :validate => :number, :default => 1
+
+ # Attributes exposed for testing purposes.
+ attr_accessor :tempfile
+ attr_reader :page_counter
+ attr_reader :s3
+
+ def aws_s3_config
+ @logger.info("Registering s3 output", :bucket => @bucket, :endpoint_region => @region)
+ @s3 = AWS::S3.new(aws_options_hash)
+ end

- # Aws access_key.
- config :access_key_id, :validate => :string
+ def aws_service_endpoint(region)
+ # Make the deprecated endpoint_region work
+ # TODO: (ph) Remove this after deprecation.
+
+ if @endpoint_region
+ region_to_use = @endpoint_region
+ else
+ region_to_use = @region
+ end

- # Aws secret_access_key
- config :secret_access_key, :validate => :string
+ return {
+ :s3_endpoint => region_to_use == 'us-east-1' ? 's3.amazonaws.com' : "s3-#{region_to_use}.amazonaws.com"
+ }
+ end

- # S3 bucket
- config :bucket, :validate => :string
+ public
+ def write_on_bucket(file)
+ # find and use the bucket
+ bucket = @s3.buckets[@bucket]

- # Aws endpoint_region
- config :endpoint_region, :validate => ["us-east-1", "us-west-1", "us-west-2",
- "eu-west-1", "ap-southeast-1", "ap-southeast-2",
- "ap-northeast-1", "sa-east-1", "us-gov-west-1"], :default => "us-east-1"
+ remote_filename = "#{@prefix}#{File.basename(file)}"

- # Set the size of file in KB, this means that files on bucket when have dimension > file_size, they are stored in two or more file.
- # If you have tags then it will generate a specific size file for every tags
- ##NOTE: define size of file is the better thing, because generate a local temporary file on disk and then put it in bucket.
- config :size_file, :validate => :number, :default => 0
+ @logger.debug("S3: ready to write file in bucket", :remote_filename => remote_filename, :bucket => @bucket)

- # Set the time, in minutes, to close the current sub_time_section of bucket.
- # If you define file_size you have a number of files in consideration of the section and the current tag.
- # 0 stay all time on listerner, beware if you specific 0 and size_file 0, because you will not put the file on bucket,
- # for now the only thing this plugin can do is to put the file when logstash restart.
- config :time_file, :validate => :number, :default => 0
+ begin
+ # prepare to write the file
+ object = bucket.objects[remote_filename]
+ object.write(:file => file, :acl => @canned_acl)
+ rescue AWS::Errors::Base => error
+ @logger.error("S3: AWS error", :error => error)
+ raise LogStash::Error, "AWS Configuration Error, #{error}"
+ end

- # The event format you want to store in files. Defaults to plain text.
- config :format, :validate => [ "json", "plain", "nil" ], :default => "plain"
+ @logger.debug("S3: has written remote file in bucket with canned ACL", :remote_filename => remote_filename, :bucket => @bucket, :canned_acl => @canned_acl)
+ end

- ## IMPORTANT: if you use multiple instance of s3, you should specify on one of them the "restore=> true" and on the others "restore => false".
- ## This is hack for not destroy the new files after restoring the initial files.
- ## If you do not specify "restore => true" when logstash crashes or is restarted, the files are not sent into the bucket,
- ## for example if you have single Instance.
- config :restore, :validate => :boolean, :default => false
+ # This method is used to create new empty temporary files for use. The flag indicates a new time_file subsection.
+ public
+ def create_temporary_file
+ filename = File.join(@temporary_directory, get_temporary_filename(@page_counter))

- # Aws canned ACL
- config :canned_acl, :validate => ["private", "public_read", "public_read_write", "authenticated_read"],
- :default => "private"
+ @logger.debug("S3: Creating a new temporary file", :filename => filename)

- # Method to set up the aws configuration and establish connection
- def aws_s3_config
+ @file_rotation_lock.synchronize do
+ unless @tempfile.nil?
+ @tempfile.close
+ end

- @endpoint_region == 'us-east-1' ? @endpoint_region = 's3.amazonaws.com' : @endpoint_region = 's3-'+@endpoint_region+'.amazonaws.com'
+ @tempfile = File.open(filename, "a")
+ end
+ end

- @logger.info("Registering s3 output", :bucket => @bucket, :endpoint_region => @endpoint_region)
+ public
+ def register
+ require "aws-sdk"
+ # required if using ruby version < 2.0
+ # http://ruby.awsblog.com/post/Tx16QY1CI5GVBFT/Threading-with-the-AWS-SDK-for-Ruby
+ AWS.eager_autoload!(AWS::S3)

- AWS.config(
- :access_key_id => @access_key_id,
- :secret_access_key => @secret_access_key,
- :s3_endpoint => @endpoint_region
- )
- @s3 = AWS::S3.new
+ workers_not_supported

- end
+ @s3 = aws_s3_config
+ @upload_queue = Queue.new
+ @file_rotation_lock = Mutex.new

- # This method is used to manage sleep and awaken thread.
- def time_alert(interval)
+ if @prefix && @prefix =~ S3_INVALID_CHARACTERS
+ @logger.error("S3: prefix contains invalid characters", :prefix => @prefix, :contains => S3_INVALID_CHARACTERS)
+ raise LogStash::ConfigurationError, "S3: prefix contains invalid characters"
+ end

- Thread.new do
- loop do
- start_time = Time.now
- yield
- elapsed = Time.now - start_time
- sleep([interval - elapsed, 0].max)
+ if !Dir.exist?(@temporary_directory)
+ FileUtils.mkdir_p(@temporary_directory)
  end
- end

- end
+ test_s3_write

- # this method is used for write files on bucket. It accept the file and the name of file.
- def write_on_bucket (file_data, file_basename)
+ restore_from_crashes if @restore == true
+ reset_page_counter
+ create_temporary_file
+ configure_periodic_rotation if time_file != 0
+ configure_upload_workers

- # if you lose connection with s3, bad control implementation.
- if ( @s3 == nil)
- aws_s3_config
+ @codec.on_event do |event, encoded_event|
+ handle_event(encoded_event)
+ end
  end

- # find and use the bucket
- bucket = @s3.buckets[@bucket]
-
- @logger.debug "S3: ready to write "+file_basename+" in bucket "+@bucket+", Fire in the hole!"

- # prepare for write the file
- object = bucket.objects[file_basename]
- object.write(:file => file_data, :acl => @canned_acl)
+ # Use the same method that Amazon uses to check
+ # permissions on the user bucket, by creating a small file
+ public
+ def test_s3_write
+ @logger.debug("S3: Creating a test file on S3")

- @logger.debug "S3: has written "+file_basename+" in bucket "+@bucket + " with canned ACL \"" + @canned_acl + "\""
+ test_filename = File.join(@temporary_directory,
+ "logstash-programmatic-access-test-object-#{Time.now.to_i}")

- end
-
- # this method is used for create new path for name the file
- def getFinalPath
+ File.open(test_filename, 'a') do |file|
+ file.write('test')
+ end

- @pass_time = Time.now
- return @temp_directory+"ls.s3."+Socket.gethostname+"."+(@pass_time).strftime("%Y-%m-%dT%H.%M")
+ begin
+ write_on_bucket(test_filename)
+ delete_on_bucket(test_filename)
+ ensure
+ File.delete(test_filename)
+ end
+ end
+
+ public
+ def restore_from_crashes
+ @logger.debug("S3: is attempting to verify previous crashes...")
+
+ Dir[File.join(@temporary_directory, "*.#{TEMPFILE_EXTENSION}")].each do |file|
+ name_file = File.basename(file)
+ @logger.warn("S3: found a temporary file left behind by a crashed upload process, uploading file to S3.", :filename => name_file)
+ move_file_to_bucket_async(file)
+ end
+ end

- end
+ public
+ def move_file_to_bucket(file)
+ if !File.zero?(file)
+ write_on_bucket(file)
+ @logger.debug("S3: file was put on the upload thread", :filename => File.basename(file), :bucket => @bucket)
+ end

- # This method is used for restore the previous crash of logstash or to prepare the files to send in bucket.
- # Take two parameter: flag and name. Flag indicate if you want to restore or not, name is the name of file
- def upFile(flag, name)
+ begin
+ File.delete(file)
+ rescue Errno::ENOENT
+ # Something else deleted the file, logging but not raising the issue
+ @logger.warn("S3: Cannot delete the temporary file since it doesn't exist on disk", :filename => File.basename(file))
+ rescue Errno::EACCES
+ @logger.error("S3: Logstash doesn't have permission to delete the file in the temporary directory.", :filename => File.basename(file), :temporary_directory => @temporary_directory)
+ end
+ end

- Dir[@temp_directory+name].each do |file|
- name_file = File.basename(file)
+ public
+ def periodic_interval
+ @time_file * 60
+ end

- if (flag == true)
- @logger.warn "S3: have found temporary file: "+name_file+", something has crashed before... Prepare for upload in bucket!"
- end
+ public
+ def get_temporary_filename(page_counter = 0)
+ current_time = Time.now
+ filename = "ls.s3.#{Socket.gethostname}.#{current_time.strftime("%Y-%m-%dT%H.%M")}"

- if (!File.zero?(file))
- write_on_bucket(file, name_file)
+ if @tags.size > 0
+ return "#{filename}.tag_#{@tags.join('.')}.part#{page_counter}.#{TEMPFILE_EXTENSION}"
+ else
+ return "#{filename}.part#{page_counter}.#{TEMPFILE_EXTENSION}"
+ end
+ end

- if (flag == true)
- @logger.debug "S3: file: "+name_file+" restored on bucket "+@bucket
- else
- @logger.debug "S3: file: "+name_file+" was put on bucket "+@bucket
- end
- end
+ public
+ def receive(event)
+ return unless output?(event)
+ @codec.encode(event)
+ end

- File.delete (file)
+ public
+ def rotate_events_log?
+ @tempfile.size > @size_file
+ end

- end
- end
+ public
+ def write_events_to_multiple_files?
+ @size_file > 0
+ end

- # This method is used for create new empty temporary files for use. Flag is needed for indicate new subsection time_file.
- def newFile (flag)
+ public
+ def write_to_tempfile(event)
+ begin
+ @logger.debug("S3: put event into tempfile ", :tempfile => File.basename(@tempfile))
+
+ @file_rotation_lock.synchronize do
+ @tempfile.syswrite(event)
+ end
+ rescue Errno::ENOSPC
+ @logger.error("S3: No space left in temporary directory", :temporary_directory => @temporary_directory)
+ teardown
+ end
+ end

- if (flag == true)
- @current_final_path = getFinalPath
- @sizeCounter = 0
- end
+ public
+ def teardown
+ shutdown_upload_workers
+ @periodic_rotation_thread.stop! if @periodic_rotation_thread

- if (@tags.size != 0)
- @tempFile = File.new(@current_final_path+".tag_"+@tag_path+"part"+@sizeCounter.to_s+".txt", "w")
- else
- @tempFile = File.new(@current_final_path+".part"+@sizeCounter.to_s+".txt", "w")
- end
+ @tempfile.close
+ finished
+ end

- end
+ private
+ def shutdown_upload_workers
+ @logger.debug("S3: Gracefully shutdown the upload workers")
+ @upload_queue << LogStash::ShutdownEvent
+ end

- public
- def register
- require "aws-sdk"
- @temp_directory = "/opt/logstash/S3_temp/"
+ private
+ def handle_event(encoded_event)
+ if write_events_to_multiple_files?
+ if rotate_events_log?
+ @logger.debug("S3: tempfile is too large, let's bucket it and create new file", :tempfile => File.basename(@tempfile))
+
+ move_file_to_bucket_async(@tempfile.path)
+ next_page
+ create_temporary_file
+ else
+ @logger.debug("S3: tempfile file size report.", :tempfile_size => @tempfile.size, :size_file => @size_file)
+ end
+ end
+
+ write_to_tempfile(encoded_event)
+ end

- if (@tags.size != 0)
- @tag_path = ""
- for i in (0..@tags.size-1)
- @tag_path += @tags[i].to_s+"."
- end
- end
+ private
+ def configure_periodic_rotation
+ @periodic_rotation_thread = Stud::Task.new do
+ LogStash::Util::set_thread_name("<S3 periodic uploader")

- if !(File.directory? @temp_directory)
- @logger.debug "S3: Directory "+@temp_directory+" doesn't exist, let's make it!"
- Dir.mkdir(@temp_directory)
- else
- @logger.debug "S3: Directory "+@temp_directory+" exist, nothing to do"
- end
+ Stud.interval(periodic_interval, :sleep_then_run => true) do
+ @logger.debug("S3: time_file triggered, bucketing the file", :filename => @tempfile.path)

- if (@restore == true )
- @logger.debug "S3: is attempting to verify previous crashes..."
+ move_file_to_bucket_async(@tempfile.path)
+ next_page
+ create_temporary_file
+ end
+ end
+ end

- upFile(true, "*.txt")
- end
+ private
+ def configure_upload_workers
+ @logger.debug("S3: Configure upload workers")

- newFile(true)
+ @upload_workers = @upload_workers_count.times.map do |worker_id|
+ Stud::Task.new do
+ LogStash::Util::set_thread_name("<S3 upload worker #{worker_id}")

- if (time_file != 0)
- first_time = true
- @thread = time_alert(@time_file*60) do
- if (first_time == false)
- @logger.debug "S3: time_file triggered, let's bucket the file if dosen't empty and create new file "
- upFile(false, File.basename(@tempFile))
- newFile(true)
- else
- first_time = false
- end
- end
- end
+ while true do
+ @logger.debug("S3: upload worker is waiting for a new file to upload.", :worker_id => worker_id)

- end
-
- public
- def receive(event)
- return unless output?(event)
-
- # Prepare format of Events
- if (@format == "plain")
- message = self.class.format_message(event)
- elsif (@format == "json")
- message = event.to_json
- else
- message = event.to_s
+ upload_worker
+ end
+ end
+ end
  end

- if(time_file !=0)
- @logger.debug "S3: trigger files after "+((@pass_time+60*time_file)-Time.now).to_s
+ private
+ def upload_worker
+ file = @upload_queue.deq
+
+ case file
+ when LogStash::ShutdownEvent
+ @logger.debug("S3: upload worker is shutting down gracefully")
+ @upload_queue.enq(LogStash::ShutdownEvent)
+ else
+ @logger.debug("S3: upload worker is uploading a new file", :filename => File.basename(file))
+ move_file_to_bucket(file)
+ end
  end

- # if specific the size
- if(size_file !=0)
-
- if (@tempFile.size < @size_file )
-
- @logger.debug "S3: File have size: "+@tempFile.size.to_s+" and size_file is: "+ @size_file.to_s
- @logger.debug "S3: put event into: "+File.basename(@tempFile)
-
- # Put the event in the file, now!
- File.open(@tempFile, 'a') do |file|
- file.puts message
- file.write "\n"
- end
+ private
+ def next_page
+ @page_counter += 1
+ end

- else
+ private
+ def reset_page_counter
+ @page_counter = 0
+ end

- @logger.debug "S3: file: "+File.basename(@tempFile)+" is too large, let's bucket it and create new file"
- upFile(false, File.basename(@tempFile))
- @sizeCounter += 1
- newFile(false)
+ private
+ def delete_on_bucket(filename)
+ bucket = @s3.buckets[@bucket]

- end
+ remote_filename = "#{@prefix}#{File.basename(filename)}"

- # else we put all in one file
- else
+ @logger.debug("S3: delete file from bucket", :remote_filename => remote_filename, :bucket => @bucket)

- @logger.debug "S3: put event into "+File.basename(@tempFile)
- File.open(@tempFile, 'a') do |file|
- file.puts message
- file.write "\n"
+ begin
+ # prepare to delete the file
+ object = bucket.objects[remote_filename]
+ object.delete
+ rescue AWS::Errors::Base => e
+ @logger.error("S3: AWS error", :error => e)
+ raise LogStash::ConfigurationError, "AWS Configuration Error"
  end
  end

- end
-
- def self.format_message(event)
- message = "Date: #{event[LogStash::Event::TIMESTAMP]}\n"
- message << "Source: #{event["source"]}\n"
- message << "Tags: #{event["tags"].join(', ')}\n"
- message << "Fields: #{event.to_hash.inspect}\n"
- message << "Message: #{event["message"]}"
- end
-
+ private
+ def move_file_to_bucket_async(file)
+ @logger.debug("S3: Sending the file to the upload queue.", :filename => File.basename(file))
+ @upload_queue.enq(file)
+ end
  end
-
- # Enjoy it, by Bistic:)
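
For reference, the options introduced or reworked in this release (region via the aws_config mixin, prefix, temporary_directory, upload_workers_count, restore) combine as in the following minimal sketch of a 0.1.2-style configuration; all values here are illustrative, not taken from the changeset:

```
output {
  s3 {
    access_key_id => "1234"                  # credentials handled by logstash-mixin-aws
    secret_access_key => "secret"
    region => "eu-west-1"                    # replaces the deprecated endpoint_region
    bucket => "my-bucket"
    size_file => 2048                        # now interpreted in bytes, no longer KB
    time_file => 5                           # minutes between periodic uploads
    prefix => "logs/app1/"                   # simulates directories on S3
    temporary_directory => "/tmp/logstash"   # default: File.join(Dir.tmpdir, "logstash")
    upload_workers_count => 2                # workers draining the upload queue
    restore => true                          # enable on exactly one instance (see the restore notes above)
    canned_acl => "private"
  }
}
```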
logstash-output-s3.gemspec CHANGED
@@ -1,7 +1,7 @@
  Gem::Specification.new do |s|

  s.name = 'logstash-output-s3'
- s.version = '0.1.1'
+ s.version = '0.1.2'
  s.licenses = ['Apache License (2.0)']
  s.summary = "This plugin was created to store logstash's events into Amazon Simple Storage Service (Amazon S3)"
  s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
@@ -23,6 +23,10 @@ Gem::Specification.new do |s|
  s.add_runtime_dependency 'logstash', '>= 1.4.0', '< 2.0.0'
  s.add_runtime_dependency 'logstash-mixin-aws'
  s.add_runtime_dependency 'aws-sdk'
+ s.add_runtime_dependency 'stud', '~> 0.0.18'
  s.add_development_dependency 'logstash-devutils'
+ s.add_development_dependency 'logstash-input-generator'
+ s.add_development_dependency 'logstash-input-stdin'
+ s.add_development_dependency 'logstash-codec-line'
  end

spec/integration/s3_spec.rb ADDED
@@ -0,0 +1,96 @@
+ require "logstash/devutils/rspec/spec_helper"
+ require "logstash/outputs/s3"
+ require 'socket'
+ require "aws-sdk"
+ require "fileutils"
+ require "stud/temporary"
+ require_relative "../supports/helpers"
+
+ describe LogStash::Outputs::S3, :integration => true, :s3 => true do
+ before do
+ Thread.abort_on_exception = true
+ end
+
+ let!(:minimal_settings) { { "access_key_id" => ENV['AWS_ACCESS_KEY_ID'],
+ "secret_access_key" => ENV['AWS_SECRET_ACCESS_KEY'],
+ "bucket" => ENV['AWS_LOGSTASH_TEST_BUCKET'],
+ "region" => ENV["AWS_REGION"] || "us-east-1",
+ "temporary_directory" => Stud::Temporary.pathname('temporary_directory') }}
+
+ let!(:s3_object) do
+ s3output = LogStash::Outputs::S3.new(minimal_settings)
+ s3output.register
+ s3output.s3
+ end
+
+ after(:all) do
+ delete_matching_keys_on_bucket('studtmp')
+ delete_matching_keys_on_bucket('my-prefix')
+ end
+
+ describe "#register" do
+ it "writes a file on the bucket to check permissions" do
+ s3 = LogStash::Outputs::S3.new(minimal_settings)
+ expect { s3.register }.not_to raise_error
+ end
+ end
+
+ describe "#write_on_bucket" do
+ after(:all) do
+ File.unlink(fake_data.path)
+ end
+
+ let!(:fake_data) { Stud::Temporary.file }
+
+ it "should prefix the file on the bucket if a prefix is specified" do
+ prefix = "my-prefix"
+
+ config = minimal_settings.merge({
+ "prefix" => prefix,
+ })
+
+ s3 = LogStash::Outputs::S3.new(config)
+ s3.register
+ s3.write_on_bucket(fake_data)
+
+ expect(key_exists_on_bucket?("#{prefix}#{File.basename(fake_data.path)}")).to eq(true)
+ end
+
+ it 'should use the same local filename if no prefix is specified' do
+ s3 = LogStash::Outputs::S3.new(minimal_settings)
+ s3.register
+ s3.write_on_bucket(fake_data)
+
+ expect(key_exists_on_bucket?(File.basename(fake_data.path))).to eq(true)
+ end
+ end
+
+ describe "#move_file_to_bucket" do
+ let!(:s3) { LogStash::Outputs::S3.new(minimal_settings) }
+
+ before do
+ s3.register
+ end
+
+ it "should upload the file if the size > 0" do
+ tmp = Stud::Temporary.file
+ allow(File).to receive(:zero?).and_return(false)
+ s3.move_file_to_bucket(tmp)
+ expect(key_exists_on_bucket?(File.basename(tmp.path))).to eq(true)
+ end
+ end
+
+ describe "#restore_from_crashes" do
+ it "reads the temp directory and uploads the matching file to s3" do
+ Stud::Temporary.pathname do |temp_path|
+ tempfile_path = File.join(temp_path, 'A')
+ File.open(tempfile_path, 'w+') { |f| f.write('test') }
+
+ s3 = LogStash::Outputs::S3.new(minimal_settings.merge({ "temporary_directory" => temp_path }))
+ s3.restore_from_crashes
+
+ expect(File.exist?(tempfile_path)).to eq(false)
+ expect(key_exists_on_bucket?(File.basename(tempfile_path))).to eq(true)
+ end
+ end
+ end
+ end
spec/outputs/s3_spec.rb CHANGED
@@ -1,6 +1,324 @@
  # encoding: utf-8
  require "logstash/devutils/rspec/spec_helper"
- require 'logstash/outputs/s3'
+ require "logstash/outputs/s3"
+ require "logstash/codecs/line"
+ require "logstash/pipeline"
+ require "aws-sdk"
+ require "fileutils"
+ require_relative "../supports/helpers"

  describe LogStash::Outputs::S3 do
+ before do
+ # We stub all the calls from S3, for more information see:
+ # http://ruby.awsblog.com/post/Tx2SU6TYJWQQLC3/Stubbing-AWS-Responses
+ AWS.stub!
+
+ Thread.abort_on_exception = true
+ end
+
+ let(:minimal_settings) { { "access_key_id" => "1234",
+ "secret_access_key" => "secret",
+ "bucket" => "my-bucket" } }
+
+ describe "configuration" do
+ let!(:config) { { "endpoint_region" => "sa-east-1" } }
+
+ it "should support the deprecated endpoint_region as a configuration option" do
+ s3 = LogStash::Outputs::S3.new(config)
+ expect(s3.aws_options_hash[:s3_endpoint]).to eq("s3-sa-east-1.amazonaws.com")
+ end
+
+ it "should fall back to region if endpoint_region isn't defined" do
+ s3 = LogStash::Outputs::S3.new(config.merge({ "region" => 'sa-east-1' }))
+ expect(s3.aws_options_hash).to include(:s3_endpoint => "s3-sa-east-1.amazonaws.com")
+ end
+ end
+
+ describe "#register" do
+ it "should create the tmp directory if it doesn't exist" do
+ temporary_directory = Stud::Temporary.pathname("temporary_directory")
+
+ config = {
+ "access_key_id" => "1234",
+ "secret_access_key" => "secret",
+ "bucket" => "logstash",
+ "size_file" => 10,
+ "temporary_directory" => temporary_directory
+ }
+
+ s3 = LogStash::Outputs::S3.new(config)
+ allow(s3).to receive(:test_s3_write)
+ s3.register
+
+ expect(Dir.exist?(temporary_directory)).to eq(true)
+ FileUtils.rm_r(temporary_directory)
+ end
+
+ it "should raise a ConfigurationError if the prefix contains one or more '\^`><' characters" do
+ config = {
+ "prefix" => "`no\><^"
+ }
+
+ s3 = LogStash::Outputs::S3.new(config)
+
+ expect {
+ s3.register
+ }.to raise_error(LogStash::ConfigurationError)
+ end
+ end
+
+ describe "#get_temporary_filename" do
+ before do
+ Socket.stub(:gethostname) { "logstash.local" }
+ Time.stub(:now) { Time.new('2015-10-09-09:00') }
+ end
+
+ it "should add tags to the filename if present" do
+ config = minimal_settings.merge({ "tags" => ["elasticsearch", "logstash", "kibana"], "temporary_directory" => "/tmp/logstash"})
+ s3 = LogStash::Outputs::S3.new(config)
+ expect(s3.get_temporary_filename).to eq("ls.s3.logstash.local.2015-01-01T00.00.tag_elasticsearch.logstash.kibana.part0.txt")
+ end
+
+ it "should not add the tags to the filename" do
+ config = minimal_settings.merge({ "tags" => [], "temporary_directory" => "/tmp/logstash" })
+ s3 = LogStash::Outputs::S3.new(config)
+ expect(s3.get_temporary_filename(3)).to eq("ls.s3.logstash.local.2015-01-01T00.00.part3.txt")
+ end
+
+ it "normalizes the temp directory to include the trailing slash if missing" do
+ s3 = LogStash::Outputs::S3.new(minimal_settings.merge({ "temporary_directory" => "/tmp/logstash" }))
+ expect(s3.get_temporary_filename).to eq("ls.s3.logstash.local.2015-01-01T00.00.part0.txt")
+ end
+ end
+
+ describe "#write_on_bucket" do
+ after(:all) do
+ File.unlink(fake_data.path)
+ end
+
+ let!(:fake_data) { Stud::Temporary.file }
+
+ let(:fake_bucket) do
+ s3 = double('S3Object')
+ s3.stub(:write)
+ s3
+ end
+
+ it "should prefix the file on the bucket if a prefix is specified" do
+ prefix = "my-prefix"
+
+ config = minimal_settings.merge({
+ "prefix" => prefix,
+ "bucket" => "my-bucket"
+ })
+
+ expect_any_instance_of(AWS::S3::ObjectCollection).to receive(:[]).with("#{prefix}#{File.basename(fake_data)}") { fake_bucket }
+
+ s3 = LogStash::Outputs::S3.new(config)
+ allow(s3).to receive(:test_s3_write)
+ s3.register
+ s3.write_on_bucket(fake_data)
+ end
+
+ it 'should use the same local filename if no prefix is specified' do
+ config = minimal_settings.merge({
+ "bucket" => "my-bucket"
+ })
+
+ expect_any_instance_of(AWS::S3::ObjectCollection).to receive(:[]).with(File.basename(fake_data)) { fake_bucket }
+
+ s3 = LogStash::Outputs::S3.new(minimal_settings)
+ allow(s3).to receive(:test_s3_write)
+ s3.register
+ s3.write_on_bucket(fake_data)
+ end
+ end
+
+ describe "#write_events_to_multiple_files?" do
+ it 'returns true if size_file is != 0' do
+ s3 = LogStash::Outputs::S3.new(minimal_settings.merge({ "size_file" => 200 }))
+ expect(s3.write_events_to_multiple_files?).to eq(true)
+ end
+
+ it 'returns false if size_file is zero or not set' do
+ s3 = LogStash::Outputs::S3.new(minimal_settings)
+ expect(s3.write_events_to_multiple_files?).to eq(false)
+ end
+ end
+
+ describe "#write_to_tempfile" do
+ it "should append the event to a file" do
+ Stud::Temporary.file("logstash", "a+") do |tmp|
+ s3 = LogStash::Outputs::S3.new(minimal_settings)
+ allow(s3).to receive(:test_s3_write)
+ s3.register
+ s3.tempfile = tmp
+ s3.write_to_tempfile("test-write")
+ tmp.rewind
+ expect(tmp.read).to eq("test-write")
+ end
+ end
+ end
+
+ describe "#rotate_events_log" do
+ let(:s3) { LogStash::Outputs::S3.new(minimal_settings.merge({ "size_file" => 1024 })) }
+
+ it "returns true if the tempfile is over the file_size limit" do
+ Stud::Temporary.file do |tmp|
+ tmp.stub(:size) { 2024001 }
+
+ s3.tempfile = tmp
+ expect(s3.rotate_events_log?).to be(true)
+ end
+ end
+
+ it "returns false if the tempfile is under the file_size limit" do
+ Stud::Temporary.file do |tmp|
+ tmp.stub(:size) { 100 }
+
+ s3.tempfile = tmp
+ expect(s3.rotate_events_log?).to eq(false)
+ end
+ end
+ end
+
+ describe "#move_file_to_bucket" do
+ let!(:s3) { LogStash::Outputs::S3.new(minimal_settings) }
+
+ before do
+ # Assume the AWS test credentials pass.
+ allow(s3).to receive(:test_s3_write)
+ s3.register
+ end
+
+ it "should always delete the source file" do
+ tmp = Stud::Temporary.file
+
+ allow(File).to receive(:zero?).and_return(true)
+ expect(File).to receive(:delete).with(tmp)
+
+ s3.move_file_to_bucket(tmp)
+ end
+
+ it 'should not upload the file if the size of the file is zero' do
+ temp_file = Stud::Temporary.file
+ allow(temp_file).to receive(:zero?).and_return(true)
+
+ expect(s3).not_to receive(:write_on_bucket)
+ s3.move_file_to_bucket(temp_file)
+ end
+
+ it "should upload the file if the size > 0" do
+ tmp = Stud::Temporary.file
+
+ allow(File).to receive(:zero?).and_return(false)
+ expect(s3).to receive(:write_on_bucket)
+
+ s3.move_file_to_bucket(tmp)
+ end
+ end
+
+ describe "#restore_from_crashes" do
+ it "reads the temp directory and uploads the matching file to s3" do
+ s3 = LogStash::Outputs::S3.new(minimal_settings.merge({ "temporary_directory" => "/tmp/logstash/" }))
+
+ expect(Dir).to receive(:[]).with("/tmp/logstash/*.txt").and_return(["/tmp/logstash/01.txt"])
+ expect(s3).to receive(:move_file_to_bucket_async).with("/tmp/logstash/01.txt")
+
+ s3.restore_from_crashes
+ end
+ end
+
+ describe "#receive" do
+ it "should send the event through the codecs" do
+ data = {"foo" => "bar", "baz" => {"bah" => ["a","b","c"]}, "@timestamp" => "2014-05-30T02:52:17.929Z"}
+ event = LogStash::Event.new(data)
+
+ expect_any_instance_of(LogStash::Codecs::Line).to receive(:encode).with(event)
+
+ s3 = LogStash::Outputs::S3.new(minimal_settings)
+ allow(s3).to receive(:test_s3_write)
+ s3.register
+
+ s3.receive(event)
+ end
+ end
+
+ describe "when rotating the temporary file" do
+ before { allow(File).to receive(:delete) }
+
+ it "doesn't skip events if using the size_file option" do
+ Stud::Temporary.directory do |temporary_directory|
+ size_file = rand(200..20000)
+ event_count = rand(300..15000)
+
+ config = %Q[
+ input {
+ generator {
+ count => #{event_count}
+ }
+ }
+ output {
+ s3 {
+ access_key_id => "1234"
+ secret_access_key => "secret"
+ size_file => #{size_file}
+ codec => line
+ temporary_directory => '#{temporary_directory}'
+ bucket => 'testing'
+ }
+ }
+ ]
+
+ pipeline = LogStash::Pipeline.new(config)
+
+ pipeline_thread = Thread.new { pipeline.run }
+ sleep 0.1 while !pipeline.ready?
+ pipeline_thread.join
+
+ events_written_count = events_in_files(Dir[File.join(temporary_directory, 'ls.*.txt')])
+ expect(events_written_count).to eq(event_count)
+ end
+ end
+
+ it "doesn't skip events if using the time_file option", :tag => :slow do
+ Stud::Temporary.directory do |temporary_directory|
+ time_file = rand(5..10)
+ number_of_rotation = rand(4..10)
+
+ config = {
+ "time_file" => time_file,
+ "codec" => "line",
+ "temporary_directory" => temporary_directory,
+ "bucket" => "testing"
+ }
+
+ s3 = LogStash::Outputs::S3.new(minimal_settings.merge(config))
+ # Make the test run in seconds instead of minutes.
+ allow(s3).to receive(:periodic_interval).and_return(time_file)
+ s3.register
+
+ # Force a few file rotations
+ stop_time = Time.now + (number_of_rotation * time_file)
+ event_count = 0
+
+ event = LogStash::Event.new("message" => "Hello World")
+
+ until Time.now > stop_time do
+ s3.receive(event)
+ event_count += 1
+ end
+ s3.teardown
+
+ generated_files = Dir[File.join(temporary_directory, 'ls.*.txt')]
+
+ events_written_count = events_in_files(generated_files)
+
+ # Clock skew can affect the number of rotations.
+ expect(generated_files.count).to be_within(number_of_rotation).of(number_of_rotation + 1)
+ expect(events_written_count).to eq(event_count)
+ end
+ end
+ end
  end
spec/supports/helpers.rb ADDED
@@ -0,0 +1,14 @@
+ def delete_matching_keys_on_bucket(prefix)
+ s3_object.buckets[minimal_settings["bucket"]].objects.with_prefix(prefix).each do |obj|
+ obj.delete
+ end
+ end
+
+ def key_exists_on_bucket?(key)
+ s3_object.buckets[minimal_settings["bucket"]].objects[key].exists?
+ end
+
+ def events_in_files(files)
+ files.collect { |file| File.foreach(file).count }.inject(&:+)
+ end
+
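
The upload path in this release hands finished files to a shared queue that a pool of workers drains (see configure_upload_workers and upload_worker above); shutdown works by enqueuing a sentinel that each worker re-enqueues before exiting, so one sentinel stops the whole pool. A minimal standalone Ruby sketch of that pattern, using hypothetical names and plain threads in place of the plugin's Stud::Task wrappers:

```
require "thread"

SHUTDOWN = Object.new  # sentinel value, mirrors LogStash::ShutdownEvent
queue = Queue.new

workers = 2.times.map do |worker_id|
  Thread.new do
    loop do
      file = queue.deq
      if file.equal?(SHUTDOWN)
        # Put the sentinel back so the other workers also stop.
        queue.enq(SHUTDOWN)
        break
      end
      # Stands in for move_file_to_bucket(file).
      puts "worker #{worker_id} uploading #{file}"
    end
  end
end

%w[a.txt b.txt c.txt].each { |f| queue.enq(f) }
queue.enq(SHUTDOWN)  # one sentinel is enough; workers re-enqueue it
workers.each(&:join)
```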
metadata CHANGED
@@ -1,17 +1,18 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-s3
  version: !ruby/object:Gem::Version
- version: 0.1.1
+ version: 0.1.2
  platform: ruby
  authors:
  - Elasticsearch
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2014-11-19 00:00:00.000000000 Z
+ date: 2015-01-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
- requirement: !ruby/object:Gem::Requirement
+ name: logstash
+ version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
@@ -19,10 +20,7 @@ dependencies:
  - - <
  - !ruby/object:Gem::Version
  version: 2.0.0
- name: logstash
- prerelease: false
- type: :runtime
- version_requirements: !ruby/object:Gem::Requirement
+ requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
@@ -30,48 +28,106 @@ dependencies:
  - - <
  - !ruby/object:Gem::Version
  version: 2.0.0
+ prerelease: false
+ type: :runtime
  - !ruby/object:Gem::Dependency
+ name: logstash-mixin-aws
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
- name: logstash-mixin-aws
  prerelease: false
  type: :runtime
+ - !ruby/object:Gem::Dependency
+ name: aws-sdk
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
- - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
- name: aws-sdk
  prerelease: false
  type: :runtime
+ - !ruby/object:Gem::Dependency
+ name: stud
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ~>
+ - !ruby/object:Gem::Version
+ version: 0.0.18
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ~>
+ - !ruby/object:Gem::Version
+ version: 0.0.18
+ prerelease: false
+ type: :runtime
+ - !ruby/object:Gem::Dependency
+ name: logstash-devutils
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
+ prerelease: false
+ type: :development
+ - !ruby/object:Gem::Dependency
+ name: logstash-input-generator
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
+ prerelease: false
+ type: :development
  - !ruby/object:Gem::Dependency
+ name: logstash-input-stdin
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
- name: logstash-devutils
  prerelease: false
  type: :development
+ - !ruby/object:Gem::Dependency
+ name: logstash-codec-line
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
+ prerelease: false
+ type: :development
  description: This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program
  email: info@elasticsearch.com
  executables: []
@@ -81,10 +137,13 @@ files:
  - .gitignore
  - Gemfile
  - LICENSE
+ - README
  - Rakefile
  - lib/logstash/outputs/s3.rb
  - logstash-output-s3.gemspec
+ - spec/integration/s3_spec.rb
  - spec/outputs/s3_spec.rb
+ - spec/supports/helpers.rb
  homepage: http://www.elasticsearch.org/guide/en/logstash/current/index.html
  licenses:
  - Apache License (2.0)
@@ -112,4 +171,6 @@ signing_key:
  specification_version: 4
  summary: This plugin was created to store logstash's events into Amazon Simple Storage Service (Amazon S3)
  test_files:
+ - spec/integration/s3_spec.rb
  - spec/outputs/s3_spec.rb
+ - spec/supports/helpers.rb