fluent-plugin-s3 1.0.0.rc3 → 1.0.0.rc4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +8 -3
- data/VERSION +1 -1
- data/lib/fluent/plugin/in_s3.rb +3 -2
- data/lib/fluent/plugin/out_s3.rb +5 -3
- data/test/test_in_s3.rb +35 -0
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: dbc4b32e04be1ba3836e5c34a09df8a831b42086
+  data.tar.gz: 0cf95c08b2eabc0f249f5a7e468cf6c597e0626b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f1c8dd2918e1c4490881a29a5e7d234f59e13ba0907a73a84dfd4a8b7e8a8f470b28a3cbaa97fefc116a4ca778c132c588d3503e4fab2eb7e5ffd65ea6b1aa94
+  data.tar.gz: ce4efb8bedd6f58d701677ae457383cae253ac2a46fadac01b5a3ef4a4bfeb1b6a001220babc01911a0beb42bc81ca3f141b11c86ae1a8c7f6cacd564b4c4df3
data/README.md
CHANGED
@@ -5,6 +5,9 @@ alt="Build Status" />](https://travis-ci.org/fluent/fluent-plugin-s3) [<img
 src="https://codeclimate.com/github/fluent/fluent-plugin-s3/badges/gpa.svg"
 />](https://codeclimate.com/github/fluent/fluent-plugin-s3)
 
+## PR
+**it will be greatful that the plugin can push log to owner S3 Storage like Ceph S3.**
+
 ## Overview
 
 **s3** output plugin buffers event logs in local file and upload it to S3
@@ -74,7 +77,9 @@ For `<buffer>`, you can use any record field in `path` / `s3_object_key_format`.
   # parameters...
 </buffer>
 
-
+See official article for more detail: [Buffer section configurations](http://docs.fluentd.org/v0.14/articles/buffer-section)
+
+Note that this configuration doesn't work with fluentd v0.12.
 
 ### v0.12 style
 
@@ -275,7 +280,7 @@ You can change key name by "message_key" option.
 
 Create S3 bucket if it does not exists. Default is true.
 
-**
+**check_bucket**
 
 Check mentioned bucket if it exists in AWS or not. Default is true.
 
@@ -293,7 +298,7 @@ When it is false,
 Example object name, assuming it is created on 2016/16/11 3:30:54 PM
 20161611_153054.txt (extension can be anything as per user's choice)
 
-**Example when
+**Example when check_bucket=false and check_object=false**
 
 When the mentioned configuration will be made, fluentd will work with the
 minimum IAM poilcy, like:
data/VERSION
CHANGED
@@ -1 +1 @@
-1.0.0.rc3
+1.0.0.rc4
data/lib/fluent/plugin/in_s3.rb
CHANGED
@@ -2,6 +2,7 @@ require 'fluent/plugin/input'
 require 'fluent/log-ext'
 
 require 'aws-sdk-resources'
+require 'cgi/util'
 require 'zlib'
 require 'time'
 require 'tempfile'
@@ -213,12 +214,12 @@ module Fluent::Plugin
       @bucket.objects.first
       log.debug("Succeeded to verify API keys")
     rescue => e
-      raise "can't call S3 API. Please check your
+      raise "can't call S3 API. Please check your credentials or s3_region configuration. error = #{e.inspect}"
     end
 
     def process(body)
       s3 = body["Records"].first["s3"]
-      key = s3["object"]["key"]
+      key = CGI.unescape(s3["object"]["key"])
 
       io = @bucket.object(key).get.body
       content = @extractor.extract(io)
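For context on the `CGI.unescape` change above: S3 event notifications delivered through SQS URL-encode the object key, so a key containing a space arrives as `test+key`. Decoding it before calling `@bucket.object(key)` makes the lookup hit the real object. A minimal illustration using only the Ruby standard library:

    require 'cgi/util'

    # S3 event notifications URL-encode object keys: "+" stands for a space
    # and "%2F" for a slash, so the raw key must be decoded before lookup.
    CGI.unescape("test+key")        # => "test key"
    CGI.unescape("dir%2Ffile.log")  # => "dir/file.log"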
data/lib/fluent/plugin/out_s3.rb
CHANGED
@@ -1,5 +1,6 @@
 require 'fluent/plugin/output'
 require 'fluent/log-ext'
+require 'fluent/timezone'
 require 'aws-sdk-resources'
 require 'zlib'
 require 'time'
@@ -157,6 +158,7 @@ module Fluent::Plugin
       # TODO: Remove time_slice_format when end of support compat_parameters
       @configured_time_slice_format = conf['time_slice_format']
       @values_for_s3_object_chunk = {}
+      @time_slice_with_tz = Fluent::Timezone.formatter(@timekey_zone, @configured_time_slice_format || timekey_to_timeformat(@buffer_config['timekey']))
     end
 
     def multi_workers_ready?
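For context on the new `@time_slice_with_tz`: the formatter is built once in `configure` from the configured timezone and the time slice format, then reused for every chunk (see the next hunk, where it replaces the ad-hoc `time_slice_format` lookup). A rough sketch of how such a formatter behaves; the `"+09:00"` offset and `"%Y%m%d"` format below are illustrative values, not the plugin's defaults:

    require 'fluent/timezone'

    # Build the formatter once: a timezone plus a strftime format.
    # It returns a callable that takes a unix timestamp such as metadata.timekey.
    formatter = Fluent::Timezone.formatter("+09:00", "%Y%m%d")
    formatter.call(Time.utc(2017, 6, 26, 15, 30, 54).to_i)  # => "20170627" (already the next day in +09:00)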
@@ -200,11 +202,10 @@ module Fluent::Plugin
       i = 0
       metadata = chunk.metadata
       previous_path = nil
-      time_slice_format = @configured_time_slice_format || timekey_to_timeformat(@buffer_config['timekey'])
       time_slice = if metadata.timekey.nil?
                      ''.freeze
                    else
-
+                      @time_slice_with_tz.call(metadata.timekey)
                    end
 
       if @check_object
@@ -248,6 +249,7 @@ module Fluent::Plugin
           "%{path}" => @path,
           "%{time_slice}" => time_slice,
           "%{file_extension}" => @compressor.ext,
+          "%{hms_slice}" => hms_slicer,
         }.merge!(@values_for_s3_object_chunk[chunk.unique_id])
         values_for_s3_object_key["%{uuid_flush}".freeze] = uuid_random if @uuid_flush_enabled
 
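The hash in the hunk above supplies the values for the `%{...}` placeholders in `s3_object_key_format`; `hms_slicer` backs the new `%{hms_slice}` token, which pairs with the check_object=false naming shown in the README. A minimal sketch of that expansion step, not the plugin's exact code, with made-up sample values:

    # Hypothetical placeholder values; in the plugin they come from the chunk,
    # the configuration, and helpers such as hms_slicer.
    values_for_s3_object_key = {
      "%{path}"           => "logs/",
      "%{time_slice}"     => "20170626",
      "%{file_extension}" => "gz",
      "%{hms_slice}"      => "153054",
      "%{index}"          => 0,
    }
    key_format = "%{path}%{time_slice}_%{hms_slice}_%{index}.%{file_extension}"
    key_format.gsub(/%{[^}]+}/, values_for_s3_object_key)
    # => "logs/20170626_153054_0.gz"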
@@ -354,7 +356,7 @@ module Fluent::Plugin
     rescue Aws::S3::Errors::NoSuchBucket
       # ignore NoSuchBucket Error because ensure_bucket checks it.
     rescue => e
-      raise "can't call S3 API. Please check your
+      raise "can't call S3 API. Please check your credentials or s3_region configuration. error = #{e.inspect}"
     end
 
     def setup_credentials
data/test/test_in_s3.rb
CHANGED
@@ -185,6 +185,41 @@ class S3InputTest < Test::Unit::TestCase
     assert_equal({ "message" => "aaa" }, events.first[2])
   end
 
+  def test_one_record_url_encoded
+    setup_mocks
+    d = create_driver(CONFIG + "\ncheck_apikey_on_start false\nstore_as text\nformat none\n")
+
+    s3_object = stub(Object.new)
+    s3_response = stub(Object.new)
+    s3_response.body { StringIO.new("aaa") }
+    s3_object.get { s3_response }
+    @s3_bucket.object('test key').at_least(1) { s3_object }
+
+    body = {
+      "Records" => [
+        {
+          "s3" => {
+            "object" => {
+              "key" => "test+key"
+            }
+          }
+        }
+      ]
+    }
+    message = Struct::StubMessage.new(1, 1, Yajl.dump(body))
+    @sqs_poller.get_messages(anything, anything) do |config, stats|
+      config.before_request.call(stats) if config.before_request
+      stats.request_count += 1
+      if stats.request_count >= 1
+        d.instance.instance_variable_set(:@running, false)
+      end
+      [message]
+    end
+    d.run(expect_emits: 1)
+    events = d.events
+    assert_equal({ "message" => "aaa" }, events.first[2])
+  end
+
   def test_one_record_multi_line
     setup_mocks
     d = create_driver(CONFIG + "\ncheck_apikey_on_start false\nstore_as text\nformat none\n")
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-s3
 version: !ruby/object:Gem::Version
-  version: 1.0.0.rc3
+  version: 1.0.0.rc4
 platform: ruby
 authors:
 - Sadayuki Furuhashi
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-06-26 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fluentd
@@ -155,7 +155,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: 1.3.1
 requirements: []
 rubyforge_project:
-rubygems_version: 2.6.
+rubygems_version: 2.6.11
 signing_key:
 specification_version: 4
 summary: Amazon S3 output plugin for Fluentd event collector