fluent-plugin-s3-fork 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.travis.yml +21 -0
- data/AUTHORS +2 -0
- data/ChangeLog +122 -0
- data/Gemfile +3 -0
- data/README.rdoc +176 -0
- data/Rakefile +14 -0
- data/VERSION +1 -0
- data/fluent-plugin-s3.gemspec +25 -0
- data/lib/fluent/plugin/out_s3.rb +201 -0
- data/test/test_out_s3.rb +322 -0
- metadata +137 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: afb81d1dd9426d54175ab62ac2e5d54301a80350
  data.tar.gz: bf7cfd3bd1e1acfd5b2aaccfd5b1097f7e4e03b6
SHA512:
  metadata.gz: 0c6166ec2a11010b6cf7f473bfb308f9030195121b1594e99952a2bc4890f312eb9c147c661f596b566904ee58b6b937eeacfd588610c952e9166474cfb1febc
  data.tar.gz: c322533b99d3b0bcca1d5a8f8b1acd1e9f90604209b0cb4dc48c3f5f493e0ea3478748a5ce8f5b53987e106da149f18b08efc9037c587f5527a5c42016bfae03
data/.travis.yml
ADDED
data/AUTHORS
ADDED
data/ChangeLog
ADDED
@@ -0,0 +1,122 @@
Release 0.4.2 - 2014/10/22

  * Update fluent-mixin-config-placeholders to v0.3.0


Release 0.4.1 - 2014/10/16

  * Add 's3_region' option to specify S3 region
  * Restrict aws-sdk gem dependency version to use v1
  * Fix infinite loop when same object path is generated


Release 0.4.0 - 2014/06/06

  * Add 'format' option to change one line format
  * Update fluentd dependency to v0.10.49


Release 0.3.7 - 2014/03/07

  * Support lzma2 compression using 'xz' command
    https://github.com/fluent/fluent-plugin-s3/pull/41
  * Relax aws-sdk gem requirement
    https://github.com/fluent/fluent-plugin-s3/pull/42


Release 0.3.6 - 2014/02/05

  * Support 'log_level' option


Release 0.3.5 - 2013/12/05

  * Add 'reduced_redundancy' option to store logs in reduced redundancy
    https://github.com/fluent/fluent-plugin-s3/pull/33


Release 0.3.4 - 2013/07/31

  * Add dynamic path slicing by time formatted string
    https://github.com/fluent/fluent-plugin-s3/pull/24


Release 0.3.3 - 2013/06/18

  * Fix require bug on case-sensitive environment


Release 0.3.2 - 2013/06/18

  * Support lzo mime-type
    https://github.com/fluent/fluent-plugin-s3/pull/29
  * Add proxy_uri option
    https://github.com/fluent/fluent-plugin-s3/issues/25
  * Add check_apikey_on_start option
    https://github.com/fluent/fluent-plugin-s3/pull/28


Release 0.3.1 - 2013/03/28

  * Support json and text mime-types
    https://github.com/fluent/fluent-plugin-s3/pull/20


Release 0.3.0 - 2013/02/19

  * Enable dynamic and configurable S3 object keys
    https://github.com/fluent/fluent-plugin-s3/pull/12
  * Fix issue where many temporary files were left in /tmp when the plugin failed to write to S3
    https://github.com/fluent/fluent-plugin-s3/pull/15
  * Enable fluent-mixin-config-placeholders to support hostname, uuid and other parameters in configuration
    https://github.com/fluent/fluent-plugin-s3/pull/19
  * Update 'aws-sdk' version requirement to '~> 1.8.2'
    https://github.com/fluent/fluent-plugin-s3/pull/21
  * Create new S3 bucket if not exists
    https://github.com/fluent/fluent-plugin-s3/pull/22
  * Check the permission and bucket existence in the start method, not the write method


Release 0.2.6 - 2013/01/15

  * Add use_ssl option


Release 0.2.5 - 2012/12/06

  * Add format_json and time/tag mixin options [#9]


Release 0.2.4 - 2012/11/21

  * Set content type when writing file to S3


Release 0.2.3 - 2012/11/19

  * Loosen 'aws-sdk' version requirement from "~> 1.1.3" to "~> 1.1"
  * Support aws-sdk facility to load credentials from ENV vars or IAM Instance Profile by making the credentials non-mandatory
  * Use Yajl instead of to_json to avoid raising exceptions on invalid UTF-8 bytes


Release 0.2.2 - 2011/12/15

  * Add s3_endpoint option


Release 0.2.1 - 2011/10/24

  * Add sequential number to the file to avoid overwriting
  * Use bundler instead of jeweler for packaging
  * Update README


Release 0.2.0 - 2011/10/16

  * Update to fluentd-0.10.0


Release 0.1.1 - 2011/09/27

  * First release
data/Gemfile
ADDED
data/README.rdoc
ADDED
@@ -0,0 +1,176 @@
= Amazon S3 output plugin for {Fluentd}[http://github.com/fluent/fluentd]

== Overview

The *s3* output plugin buffers event logs in local files and uploads them to S3 periodically.

This plugin splits files by the time of the event logs themselves (not the time the logs are received). For example, if a log '2011-01-02 message A' arrives and then a log '2011-01-03 message B' arrives, in that order, the former is stored in the "20110102.gz" file and the latter in the "20110103.gz" file.

== Installation

Simply use RubyGems:

  gem install fluent-plugin-s3

== Configuration

  <match pattern>
    type s3

    aws_key_id YOUR_AWS_KEY_ID
    aws_sec_key YOUR_AWS_SECRET/KEY
    s3_bucket YOUR_S3_BUCKET_NAME
    s3_endpoint s3-ap-northeast-1.amazonaws.com
    s3_object_key_format %{path}%{time_slice}_%{index}.%{file_extension}
    path logs/
    buffer_path /var/log/fluent/s3

    time_slice_format %Y%m%d-%H
    time_slice_wait 10m
    utc
  </match>

[aws_key_id] AWS access key id. This parameter is required when your agent is not running on an EC2 instance with an IAM Role.

[aws_sec_key] AWS secret key. This parameter is required when your agent is not running on an EC2 instance with an IAM Role.

[s3_bucket (required)] S3 bucket name.

[s3_region] S3 region name. For example, the US West (Oregon) Region is "us-west-2". The full list of regions is available at http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region. We recommend using s3_region instead of s3_endpoint.

[s3_endpoint] S3 endpoint name. For example, the US West (Oregon) endpoint is "s3-us-west-2.amazonaws.com". The full list of endpoints is available at http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

[s3_object_key_format] The format of S3 object keys. You can use several built-in variables:

- %{path}
- %{time_slice}
- %{index}
- %{file_extension}

to decide keys dynamically.

%{path} is exactly the value of *path* configured in the configuration file, e.g. "logs/" in the example configuration above.
%{time_slice} is the time slice formatted with *time_slice_format*.
%{index} is a sequential number starting from 0, incremented when multiple files are uploaded to S3 in the same time slice.
%{file_extension} is always "gz" for now.

The default format is "%{path}%{time_slice}_%{index}.%{file_extension}".

For instance, using the example configuration above, actual object keys on S3 will be something like:

  "logs/20130111-22_0.gz"
  "logs/20130111-23_0.gz"
  "logs/20130111-23_1.gz"
  "logs/20130112-00_0.gz"

With the configuration:

  s3_object_key_format %{path}/events/ts=%{time_slice}/events_%{index}.%{file_extension}
  path log
  time_slice_format %Y%m%d-%H

You get:

  "log/events/ts=20130111-22/events_0.gz"
  "log/events/ts=20130111-23/events_0.gz"
  "log/events/ts=20130111-23/events_1.gz"
  "log/events/ts=20130112-00/events_0.gz"

The {fluent-mixin-config-placeholders}[https://github.com/tagomoris/fluent-mixin-config-placeholders] mixin is also incorporated, so additional variables such as %{hostname}, %{uuid}, etc. can be used in s3_object_key_format. This could prove useful in preventing filename conflicts when writing from multiple servers.

  s3_object_key_format %{path}/events/ts=%{time_slice}/events_%{index}-%{hostname}.%{file_extension}
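
Under the hood, these %{...} placeholders are expanded by a plain regexp substitution over the key format (the same technique as the write method in lib/fluent/plugin/out_s3.rb below). A minimal standalone Ruby sketch with illustrative values:

  # Sketch of how %{...} placeholders become an S3 object key.
  values = {
    "path"           => "log",          # the configured *path*
    "time_slice"     => "20130111-22",  # chunk key from *time_slice_format*
    "index"          => 0,              # upload counter within the time slice
    "file_extension" => "gz"
  }
  key_format = "%{path}/events/ts=%{time_slice}/events_%{index}.%{file_extension}"
  # Strip the "%{" and "}" from each match and look up its value.
  key = key_format.gsub(/%{[^}]+}/) { |expr| values[expr[2...expr.size - 1]].to_s }
  # key == "log/events/ts=20130111-22/events_0.gz"
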
[store_as] Archive format on S3. You can use several formats:

- gzip (default)
- json
- text
- lzo (needs the lzop command)

[format] Change the one-line format in the S3 object. Supported formats are "out_file", "json", "ltsv" and "single_value".

- out_file (default).

    time\ttag\t{..json1..}
    time\ttag\t{..json2..}
    ...

- json

    {..json1..}
    {..json2..}
    ...

In this format, "time" and "tag" are omitted, but you can add them to the record with the "include_tag_key" / "tag_key" and "include_time_key" / "time_key" options.
If you set the following configuration in the S3 output:

  format_json true
  include_time_key true
  time_key log_time # default is time

then the record has a log_time field:

  {"log_time":"time string",...}

- ltsv

    key1:value1\tkey2:value2
    key1:value1\tkey2:value2
    ...

The "ltsv" format also accepts the "include_xxx" related options. See the "json" section.

- single_value

Use a specified value instead of the entire record. If you get '{"message":"my log"}', then the contents are

  my log1
  my log2
  ...

You can change the key name with the "message_key" option.
[auto_create_bucket] Create the S3 bucket if it does not exist. Default is true.

[check_apikey_on_start] Check the AWS key on start. Default is true.

[proxy_uri] URI of a proxy environment.

[path] Path prefix of the files on S3. Default is "" (no prefix).

[buffer_path (required)] Path prefix of the files used to buffer logs.

[time_slice_format] Format of the time used in the file name. Default is '%Y%m%d'. Use '%Y%m%d%H' to split files hourly.

[time_slice_wait] The time to wait for old logs. Default is 10 minutes. Specify a larger value if old logs may arrive late.

[utc] Use UTC instead of local time.
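
Putting the three time options together, a minimal sketch that slices files hourly in UTC and waits 10 minutes for delayed events (bucket name and paths are placeholders):

  <match pattern>
    type s3
    s3_bucket YOUR_S3_BUCKET_NAME
    buffer_path /var/log/fluent/s3
    time_slice_format %Y%m%d%H
    time_slice_wait 10m
    utc
  </match>
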
== IAM Policy

The following is an example of a minimal IAM policy needed to write to an S3 bucket (matches my-s3bucket/logs, my-s3bucket-test, etc.).

  { "Statement": [
    { "Effect":"Allow",
      "Action":"s3:*",
      "Resource":"arn:aws:s3:::my-s3bucket*"
    } ]
  }

Note that the bucket must already exist and *auto_create_bucket* has no effect in this case.
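
A sketch of a narrower alternative: the plugin mainly needs to list the bucket (for the startup and key-existence checks) and read/write objects. The exact action set may vary by plugin version, and s3:CreateBucket would be required if *auto_create_bucket* is used:

  { "Statement": [
    { "Effect":"Allow",
      "Action":["s3:ListBucket"],
      "Resource":"arn:aws:s3:::my-s3bucket"
    },
    { "Effect":"Allow",
      "Action":["s3:GetObject","s3:PutObject"],
      "Resource":"arn:aws:s3:::my-s3bucket/*"
    } ]
  }
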
Refer to the {AWS documentation}[http://docs.aws.amazon.com/IAM/latest/UserGuide/ExampleIAMPolicies.html] for example policies.

Using {IAM roles}[http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html] with a properly configured IAM policy is preferred over embedding access keys on EC2 instances.

== Website, license, et al.

Web site:: http://fluentd.org/
Documents:: http://docs.fluentd.org/
Source repository:: http://github.com/fluent
Discussion:: http://groups.google.com/group/fluentd
Author:: Sadayuki Furuhashi
Copyright:: (c) 2011 FURUHASHI Sadayuki
License:: Apache License, Version 2.0
data/Rakefile
ADDED
@@ -0,0 +1,14 @@

require 'bundler'
Bundler::GemHelper.install_tasks

require 'rake/testtask'

Rake::TestTask.new(:test) do |test|
  test.libs << 'lib' << 'test'
  test.test_files = FileList['test/test_*.rb']
  test.verbose = true
end

task :default => [:build]
data/VERSION
ADDED
@@ -0,0 +1 @@
0.4.2
data/fluent-plugin-s3.gemspec
ADDED
@@ -0,0 +1,25 @@
# encoding: utf-8
$:.push File.expand_path('../lib', __FILE__)

Gem::Specification.new do |gem|
  gem.name          = "fluent-plugin-s3-fork"
  gem.description   = "Amazon S3 output plugin for Fluentd event collector"
  gem.homepage      = "https://github.com/tomodian/fluent-plugin-s3"
  gem.summary       = gem.description
  gem.version       = File.read("VERSION").strip
  gem.authors       = ["Hayato Tomoda"]
  gem.email         = "tomodian@gmail.com"
  gem.has_rdoc      = false
  #gem.platform     = Gem::Platform::RUBY
  gem.files         = `git ls-files`.split("\n")
  gem.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
  gem.executables   = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
  gem.require_paths = ['lib']

  gem.add_dependency "fluentd", "~> 0.10.49"
  gem.add_dependency "aws-sdk", "~> 1.38"
  gem.add_dependency "yajl-ruby", "~> 1.0"
  gem.add_dependency "fluent-mixin-config-placeholders", ">= 0.3.0"
  gem.add_development_dependency "rake", ">= 0.9.2"
  gem.add_development_dependency "flexmock", ">= 1.2.0"
end
data/lib/fluent/plugin/out_s3.rb
ADDED
@@ -0,0 +1,201 @@
module Fluent
  require 'fluent/mixin/config_placeholders'

  class S3Output < Fluent::TimeSlicedOutput
    Fluent::Plugin.register_output('s3', self)

    unless method_defined?(:log)
      define_method(:log) { $log }
    end

    def initialize
      super
      require 'aws-sdk'
      require 'zlib'
      require 'time'
      require 'tempfile'
      require 'open3'

      @use_ssl = true
    end

    config_param :path, :string, :default => ""

    config_param :aws_key_id, :string, :default => nil
    config_param :aws_sec_key, :string, :default => nil
    config_param :s3_bucket, :string
    config_param :s3_region, :string, :default => nil
    config_param :s3_endpoint, :string, :default => nil
    config_param :s3_object_key_format, :string, :default => "%{path}%{time_slice}_%{index}.%{file_extension}"
    config_param :store_as, :string, :default => "gzip"
    config_param :command_parameter, :string, :default => nil
    config_param :auto_create_bucket, :bool, :default => true
    config_param :check_apikey_on_start, :bool, :default => true
    config_param :proxy_uri, :string, :default => nil
    config_param :reduced_redundancy, :bool, :default => false
    config_param :format, :string, :default => 'out_file'

    attr_reader :bucket

    include Fluent::Mixin::ConfigPlaceholders

    def placeholders
      [:percent]
    end

    def configure(conf)
      super

      if use_ssl = conf['use_ssl']
        if use_ssl.empty?
          @use_ssl = true
        else
          @use_ssl = Config.bool_value(use_ssl)
          if @use_ssl.nil?
            raise ConfigError, "'true' or 'false' is required for use_ssl option on s3 output"
          end
        end
      end

      @ext, @mime_type = case @store_as
                         when 'gzip'
                           ['gz', 'application/x-gzip']
                         when 'lzo'
                           check_command('lzop', 'LZO')
                           @command_parameter = '-qf1' if @command_parameter.nil?
                           ['lzo', 'application/x-lzop']
                         when 'lzma2'
                           check_command('xz', 'LZMA2')
                           @command_parameter = '-qf0' if @command_parameter.nil?
                           ['xz', 'application/x-xz']
                         when 'json'
                           ['json', 'application/json']
                         else
                           ['txt', 'text/plain']
                         end

      if format_json = conf['format_json']
        $log.warn "format_json is deprecated. Use 'format json' instead"
        conf['format'] = 'json'
      else
        conf['format'] = @format
      end
      @formatter = TextFormatter.create(conf)

      if @localtime
        @path_slicer = Proc.new {|path|
          Time.now.strftime(path)
        }
      else
        @path_slicer = Proc.new {|path|
          Time.now.utc.strftime(path)
        }
      end
    end

    def start
      super
      options = {}
      if @aws_key_id && @aws_sec_key
        options[:access_key_id] = @aws_key_id
        options[:secret_access_key] = @aws_sec_key
      end
      options[:region] = @s3_region if @s3_region
      options[:endpoint] = @s3_endpoint if @s3_endpoint
      options[:proxy_uri] = @proxy_uri if @proxy_uri
      options[:use_ssl] = @use_ssl

      @s3 = AWS::S3.new(options)
      @bucket = @s3.buckets[@s3_bucket]

      check_apikeys if @check_apikey_on_start
      ensure_bucket
    end

    def format(tag, time, record)
      @formatter.format(tag, time, record)
    end

    def write(chunk)
      i = 0
      previous_path = nil

      begin
        path = @path_slicer.call(@path)
        values_for_s3_object_key = {
          "path" => path,
          "time_slice" => chunk.key,
          "file_extension" => @ext,
          "index" => i
        }
        s3path = @s3_object_key_format.gsub(%r(%{[^}]+})) { |expr|
          values_for_s3_object_key[expr[2...expr.size-1]]
        }
        if (i > 0) && (s3path == previous_path)
          raise "duplicated path is generated. use %{index} in s3_object_key_format: path = #{s3path}"
        end

        i += 1
        previous_path = s3path
      end while @bucket.objects[s3path].exists?

      tmp = Tempfile.new("s3-")
      begin
        if @store_as == "gzip"
          w = Zlib::GzipWriter.new(tmp)
          chunk.write_to(w)
          w.close
        elsif @store_as == "lzo"
          w = Tempfile.new("chunk-tmp")
          chunk.write_to(w)
          w.close
          tmp.close
          # We don't check the return code because we can't recover lzop failure.
          system "lzop #{@command_parameter} -o #{tmp.path} #{w.path}"
        elsif @store_as == "lzma2"
          w = Tempfile.new("chunk-xz-tmp")
          chunk.write_to(w)
          w.close
          tmp.close
          system "xz #{@command_parameter} -c #{w.path} > #{tmp.path}"
        else
          chunk.write_to(tmp)
          tmp.close
        end
        @bucket.objects[s3path].write(Pathname.new(tmp.path),
                                      {:content_type => @mime_type,
                                       :reduced_redundancy => @reduced_redundancy})
      ensure
        tmp.close(true) rescue nil
        w.close rescue nil
        w.unlink rescue nil
      end
    end

    private

    def ensure_bucket
      if !@bucket.exists?
        if @auto_create_bucket
          log.info "Creating bucket #{@s3_bucket} on #{@s3_endpoint}"
          @s3.buckets.create(@s3_bucket)
        else
          raise "The specified bucket does not exist: bucket = #{@s3_bucket}"
        end
      end
    end

    def check_apikeys
      @bucket.empty?
    rescue
      raise "aws_key_id or aws_sec_key is invalid. Please check your configuration"
    end

    def check_command(command, algo)
      begin
        Open3.capture3("#{command} -V")
      rescue Errno::ENOENT
        raise ConfigError, "'#{command}' utility must be in PATH for #{algo} compression"
      end
    end
  end
end
data/test/test_out_s3.rb
ADDED
@@ -0,0 +1,322 @@
require 'fluent/test'
require 'fluent/plugin/out_s3'

require 'flexmock/test_unit'
require 'zlib'

class S3OutputTest < Test::Unit::TestCase
  def setup
    require 'aws-sdk'
    Fluent::Test.setup
  end

  CONFIG = %[
    aws_key_id test_key_id
    aws_sec_key test_sec_key
    s3_bucket test_bucket
    path log
    utc
    buffer_type memory
  ]

  def create_driver(conf = CONFIG)
    Fluent::Test::BufferedOutputTestDriver.new(Fluent::S3Output) do
      def write(chunk)
        chunk.read
      end

      private

      def ensure_bucket
      end

      def check_apikeys
      end
    end.configure(conf)
  end

  def test_configure
    d = create_driver
    assert_equal 'test_key_id', d.instance.aws_key_id
    assert_equal 'test_sec_key', d.instance.aws_sec_key
    assert_equal 'test_bucket', d.instance.s3_bucket
    assert_equal 'log', d.instance.path
    assert d.instance.instance_variable_get(:@use_ssl)
    assert_equal 'gz', d.instance.instance_variable_get(:@ext)
    assert_equal 'application/x-gzip', d.instance.instance_variable_get(:@mime_type)
  end

  def test_configure_with_mime_type_json
    conf = CONFIG.clone
    conf << "\nstore_as json\n"
    d = create_driver(conf)
    assert_equal 'json', d.instance.instance_variable_get(:@ext)
    assert_equal 'application/json', d.instance.instance_variable_get(:@mime_type)
  end

  def test_configure_with_mime_type_text
    conf = CONFIG.clone
    conf << "\nstore_as text\n"
    d = create_driver(conf)
    assert_equal 'txt', d.instance.instance_variable_get(:@ext)
    assert_equal 'text/plain', d.instance.instance_variable_get(:@mime_type)
  end

  def test_configure_with_mime_type_lzo
    conf = CONFIG.clone
    conf << "\nstore_as lzo\n"
    d = create_driver(conf)
    assert_equal 'lzo', d.instance.instance_variable_get(:@ext)
    assert_equal 'application/x-lzop', d.instance.instance_variable_get(:@mime_type)
  rescue => e
    # TODO: replace code with disable lzop command
    assert(e.is_a?(Fluent::ConfigError))
  end

  def test_path_slicing
    config = CONFIG.clone.gsub(/path\slog/, "path log/%Y/%m/%d")
    d = create_driver(config)
    path_slicer = d.instance.instance_variable_get(:@path_slicer)
    path = d.instance.instance_variable_get(:@path)
    slice = path_slicer.call(path)
    assert_equal slice, Time.now.utc.strftime("log/%Y/%m/%d")
  end

  def test_path_slicing_utc
    config = CONFIG.clone.gsub(/path\slog/, "path log/%Y/%m/%d")
    config << "\nutc\n"
    d = create_driver(config)
    path_slicer = d.instance.instance_variable_get(:@path_slicer)
    path = d.instance.instance_variable_get(:@path)
    slice = path_slicer.call(path)
    assert_equal slice, Time.now.utc.strftime("log/%Y/%m/%d")
  end

  def test_format
    d = create_driver

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[2011-01-02T13:14:15Z\ttest\t{"a":1}\n]
    d.expect_format %[2011-01-02T13:14:15Z\ttest\t{"a":2}\n]

    d.run
  end

  def test_format_included_tag_and_time
    config = [CONFIG, 'include_tag_key true', 'include_time_key true'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[2011-01-02T13:14:15Z\ttest\t{"a":1,"tag":"test","time":"2011-01-02T13:14:15Z"}\n]
    d.expect_format %[2011-01-02T13:14:15Z\ttest\t{"a":2,"tag":"test","time":"2011-01-02T13:14:15Z"}\n]

    d.run
  end

  def test_format_with_format_ltsv
    config = [CONFIG, 'format ltsv'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1, "b"=>1}, time)
    d.emit({"a"=>2, "b"=>2}, time)

    d.expect_format %[a:1\tb:1\n]
    d.expect_format %[a:2\tb:2\n]

    d.run
  end

  def test_format_with_format_json
    config = [CONFIG, 'format json'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[{"a":1}\n]
    d.expect_format %[{"a":2}\n]

    d.run
  end

  def test_format_with_format_json_deprecated
    config = [CONFIG, 'format_json true'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[{"a":1}\n]
    d.expect_format %[{"a":2}\n]

    d.run
  end

  def test_format_with_format_json_included_tag
    config = [CONFIG, 'format_json true', 'include_tag_key true'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[{"a":1,"tag":"test"}\n]
    d.expect_format %[{"a":2,"tag":"test"}\n]

    d.run
  end

  def test_format_with_format_json_included_time
    config = [CONFIG, 'format json', 'include_time_key true'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[{"a":1,"time":"2011-01-02T13:14:15Z"}\n]
    d.expect_format %[{"a":2,"time":"2011-01-02T13:14:15Z"}\n]

    d.run
  end

  def test_format_with_format_json_included_tag_and_time
    config = [CONFIG, 'format json', 'include_tag_key true', 'include_time_key true'].join("\n")
    d = create_driver(config)

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    d.expect_format %[{"a":1,"tag":"test","time":"2011-01-02T13:14:15Z"}\n]
    d.expect_format %[{"a":2,"tag":"test","time":"2011-01-02T13:14:15Z"}\n]

    d.run
  end

  def test_chunk_to_write
    d = create_driver

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    # S3OutputTest#write returns chunk.read
    data = d.run

    assert_equal %[2011-01-02T13:14:15Z\ttest\t{"a":1}\n] +
                 %[2011-01-02T13:14:15Z\ttest\t{"a":2}\n],
                 data
  end

  CONFIG2 = %[
    hostname testing.node.local
    aws_key_id test_key_id
    aws_sec_key test_sec_key
    s3_bucket test_bucket
    s3_object_key_format %{path}/events/ts=%{time_slice}/events_%{index}-%{hostname}.%{file_extension}
    time_slice_format %Y%m%d-%H
    path log
    utc
    buffer_type memory
    auto_create_bucket false
    log_level debug
  ]

  def create_time_sliced_driver(additional_conf = '')
    d = Fluent::Test::TimeSlicedOutputTestDriver.new(Fluent::S3Output) do
      private

      def check_apikeys
      end
    end.configure([CONFIG2, additional_conf].join("\n"))
    d
  end

  def test_write_with_custom_s3_object_key_format
    # Assert content of event logs which are being sent to S3
    s3obj = flexmock(AWS::S3::S3Object)
    s3obj.should_receive(:exists?).with_any_args.and_return { false }
    s3obj.should_receive(:write).with(
      on { |pathname|
        data = nil
        # Event logs are compressed in GZip
        pathname.open { |f|
          gz = Zlib::GzipReader.new(f)
          data = gz.read
          gz.close
        }
        assert_equal %[2011-01-02T13:14:15Z\ttest\t{"a":1}\n] +
                     %[2011-01-02T13:14:15Z\ttest\t{"a":2}\n],
                     data

        pathname.to_s.match(%r|s3-|)
      },
      {:content_type => "application/x-gzip", :reduced_redundancy => false})

    # Assert the key of S3Object, which event logs are stored in
    s3obj_col = flexmock(AWS::S3::ObjectCollection)
    s3obj_col.should_receive(:[]).with(
      on { |key|
        key == "log/events/ts=20110102-13/events_0-testing.node.local.gz"
      }).
      and_return {
        s3obj
      }

    # Partial mock the S3Bucket, not to make an actual connection to Amazon S3
    s3bucket, _ = setup_mocks(true)
    s3bucket.should_receive(:objects).with_any_args.and_return { s3obj_col }

    # We must use TimeSlicedOutputTestDriver instead of BufferedOutputTestDriver,
    # to make assertions on chunks' keys
    d = create_time_sliced_driver

    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
    d.emit({"a"=>1}, time)
    d.emit({"a"=>2}, time)

    # Finally, the instance of S3Output is initialized and then invoked
    d.run
  end

  def setup_mocks(exists_return = false)
    s3bucket = flexmock(AWS::S3::Bucket)
    s3bucket.should_receive(:exists?).with_any_args.and_return { exists_return }
    s3bucket_col = flexmock(AWS::S3::BucketCollection)
    s3bucket_col.should_receive(:[]).with_any_args.and_return { s3bucket }
    flexmock(AWS::S3).new_instances do |bucket|
      bucket.should_receive(:buckets).with_any_args.and_return { s3bucket_col }
    end

    return s3bucket, s3bucket_col
  end

  def test_auto_create_bucket_false_with_non_existence_bucket
    s3bucket, s3bucket_col = setup_mocks

    d = create_time_sliced_driver('auto_create_bucket false')
    assert_raise(RuntimeError, "The specified bucket does not exist: bucket = test_bucket") {
      d.run
    }
  end

  def test_auto_create_bucket_true_with_non_existence_bucket
    s3bucket, s3bucket_col = setup_mocks
    s3bucket_col.should_receive(:create).with_any_args.and_return { true }

    d = create_time_sliced_driver('auto_create_bucket true')
    assert_nothing_raised {
      d.run
    }
  end
end
metadata
ADDED
@@ -0,0 +1,137 @@
--- !ruby/object:Gem::Specification
name: fluent-plugin-s3-fork
version: !ruby/object:Gem::Version
  version: 0.4.2
platform: ruby
authors:
- Hayato Tomoda
autorequire:
bindir: bin
cert_chain: []
date: 2014-10-29 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: fluentd
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 0.10.49
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 0.10.49
- !ruby/object:Gem::Dependency
  name: aws-sdk
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.38'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.38'
- !ruby/object:Gem::Dependency
  name: yajl-ruby
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
- !ruby/object:Gem::Dependency
  name: fluent-mixin-config-placeholders
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.3.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.3.0
- !ruby/object:Gem::Dependency
  name: rake
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.9.2
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.9.2
- !ruby/object:Gem::Dependency
  name: flexmock
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 1.2.0
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 1.2.0
description: Amazon S3 output plugin for Fluentd event collector
email: tomodian@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- ".travis.yml"
- AUTHORS
- ChangeLog
- Gemfile
- README.rdoc
- Rakefile
- VERSION
- fluent-plugin-s3.gemspec
- lib/fluent/plugin/out_s3.rb
- test/test_out_s3.rb
homepage: https://github.com/tomodian/fluent-plugin-s3
licenses: []
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 2.2.2
signing_key:
specification_version: 4
summary: Amazon S3 output plugin for Fluentd event collector
test_files:
- test/test_out_s3.rb