fluent-plugin-buffered-metrics 0.0.1
Sign up to get free protection for your applications and to get access to all the features.
- data.tar.gz.sig +0 -0
- data/.gitignore +4 -0
- data/LICENSE +13 -0
- data/LICENSE-Apache-2.0.txt +202 -0
- data/README.md +117 -0
- data/certs/oss@hotschedules.com.cert +25 -0
- data/fluent-plugin-buffered-metrics.gemspec +20 -0
- data/lib/fluent/metrics_backends.rb +201 -0
- data/lib/fluent/plugin/out_buffered_metrics.rb +176 -0
- metadata +114 -0
- metadata.gz.sig +0 -0
data.tar.gz.sig
ADDED
Binary file
|
data/.gitignore
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,13 @@
|
|
1
|
+
Copyright 2017 Red Book Connect LLC. operating as HotSchedules
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
@@ -0,0 +1,202 @@
|
|
1
|
+
|
2
|
+
Apache License
|
3
|
+
Version 2.0, January 2004
|
4
|
+
http://www.apache.org/licenses/
|
5
|
+
|
6
|
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
7
|
+
|
8
|
+
1. Definitions.
|
9
|
+
|
10
|
+
"License" shall mean the terms and conditions for use, reproduction,
|
11
|
+
and distribution as defined by Sections 1 through 9 of this document.
|
12
|
+
|
13
|
+
"Licensor" shall mean the copyright owner or entity authorized by
|
14
|
+
the copyright owner that is granting the License.
|
15
|
+
|
16
|
+
"Legal Entity" shall mean the union of the acting entity and all
|
17
|
+
other entities that control, are controlled by, or are under common
|
18
|
+
control with that entity. For the purposes of this definition,
|
19
|
+
"control" means (i) the power, direct or indirect, to cause the
|
20
|
+
direction or management of such entity, whether by contract or
|
21
|
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
22
|
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
23
|
+
|
24
|
+
"You" (or "Your") shall mean an individual or Legal Entity
|
25
|
+
exercising permissions granted by this License.
|
26
|
+
|
27
|
+
"Source" form shall mean the preferred form for making modifications,
|
28
|
+
including but not limited to software source code, documentation
|
29
|
+
source, and configuration files.
|
30
|
+
|
31
|
+
"Object" form shall mean any form resulting from mechanical
|
32
|
+
transformation or translation of a Source form, including but
|
33
|
+
not limited to compiled object code, generated documentation,
|
34
|
+
and conversions to other media types.
|
35
|
+
|
36
|
+
"Work" shall mean the work of authorship, whether in Source or
|
37
|
+
Object form, made available under the License, as indicated by a
|
38
|
+
copyright notice that is included in or attached to the work
|
39
|
+
(an example is provided in the Appendix below).
|
40
|
+
|
41
|
+
"Derivative Works" shall mean any work, whether in Source or Object
|
42
|
+
form, that is based on (or derived from) the Work and for which the
|
43
|
+
editorial revisions, annotations, elaborations, or other modifications
|
44
|
+
represent, as a whole, an original work of authorship. For the purposes
|
45
|
+
of this License, Derivative Works shall not include works that remain
|
46
|
+
separable from, or merely link (or bind by name) to the interfaces of,
|
47
|
+
the Work and Derivative Works thereof.
|
48
|
+
|
49
|
+
"Contribution" shall mean any work of authorship, including
|
50
|
+
the original version of the Work and any modifications or additions
|
51
|
+
to that Work or Derivative Works thereof, that is intentionally
|
52
|
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
53
|
+
or by an individual or Legal Entity authorized to submit on behalf of
|
54
|
+
the copyright owner. For the purposes of this definition, "submitted"
|
55
|
+
means any form of electronic, verbal, or written communication sent
|
56
|
+
to the Licensor or its representatives, including but not limited to
|
57
|
+
communication on electronic mailing lists, source code control systems,
|
58
|
+
and issue tracking systems that are managed by, or on behalf of, the
|
59
|
+
Licensor for the purpose of discussing and improving the Work, but
|
60
|
+
excluding communication that is conspicuously marked or otherwise
|
61
|
+
designated in writing by the copyright owner as "Not a Contribution."
|
62
|
+
|
63
|
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
64
|
+
on behalf of whom a Contribution has been received by Licensor and
|
65
|
+
subsequently incorporated within the Work.
|
66
|
+
|
67
|
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
68
|
+
this License, each Contributor hereby grants to You a perpetual,
|
69
|
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
70
|
+
copyright license to reproduce, prepare Derivative Works of,
|
71
|
+
publicly display, publicly perform, sublicense, and distribute the
|
72
|
+
Work and such Derivative Works in Source or Object form.
|
73
|
+
|
74
|
+
3. Grant of Patent License. Subject to the terms and conditions of
|
75
|
+
this License, each Contributor hereby grants to You a perpetual,
|
76
|
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
77
|
+
(except as stated in this section) patent license to make, have made,
|
78
|
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
79
|
+
where such license applies only to those patent claims licensable
|
80
|
+
by such Contributor that are necessarily infringed by their
|
81
|
+
Contribution(s) alone or by combination of their Contribution(s)
|
82
|
+
with the Work to which such Contribution(s) was submitted. If You
|
83
|
+
institute patent litigation against any entity (including a
|
84
|
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
85
|
+
or a Contribution incorporated within the Work constitutes direct
|
86
|
+
or contributory patent infringement, then any patent licenses
|
87
|
+
granted to You under this License for that Work shall terminate
|
88
|
+
as of the date such litigation is filed.
|
89
|
+
|
90
|
+
4. Redistribution. You may reproduce and distribute copies of the
|
91
|
+
Work or Derivative Works thereof in any medium, with or without
|
92
|
+
modifications, and in Source or Object form, provided that You
|
93
|
+
meet the following conditions:
|
94
|
+
|
95
|
+
(a) You must give any other recipients of the Work or
|
96
|
+
Derivative Works a copy of this License; and
|
97
|
+
|
98
|
+
(b) You must cause any modified files to carry prominent notices
|
99
|
+
stating that You changed the files; and
|
100
|
+
|
101
|
+
(c) You must retain, in the Source form of any Derivative Works
|
102
|
+
that You distribute, all copyright, patent, trademark, and
|
103
|
+
attribution notices from the Source form of the Work,
|
104
|
+
excluding those notices that do not pertain to any part of
|
105
|
+
the Derivative Works; and
|
106
|
+
|
107
|
+
(d) If the Work includes a "NOTICE" text file as part of its
|
108
|
+
distribution, then any Derivative Works that You distribute must
|
109
|
+
include a readable copy of the attribution notices contained
|
110
|
+
within such NOTICE file, excluding those notices that do not
|
111
|
+
pertain to any part of the Derivative Works, in at least one
|
112
|
+
of the following places: within a NOTICE text file distributed
|
113
|
+
as part of the Derivative Works; within the Source form or
|
114
|
+
documentation, if provided along with the Derivative Works; or,
|
115
|
+
within a display generated by the Derivative Works, if and
|
116
|
+
wherever such third-party notices normally appear. The contents
|
117
|
+
of the NOTICE file are for informational purposes only and
|
118
|
+
do not modify the License. You may add Your own attribution
|
119
|
+
notices within Derivative Works that You distribute, alongside
|
120
|
+
or as an addendum to the NOTICE text from the Work, provided
|
121
|
+
that such additional attribution notices cannot be construed
|
122
|
+
as modifying the License.
|
123
|
+
|
124
|
+
You may add Your own copyright statement to Your modifications and
|
125
|
+
may provide additional or different license terms and conditions
|
126
|
+
for use, reproduction, or distribution of Your modifications, or
|
127
|
+
for any such Derivative Works as a whole, provided Your use,
|
128
|
+
reproduction, and distribution of the Work otherwise complies with
|
129
|
+
the conditions stated in this License.
|
130
|
+
|
131
|
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
132
|
+
any Contribution intentionally submitted for inclusion in the Work
|
133
|
+
by You to the Licensor shall be under the terms and conditions of
|
134
|
+
this License, without any additional terms or conditions.
|
135
|
+
Notwithstanding the above, nothing herein shall supersede or modify
|
136
|
+
the terms of any separate license agreement you may have executed
|
137
|
+
with Licensor regarding such Contributions.
|
138
|
+
|
139
|
+
6. Trademarks. This License does not grant permission to use the trade
|
140
|
+
names, trademarks, service marks, or product names of the Licensor,
|
141
|
+
except as required for reasonable and customary use in describing the
|
142
|
+
origin of the Work and reproducing the content of the NOTICE file.
|
143
|
+
|
144
|
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
145
|
+
agreed to in writing, Licensor provides the Work (and each
|
146
|
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
147
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
148
|
+
implied, including, without limitation, any warranties or conditions
|
149
|
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
150
|
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
151
|
+
appropriateness of using or redistributing the Work and assume any
|
152
|
+
risks associated with Your exercise of permissions under this License.
|
153
|
+
|
154
|
+
8. Limitation of Liability. In no event and under no legal theory,
|
155
|
+
whether in tort (including negligence), contract, or otherwise,
|
156
|
+
unless required by applicable law (such as deliberate and grossly
|
157
|
+
negligent acts) or agreed to in writing, shall any Contributor be
|
158
|
+
liable to You for damages, including any direct, indirect, special,
|
159
|
+
incidental, or consequential damages of any character arising as a
|
160
|
+
result of this License or out of the use or inability to use the
|
161
|
+
Work (including but not limited to damages for loss of goodwill,
|
162
|
+
work stoppage, computer failure or malfunction, or any and all
|
163
|
+
other commercial damages or losses), even if such Contributor
|
164
|
+
has been advised of the possibility of such damages.
|
165
|
+
|
166
|
+
9. Accepting Warranty or Additional Liability. While redistributing
|
167
|
+
the Work or Derivative Works thereof, You may choose to offer,
|
168
|
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
169
|
+
or other liability obligations and/or rights consistent with this
|
170
|
+
License. However, in accepting such obligations, You may act only
|
171
|
+
on Your own behalf and on Your sole responsibility, not on behalf
|
172
|
+
of any other Contributor, and only if You agree to indemnify,
|
173
|
+
defend, and hold each Contributor harmless for any liability
|
174
|
+
incurred by, or claims asserted against, such Contributor by reason
|
175
|
+
of your accepting any such warranty or additional liability.
|
176
|
+
|
177
|
+
END OF TERMS AND CONDITIONS
|
178
|
+
|
179
|
+
APPENDIX: How to apply the Apache License to your work.
|
180
|
+
|
181
|
+
To apply the Apache License to your work, attach the following
|
182
|
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
183
|
+
replaced with your own identifying information. (Don't include
|
184
|
+
the brackets!) The text should be enclosed in the appropriate
|
185
|
+
comment syntax for the file format. We also recommend that a
|
186
|
+
file or class name and description of purpose be included on the
|
187
|
+
same "printed page" as the copyright notice for easier
|
188
|
+
identification within third-party archives.
|
189
|
+
|
190
|
+
Copyright [yyyy] [name of copyright owner]
|
191
|
+
|
192
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
193
|
+
you may not use this file except in compliance with the License.
|
194
|
+
You may obtain a copy of the License at
|
195
|
+
|
196
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
197
|
+
|
198
|
+
Unless required by applicable law or agreed to in writing, software
|
199
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
200
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
201
|
+
See the License for the specific language governing permissions and
|
202
|
+
limitations under the License.
|
data/README.md
ADDED
@@ -0,0 +1,117 @@
|
|
1
|
+
# Fluentd plugin for deriving metrics from output buffer chunks
|
2
|
+
|
3
|
+
## Overview
|
4
|
+
The buffered_metrics plugin started off as something originally written to write metrics to the old Stackdriver v1.0 API. The intended purpose is to be able to derive metrics on scheduled intervals, not in real time as individual events pass through the Fluentd processing pipeline. It is implemented as a subclass of the BufferedOutput class and derives metrics from logs on a per-chunk basis, with the specified buffer flushing interval serving as the metrics collection and publishing interval. It is kind of hacky and relies on Ruby evals in an attempt to make the metrics derivations as generic and flexible as possible.
|
5
|
+
|
6
|
+
The metrics backend has been completely reworked as a set of subclasses in an effort to separate the metrics derivation from the metrics publishing. This was done in an attempt to avoid backend conversion work, going forward, since it should now be possible to swap a new metrics backend into place without having to touch the parsers.
|
7
|
+
|
8
|
+
Currently, the graphite backend is known to work and the statsd backend should work, but hasn't really been tested. However, the statsd backend has been hard-coded to "count" data types, since buffered collection for statsd doesn't make a lot of sense since timestamps cannot be preserved.
|
9
|
+
|
10
|
+
## Installation
|
11
|
+
```bash
|
12
|
+
gem install fluent-plugin-buffered-metrics
|
13
|
+
```
|
14
|
+
|
15
|
+
If using the td-agent installation, use the following.
|
16
|
+
|
17
|
+
```bash
|
18
|
+
/opt/td-agent/embedded/bin/gem install fluent-plugin-buffered-metrics
|
19
|
+
```
|
20
|
+
|
21
|
+
## Configuration
|
22
|
+
|
23
|
+
Using this will probably require using the Fluentd copy output plugin. Using the Fluentd copy_ex plugin (fluent-plugin-copy_ex) is highly recommended so that output chains do not short-circuit prior to the metrics handling. Putting this output at the end of the chain is also highly recommended.
|
24
|
+
|
25
|
+
### Parameters
|
26
|
+
|
27
|
+
`metrics_backend`: name of the metrics backend, currently either 'graphite' or 'statsd' (defaults to 'graphite')
|
28
|
+
|
29
|
+
`url`: endpoint for metrics publication (defaults to 'tcp://localhost:2003' for graphite and 'udp://localhost:8125' for statsd)
|
30
|
+
|
31
|
+
`prefix`: used to prepend all metric names (optional)
|
32
|
+
|
33
|
+
`sum_maps`: a JSON serialized hash where the keys are Ruby expressions which evaluate to either a Numeric or non-Numeric type and the values are arrays with the names (Ruby expressions which evaluate to strings) of the metrics to add the value to, when Numeric. (optional)
|
34
|
+
|
35
|
+
`metric_maps`: a JSON serialized hash where the keys evaluate to Booleans and the values evaluate to metric hashes (optional)
|
36
|
+
|
37
|
+
`counter_defaults`: a JSON serialized array containing default values for metrics which should be sent if there are no occurrences of the metric in a buffer chunk (optional)
|
38
|
+
|
39
|
+
`metric_defaults`: a JSON serialized array containing default values for metrics which should be sent if there are no occurrences of the metric in a buffer chunk (optional)
|
40
|
+
|
41
|
+
#### Notes
|
42
|
+
|
43
|
+
The `url` parameter, in particular, is not something which is typically used in plugins such as this. In particular, there is *no* hard-coded requirement for the endpoint to be a network socket, regardless of how the metrics backend is typically configured, with individual `host`, `port`, and `proto` parameters required. By using a single `url` parameter, any valid URL can be specified (so long as the handler has been implemented). For instancee, an URL such as "file:///var/tmp/test.out" could be used in any configuration, even if the metrics backend is typically never configured in this way. The primary reason for doing this is to allow for a simple way to debug configurations prior to putting them into live service without having to set up an ad hoc port listener or packet sniff the transmission just to see if the outputs are even in the correct format.
|
44
|
+
|
45
|
+
### Example configuration
|
46
|
+
|
47
|
+
Note that the following is a test configuration, appending to a file on the local filesystem prior to publishing anything to the live backend.
|
48
|
+
```
|
49
|
+
<match **>
|
50
|
+
@type copy_ex
|
51
|
+
<store ignore_error>
|
52
|
+
[your stand log output configuration(s)]
|
53
|
+
</store>
|
54
|
+
<store ignore_error>
|
55
|
+
@type buffered_metrics
|
56
|
+
metrics_backend graphite
|
57
|
+
prefix fluentd.<hostname>.5m
|
58
|
+
#url tcp://localhost:2003
|
59
|
+
url file:///var/tmp/graphite_test.out
|
60
|
+
sum_defaults [{"-.-.-.count":0},{"-.-.-.bytes":0}]
|
61
|
+
sum_maps {"event['record'].empty? ? false : 1":["-.-.-.count","(['tag','facility','level'].map {|t| event['record'][t] || '-'}+['count']).join('.')"],"event['record'].empty? ? false : event.to_s.length":["-.-.-.bytes","(['tag','facility','level'].map {|t| event['record'][t] || '-'}+['bytes']).join('.')"]}
|
62
|
+
metric_maps {}
|
63
|
+
metric_defaults []
|
64
|
+
flush_interval 5m
|
65
|
+
# Make this a working configuration
|
66
|
+
[standard BufferedOutput parameters]
|
67
|
+
</store>
|
68
|
+
</source>
|
69
|
+
```
|
70
|
+
|
71
|
+
Admittedly, the input specification is rather ugly, but there is really no "pretty" way to specify the inputs in a way which preserves the flexibility. The "maps" are intended to be a "data-driven programming" inputs, where keys are the matching conditions and the values are the actions on matches.
|
72
|
+
|
73
|
+
Note that events are processed using the following data structure. The following represents the data structure as JSON, but the event is *not* serialized for processing -- the processing is not based on regex parsing a JSON string serialization of the event. The Fluentd event metadata (`tag` and `timestamp`) are available, as well as the actual event record data in the `record` data structure.
|
74
|
+
|
75
|
+
```JSON
|
76
|
+
{
|
77
|
+
"tag": <Fluentd event tag>,
|
78
|
+
"timestamp": <Fluentd event timestamp>.
|
79
|
+
"record": { <keys/values> }
|
80
|
+
}
|
81
|
+
```
|
82
|
+
|
83
|
+
#### sum_maps:
|
84
|
+
```JSON
|
85
|
+
{
|
86
|
+
"event[record].empty? ? false : 1" : [
|
87
|
+
"-.-.-.count",
|
88
|
+
"([tag,facility,level].map {|t| event[record][t] || -}+[count]).join('.')"
|
89
|
+
],
|
90
|
+
"event[record].empty? ? false : event.to_s.length" : [
|
91
|
+
"-.-.-.bytes",
|
92
|
+
"([tag,facility,level].map {|t| event[record][t] || -}+[bytes]).join('.')"
|
93
|
+
]
|
94
|
+
}
|
95
|
+
```
|
96
|
+
|
97
|
+
The `event[record].empty? ? false : 1` key is a Ruby expression which evaluates to `false` (a Boolean type -- not Numeric) when it should do anything with this particular entry in the buffer chunk, and a `1` (an Integer type -- definitely Numeric). It may be a bit convoluted, but this implements counters (ie. the Numeric value can be anything, including values derived data in the event).
|
98
|
+
|
99
|
+
The first element in the array, `-.-.-.count` is not a Ruby expression, so it does not dynamically evaluate to anything. Every time the key evaluates to "1", the `<prefix>.-.-.-.count` is incremented.
|
100
|
+
|
101
|
+
The second element in the array, `([tag,facility,level].map {|t| event[record][t] || -}+[count]).join('.')`, is a Ruby expression, and the name will be dynamically set based on event data. In this case, all events are expected to have `tag`, `facility`, and `level` keys in the record. When Ruby evaluates this expression, the metric name becomes `<prefix>.<record tag value>.<record facility>.<record level>`.
|
102
|
+
|
103
|
+
So, even though there is only one event matching condition, two distinct metrics (a total, and a subtotal depending on `tag`, `facility`, and `level`) are incremented on every match. The second key, `event[record].empty? ? false : event.to_s.length`, acts in a similar way, except that the increment value is not the value "1" (it is not a counter), but rather the (approximate) size of the event (in bytes).
|
104
|
+
|
105
|
+
#### sum_defaults:
|
106
|
+
```JSON
|
107
|
+
[
|
108
|
+
{
|
109
|
+
"-.-.-.count" : 0
|
110
|
+
},
|
111
|
+
{
|
112
|
+
"-.-.-.bytes" : 0
|
113
|
+
}
|
114
|
+
]
|
115
|
+
```
|
116
|
+
|
117
|
+
The "defaults" specifications are needed if there are metrics which need to be sent with every metrics publication, even if none of the matches are met and the metric is never created during the run. This may be more of an artifact, as this was initially done for Stackdriver, which had real issues with gaps in metrics series.
|
@@ -0,0 +1,25 @@
|
|
1
|
+
-----BEGIN CERTIFICATE-----
|
2
|
+
MIIELzCCAxGgAwIBAgIJAMOoRmNuvAZcMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYD
|
3
|
+
VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j
|
4
|
+
aXNjbzEVMBMGA1UECgwMSG90c2NoZWR1bGVzMQ4wDAYDVQQLDAVJbmZyYTEfMB0G
|
5
|
+
A1UEAwwWaW5mcmEuaG90c2NoZWR1bGVzLmNvbTEjMCEGCSqGSIb3DQEJARYUb3Nz
|
6
|
+
QGhvdHNjaGVkdWxlcy5jb20wHhcNMTcwMjE4MDA1NDU4WhcNMjEwMjE4MDA1NDU4
|
7
|
+
WjCBpzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM
|
8
|
+
DVNhbiBGcmFuY2lzY28xFTATBgNVBAoMDEhvdHNjaGVkdWxlczEOMAwGA1UECwwF
|
9
|
+
SW5mcmExHzAdBgNVBAMMFmluZnJhLmhvdHNjaGVkdWxlcy5jb20xIzAhBgkqhkiG
|
10
|
+
9w0BCQEWFG9zc0Bob3RzY2hlZHVsZXMuY29tMIIBKDANBgkqhkiG9w0BAQEFAAOC
|
11
|
+
ARUAMIIBEAKCAQcAutTytHjHFb5yK/29POqMS3DyxRSRRbKvBus/rEL3reeKDVlf
|
12
|
+
eO9a/9U3oUcxHFHeISF9uOBSvL88svxhLqxkKLVA0vMbVgDFoGsOlh93nA5Lmw+H
|
13
|
+
SEzG3+Z6JqH8YfAIqVyp1nZcdtC7u8xYpDD65ayjRALLuvOUXORGeqgedlgiwwLp
|
14
|
+
2tZ7A9tCzBZnDjcNenD8zXLCOdRjJiBPq/XQS9h4POWvpom0z+jyUkRj3ojqOoXW
|
15
|
+
bKKdXzdZ3s5BSwgxlWJo+lK/50+xLpjJe4mEJwySkrluy8GOG/UR/j+Y4I1j1FbY
|
16
|
+
RwT9SUSairjh0zgiDC0LTjAJBIenuxxuM9jhRyoiu+YYVwIDAQABo1AwTjAdBgNV
|
17
|
+
HQ4EFgQUtlFsvLIJ59o2GyoRTXKEIPEgnn8wHwYDVR0jBBgwFoAUtlFsvLIJ59o2
|
18
|
+
GyoRTXKEIPEgnn8wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQcAdELJ
|
19
|
+
QdrR55Xak3q5IhLnIdOLJC6ZEdHI2173SSmytInyDX6fe9uZION4I9WCCm/hCp+t
|
20
|
+
QILh4feu14iDWmBxzy6F6tiyuEjhaU5Ud9wZ/YDBQhP2AHquoPAufxXZAUFlA5er
|
21
|
+
wIYVupToBrWavfbjgRqRHAZ6vwrLwoIiomh2/wv06HLrFmSzj5q6BVp7FFMKDail
|
22
|
+
QWzJ4aQl786RMwPa776eVaKQTmQuTUUppvhVq9QdKyxwGPibAYGbYKZwmmtx+BV2
|
23
|
+
GJevNV8BSuhh6m2AHCxEzMaD7WKNbewTTgGLUlvppaR9SE2V/yUZ7x1Vp8/mGeO6
|
24
|
+
2Zk648Ep9HVPKmwoVuB75+xEQw==
|
25
|
+
-----END CERTIFICATE-----
|
@@ -0,0 +1,20 @@
|
|
1
|
+
# -*- encoding: utf-8 -*-
|
2
|
+
|
3
|
+
Gem::Specification.new do |gem|
|
4
|
+
gem.name = 'fluent-plugin-buffered-metrics'
|
5
|
+
gem.version = ENV.key?('RUBYGEM_VERSION') ? ENV['RUBYGEM_VERSION'] : '0.0.1'
|
6
|
+
gem.authors = ['Alex Yamauchi']
|
7
|
+
gem.email = ['oss@hotschedules.com']
|
8
|
+
gem.homepage = 'https://github.com/hotschedules/fluent-plugin-buffered-metrics'
|
9
|
+
gem.description = %q{Fluentd plugin derive metrics from log buffer chunks and submit to various metrics backends}
|
10
|
+
gem.summary = %q{Fluentd plugin derive metrics from log buffer chunks and submit to various metrics backends at intervals determined by the buffer flushing frequency.}
|
11
|
+
gem.homepage = 'https://github.com/hotschedules/fluent-plugin-buffered-metrics'
|
12
|
+
gem.license = 'Apache-2.0'
|
13
|
+
gem.add_runtime_dependency 'fluentd', '>= 0.10.0'
|
14
|
+
gem.files = `git ls-files`.split("\n")
|
15
|
+
gem.executables = gem.files.grep(%r{^bin/}) { |f| File.basename(f) }
|
16
|
+
gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
|
17
|
+
gem.require_paths = ['lib']
|
18
|
+
gem.signing_key = File.expand_path( ENV.key?('RUBYGEM_SIGNING_KEY') ? ENV['RUBYGEM_SIGNING_KEY'] : '~/certs/oss@hotschedules.com.key' ) if $0 =~ /\bgem[\.0-9]*\z/
|
19
|
+
gem.cert_chain = %w[certs/oss@hotschedules.com.cert]
|
20
|
+
end
|
@@ -0,0 +1,201 @@
|
|
1
|
+
module Fluent
|
2
|
+
|
3
|
+
class SocketLike
|
4
|
+
# Just a little something to try to make any possible output
|
5
|
+
# bakced act like a socket so we don't have to check the type
|
6
|
+
# and change method colls throughout the code.
|
7
|
+
|
8
|
+
def open
|
9
|
+
if @parameters.nil? or @parameters.empty?
|
10
|
+
raise RuntimeError, "SocketLike open called with no parameters set"
|
11
|
+
else
|
12
|
+
if @parameters['proto'] == 'tcp'
|
13
|
+
@socket = TCPSocket.new(
|
14
|
+
@parameters['host'],
|
15
|
+
@parameters['port']
|
16
|
+
)
|
17
|
+
elsif @parameters['proto'] == 'udp'
|
18
|
+
@socket = UDPSocket.new(
|
19
|
+
@parameters['host'],
|
20
|
+
@parameters['port']
|
21
|
+
)
|
22
|
+
elsif @parameters['proto'] == 'unix'
|
23
|
+
@socket = UNIXsocket.new(@parameters['path'])
|
24
|
+
elsif @parameters['proto'] == 'file'
|
25
|
+
@socket = File.new(@parameters['path'],'a')
|
26
|
+
elsif @parameters['proto'] =~ /^http/
|
27
|
+
@socket = Net::HTTP.new(@parameters['host'],@parameters['port'])
|
28
|
+
@socket.use_ssl = @parameters['proto'] == 'https'
|
29
|
+
else
|
30
|
+
raise ArgumentError, 'SocketLike class does not support protocol' + @parameters['proto']
|
31
|
+
end
|
32
|
+
end
|
33
|
+
end
|
34
|
+
|
35
|
+
def write(string)
|
36
|
+
if @parameters['proto'] =~ /^http/
|
37
|
+
req = Net::HTTP::Post.new(@parameters['path'])
|
38
|
+
req.body = string
|
39
|
+
@parameters['headers'].each do |h|
|
40
|
+
req.add_field(h[0],h[1])
|
41
|
+
end
|
42
|
+
#@socket.request(req) or raise IOError "Error writing to backend"
|
43
|
+
@socket.request(req)
|
44
|
+
else
|
45
|
+
#@socket.write(string) or raise IOError "Error writing to backend"
|
46
|
+
@socket.write(string)
|
47
|
+
end
|
48
|
+
end
|
49
|
+
|
50
|
+
def close
|
51
|
+
if @parameters['proto'] =~ /^http/
|
52
|
+
@socket.finish
|
53
|
+
else
|
54
|
+
@socket.close
|
55
|
+
end
|
56
|
+
end
|
57
|
+
|
58
|
+
def initialize(parameters)
|
59
|
+
@parameters = parameters
|
60
|
+
# Is this actually needed here?
|
61
|
+
@socket = nil
|
62
|
+
if @parameters['proto'] =~ /^http/
|
63
|
+
require 'net/http'
|
64
|
+
require 'net/https' if @paramters['proto'] == 'https'
|
65
|
+
end
|
66
|
+
end
|
67
|
+
|
68
|
+
end
|
69
|
+
|
70
|
+
class MetricsBackend
|
71
|
+
|
72
|
+
def get_connection_parameters_defaults
|
73
|
+
# This should be overriden in any any subclass.
|
74
|
+
return {}
|
75
|
+
end
|
76
|
+
|
77
|
+
def set_connection_parameters(url,headers = [])
|
78
|
+
@connection_parameters.merge!(get_connection_parameters_defaults)
|
79
|
+
@connection_parameters.merge!(
|
80
|
+
Hash[['proto','host','port','path'].zip(
|
81
|
+
url.match(/^([^:]*):\/\/([^:\/]*):?(\d*)(\/.*)?/).to_a.map {|e| e.nil? or e.empty? ? nil : e }[1..-1]
|
82
|
+
)]
|
83
|
+
) unless url.nil? or url.empty?
|
84
|
+
@connection_parameters['headers'] ||= []
|
85
|
+
@connection_parameters['headers'] += headers
|
86
|
+
end
|
87
|
+
|
88
|
+
def get_connection_parameters
|
89
|
+
@connection_parameters
|
90
|
+
end
|
91
|
+
|
92
|
+
def initialize(url = nil,headers = [])
|
93
|
+
@output_buffer = []
|
94
|
+
@connection_parameters = {}
|
95
|
+
set_connection_parameters(url, headers)
|
96
|
+
@connection = SocketLike.new(@connection_parameters)
|
97
|
+
end
|
98
|
+
|
99
|
+
def buffer_dump
|
100
|
+
# Allw this to be overridable to facitatte formats such as
|
101
|
+
# multiliine formats, or things JSON where there are
|
102
|
+
# punctuation differences depending upon position.
|
103
|
+
@output_buffer.join("\n") + "\n"
|
104
|
+
end
|
105
|
+
|
106
|
+
def buffer_flush
|
107
|
+
|
108
|
+
begin
|
109
|
+
@connection.open
|
110
|
+
@connection.write(buffer_dump)
|
111
|
+
rescue
|
112
|
+
raise
|
113
|
+
ensure
|
114
|
+
@connection.close
|
115
|
+
end
|
116
|
+
|
117
|
+
@output_buffer = []
|
118
|
+
end
|
119
|
+
|
120
|
+
def buffer?
|
121
|
+
not @output_buffer.empty?
|
122
|
+
end
|
123
|
+
|
124
|
+
def serialize_entry(entry,time)
|
125
|
+
raise NoMethodError, 'The serialize_entry method has not been specified.'
|
126
|
+
end
|
127
|
+
|
128
|
+
def serialize_array(data)
|
129
|
+
return if data.empty?
|
130
|
+
if data[0].is_a?(Hash)
|
131
|
+
return serialize_array_of_hashes(data)
|
132
|
+
elsif data[0].is_a?(Array)
|
133
|
+
return serialize_array_of_arrays(data)
|
134
|
+
else
|
135
|
+
raise ArgumentError, 'serialize_array method input must be of Hash or Array type'
|
136
|
+
end
|
137
|
+
end
|
138
|
+
|
139
|
+
def serialize(data)
|
140
|
+
if data.is_a?(Hash)
|
141
|
+
return serialize_hash(data)
|
142
|
+
elsif data.is_a(Array)
|
143
|
+
return serialize_array(data)
|
144
|
+
else
|
145
|
+
raise ArgumentError, 'serialize method input must be of Hash or Array type'
|
146
|
+
end
|
147
|
+
end
|
148
|
+
|
149
|
+
def buffer_append_array_of_values(data)
|
150
|
+
@output_buffer += data.map {|e| serialize_entry(e[0],e[1]) }
|
151
|
+
end
|
152
|
+
|
153
|
+
def buffer_append_array_of_hashes(data)
|
154
|
+
@output_buffer += data.map {|e| serialize_entry(e,e['time']) }
|
155
|
+
end
|
156
|
+
|
157
|
+
def buffer_append(data)
|
158
|
+
@output_buffer << serialize(data)
|
159
|
+
end
|
160
|
+
|
161
|
+
def buffer_append_entry(entry,time)
|
162
|
+
@output_buffer << serialize_entry(entry,time)
|
163
|
+
end
|
164
|
+
|
165
|
+
end
|
166
|
+
|
167
|
+
class MetricsBackendGraphite < MetricsBackend
|
168
|
+
|
169
|
+
def get_connection_parameters_defaults
|
170
|
+
return { 'proto' => 'tcp', 'host' => 'localhost', 'port' => 2003 }
|
171
|
+
end
|
172
|
+
|
173
|
+
def serialize_entry(entry,time)
|
174
|
+
return sprintf(
|
175
|
+
'%s%s %s %i',
|
176
|
+
entry.key?('prefix') ? entry['prefix'] + '.' : '',
|
177
|
+
entry['name'],entry['value'].to_s,
|
178
|
+
time.to_i
|
179
|
+
)
|
180
|
+
end
|
181
|
+
|
182
|
+
end
|
183
|
+
|
184
|
+
class MetricsBackendStatsd < MetricsBackend
|
185
|
+
|
186
|
+
def get_connection_parameters_defaults
|
187
|
+
return { 'proto' => 'udp', 'host' => 'localhost', 'port' => 8215 }
|
188
|
+
end
|
189
|
+
|
190
|
+
def serialize_entry(entry,time)
|
191
|
+
return sprintf(
|
192
|
+
'%s%s:%s|c',
|
193
|
+
entry.key?('prefix') ? entry['prefix'] + '.' : '',
|
194
|
+
entry['name'],
|
195
|
+
entry['value'].to_i
|
196
|
+
)
|
197
|
+
end
|
198
|
+
|
199
|
+
end
|
200
|
+
|
201
|
+
end
|
@@ -0,0 +1,176 @@
|
|
1
|
+
module Fluent
  # Buffered output plugin that derives metrics from log chunks and
  # forwards them to a pluggable metrics backend (graphite, statsd, ...).
  class BufferedMetricsOutput < BufferedOutput

    Plugin.register_output('buffered_metrics', self)

    # Older fluentd releases do not define a per-plugin logger; fall
    # back to the global one.
    unless method_defined?(:log)
      define_method('log') { $log }
    end

    config_param :metrics_backend, :string, :default => 'graphite'
    config_param :url, :string, :default => nil
    config_param :http_headers, :array, :default => []
    config_param :prefix, :string, :default => nil
    config_param :instance_id, :string, :default => nil
    config_param :sum_maps, :hash, :default => {}
    config_param :sum_defaults, :array, :default => []
    config_param :metric_maps, :hash, :default => {}
    config_param :metric_defaults, :array, :default => []

    # The following are overrides for the parameters inherited from
    # the superclass, to give them more sensible defaults. Since this
    # is likely to be run fairly frequently, don't allow for long waits.
    config_param :retry_limit, :integer, :default => 4
    config_param :retry_wait, :time, :default => 1.0
    config_param :max_retry_wait, :time, :default => 5.0

    def initialize
      super
      require 'fluent/metrics_backends'
    end

    # Read plugin configuration, normalize the map parameters, and
    # instantiate the backend class named by +metrics_backend+.
    def configure(conf)
      super(conf) {
        @url = conf.delete('url')
        @http_headers = conf.delete('http_headers')
        @metrics_backend = conf.delete('metrics_backend')
        @prefix = conf.delete('prefix')
        @sum_maps = conf.delete('sum_maps')
        @sum_defaults = conf.delete('sum_defaults')
        @metric_maps = conf.delete('metric_maps')
        @metric_defaults = conf.delete('metric_defaults')
      }

      @base_entry = {}

      unless @prefix.nil?

        # SECURITY NOTE: the prefix is eval'd so the configuration can
        # embed Ruby expressions; only trusted configuration files
        # should ever be loaded by this plugin.
        begin
          @prefix = eval(@prefix) || eval('"'+@prefix+'"')
        rescue
          raise ArgumentError, "Error setting prefix from '#{@prefix}'"
        end

        @base_entry['prefix'] = @prefix unless @prefix.empty?

      end

      # Allow scalar map values as shorthand for one-element arrays.
      @sum_maps.each do |k,v|
        @sum_maps[k] = [ v ] unless v.is_a?(Array)
      end

      @metric_maps.each do |k,v|
        @metric_maps[k] = [ v ] unless v.is_a?(Array)
      end

      begin
        backend_name = @metrics_backend
        # Map e.g. 'graphite' -> Fluent::MetricsBackendGraphite.
        @metrics_backend = Object.const_get(
          sprintf('Fluent::MetricsBackend%s',backend_name.capitalize)
        ).new(@url,@http_headers)

      rescue => e
        log.error "Error initializing metrics backend #{backend_name}"
        raise e
      end

    end

    def format(tag, time, record)
      { 'tag' => tag, 'time' => time, 'record' => record }.to_msgpack
    end

    # Scan a buffer chunk, evaluate the configured sum/metric maps
    # against each event, and append the derived metrics to the
    # backend's output buffer.
    def derive_metrics(chunk)
      # Prefer the chunk metadata timekey (available from v0.14 on);
      # fall back to the current time. BUG FIX: the original tested
      # chunk.methods.include?(/metadata/), which is always false
      # because Array#include? compares with == and never matches a
      # Regexp, so the metadata branch was unreachable.
      if chunk.respond_to?(:metadata) && chunk.metadata.timekey
        timestamp = chunk.metadata.timekey.to_f
      else
        timestamp = Time.now.to_f
      end

      sum_data = {}
      metric_data = {}

      chunk.msgpack_each do |event|

        # SECURITY NOTE: map keys and values are eval'd with +event+
        # in scope; only trusted configuration should be loaded.
        @sum_maps.each do |k,v|

          begin
            incr = eval(k)
            if incr.is_a?(Numeric)
              v.each do |e|
                begin
                  name = eval(e)
                rescue Exception
                  # Fall back to treating the expression as an
                  # interpolated string template.
                  name = eval('"'+e+'"')
                end
                sum_data[name] ||= 0
                sum_data[name] += incr
              end
            end
          rescue Exception => e
            # BUG FIX: was e.trace, which is not an Exception method
            # and itself raised inside this rescue.
            log.error "Failed to process sum_map (#{k},#{v}) for event: #{event}: #{e.message}"
          end

        end

        @metric_maps.each do |k,v|

          begin
            if eval(k)
              v.each do |e|
                val = eval(e)
                if val.is_a?(Hash) and not val.empty?
                  @metrics_backend.buffer_append_entry(
                    @base_entry.merge(val),
                    event['time']
                  )
                end
              end
            end
          rescue Exception => e
            # BUG FIX: was e.trace (see above).
            log.error "Failed to process metric_map (#{k},#{v}) for event: #{event}: #{e.message}"
          end

        end

      end

      # Seed configured default values for any sums that produced no
      # data from this chunk.
      @sum_defaults.each do |e|
        if e.key?('name') and not e['name'].nil? and not e['name'].empty?
          sum_data[e['name']] = e['value'] unless sum_data.key?(e['name'])
        end
      end

      @metrics_backend.buffer_append_array_of_hashes(
        sum_data.map {|name,value|
          @base_entry.merge({ 'name' => name, 'value' => value, 'time' => timestamp })
        }
      )

      # NOTE(review): metric_data is never populated above, so the
      # key? guard currently passes every default through — confirm
      # whether defaults were meant to be suppressed when a matching
      # metric was emitted.
      # BUG FIX: entries lacking a usable 'name' map to nil; compact
      # them out so the backend is not asked to serialize nil.
      @metrics_backend.buffer_append_array_of_hashes(
        @metric_defaults.map {|e|
          if e.key?('name') and not e['name'].nil? and not e['name'].empty?
            @base_entry.merge(e).merge({'time' => timestamp}) unless metric_data.key?(e['name'])
          end
        }.compact
      )

    end

    def write(chunk)

      # The superclass BufferedOutput provides the retry logic. If the
      # buffer already has content this must be a retry after a failed
      # flush, so don't re-scan the chunk for the metrics.
      derive_metrics(chunk) unless @metrics_backend.buffer?
      @metrics_backend.buffer_flush if @metrics_backend.buffer?

    end

  end
end
|
metadata
ADDED
@@ -0,0 +1,114 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: fluent-plugin-buffered-metrics
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
hash: 29
|
5
|
+
prerelease:
|
6
|
+
segments:
|
7
|
+
- 0
|
8
|
+
- 0
|
9
|
+
- 1
|
10
|
+
version: 0.0.1
|
11
|
+
platform: ruby
|
12
|
+
authors:
|
13
|
+
- Alex Yamauchi
|
14
|
+
autorequire:
|
15
|
+
bindir: bin
|
16
|
+
cert_chain:
|
17
|
+
- |
|
18
|
+
-----BEGIN CERTIFICATE-----
|
19
|
+
MIIELzCCAxGgAwIBAgIJAMOoRmNuvAZcMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYD
|
20
|
+
VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j
|
21
|
+
aXNjbzEVMBMGA1UECgwMSG90c2NoZWR1bGVzMQ4wDAYDVQQLDAVJbmZyYTEfMB0G
|
22
|
+
A1UEAwwWaW5mcmEuaG90c2NoZWR1bGVzLmNvbTEjMCEGCSqGSIb3DQEJARYUb3Nz
|
23
|
+
QGhvdHNjaGVkdWxlcy5jb20wHhcNMTcwMjE4MDA1NDU4WhcNMjEwMjE4MDA1NDU4
|
24
|
+
WjCBpzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM
|
25
|
+
DVNhbiBGcmFuY2lzY28xFTATBgNVBAoMDEhvdHNjaGVkdWxlczEOMAwGA1UECwwF
|
26
|
+
SW5mcmExHzAdBgNVBAMMFmluZnJhLmhvdHNjaGVkdWxlcy5jb20xIzAhBgkqhkiG
|
27
|
+
9w0BCQEWFG9zc0Bob3RzY2hlZHVsZXMuY29tMIIBKDANBgkqhkiG9w0BAQEFAAOC
|
28
|
+
ARUAMIIBEAKCAQcAutTytHjHFb5yK/29POqMS3DyxRSRRbKvBus/rEL3reeKDVlf
|
29
|
+
eO9a/9U3oUcxHFHeISF9uOBSvL88svxhLqxkKLVA0vMbVgDFoGsOlh93nA5Lmw+H
|
30
|
+
SEzG3+Z6JqH8YfAIqVyp1nZcdtC7u8xYpDD65ayjRALLuvOUXORGeqgedlgiwwLp
|
31
|
+
2tZ7A9tCzBZnDjcNenD8zXLCOdRjJiBPq/XQS9h4POWvpom0z+jyUkRj3ojqOoXW
|
32
|
+
bKKdXzdZ3s5BSwgxlWJo+lK/50+xLpjJe4mEJwySkrluy8GOG/UR/j+Y4I1j1FbY
|
33
|
+
RwT9SUSairjh0zgiDC0LTjAJBIenuxxuM9jhRyoiu+YYVwIDAQABo1AwTjAdBgNV
|
34
|
+
HQ4EFgQUtlFsvLIJ59o2GyoRTXKEIPEgnn8wHwYDVR0jBBgwFoAUtlFsvLIJ59o2
|
35
|
+
GyoRTXKEIPEgnn8wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQcAdELJ
|
36
|
+
QdrR55Xak3q5IhLnIdOLJC6ZEdHI2173SSmytInyDX6fe9uZION4I9WCCm/hCp+t
|
37
|
+
QILh4feu14iDWmBxzy6F6tiyuEjhaU5Ud9wZ/YDBQhP2AHquoPAufxXZAUFlA5er
|
38
|
+
wIYVupToBrWavfbjgRqRHAZ6vwrLwoIiomh2/wv06HLrFmSzj5q6BVp7FFMKDail
|
39
|
+
QWzJ4aQl786RMwPa776eVaKQTmQuTUUppvhVq9QdKyxwGPibAYGbYKZwmmtx+BV2
|
40
|
+
GJevNV8BSuhh6m2AHCxEzMaD7WKNbewTTgGLUlvppaR9SE2V/yUZ7x1Vp8/mGeO6
|
41
|
+
2Zk648Ep9HVPKmwoVuB75+xEQw==
|
42
|
+
-----END CERTIFICATE-----
|
43
|
+
|
44
|
+
date: 2018-04-05 00:00:00 Z
|
45
|
+
dependencies:
|
46
|
+
- !ruby/object:Gem::Dependency
|
47
|
+
name: fluentd
|
48
|
+
prerelease: false
|
49
|
+
requirement: &id001 !ruby/object:Gem::Requirement
|
50
|
+
none: false
|
51
|
+
requirements:
|
52
|
+
- - ">="
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
hash: 55
|
55
|
+
segments:
|
56
|
+
- 0
|
57
|
+
- 10
|
58
|
+
- 0
|
59
|
+
version: 0.10.0
|
60
|
+
type: :runtime
|
61
|
+
version_requirements: *id001
|
62
|
+
description: Fluentd plugin derive metrics from log buffer chunks and submit to various metrics backends
|
63
|
+
email:
|
64
|
+
- oss@hotschedules.com
|
65
|
+
executables: []
|
66
|
+
|
67
|
+
extensions: []
|
68
|
+
|
69
|
+
extra_rdoc_files: []
|
70
|
+
|
71
|
+
files:
|
72
|
+
- .gitignore
|
73
|
+
- LICENSE
|
74
|
+
- LICENSE-Apache-2.0.txt
|
75
|
+
- README.md
|
76
|
+
- certs/oss@hotschedules.com.cert
|
77
|
+
- fluent-plugin-buffered-metrics.gemspec
|
78
|
+
- lib/fluent/metrics_backends.rb
|
79
|
+
- lib/fluent/plugin/out_buffered_metrics.rb
|
80
|
+
homepage: https://github.com/hotschedules/fluent-plugin-buffered-metrics
|
81
|
+
licenses:
|
82
|
+
- Apache-2.0
|
83
|
+
post_install_message:
|
84
|
+
rdoc_options: []
|
85
|
+
|
86
|
+
require_paths:
|
87
|
+
- lib
|
88
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
89
|
+
none: false
|
90
|
+
requirements:
|
91
|
+
- - ">="
|
92
|
+
- !ruby/object:Gem::Version
|
93
|
+
hash: 3
|
94
|
+
segments:
|
95
|
+
- 0
|
96
|
+
version: "0"
|
97
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
98
|
+
none: false
|
99
|
+
requirements:
|
100
|
+
- - ">="
|
101
|
+
- !ruby/object:Gem::Version
|
102
|
+
hash: 3
|
103
|
+
segments:
|
104
|
+
- 0
|
105
|
+
version: "0"
|
106
|
+
requirements: []
|
107
|
+
|
108
|
+
rubyforge_project:
|
109
|
+
rubygems_version: 1.8.25
|
110
|
+
signing_key:
|
111
|
+
specification_version: 3
|
112
|
+
summary: Fluentd plugin derive metrics from log buffer chunks and submit to various metrics backends at intervals determined by the buffer flushing frequency.
|
113
|
+
test_files: []
|
114
|
+
|
metadata.gz.sig
ADDED
Binary file
|