logstash-output-cloudwatch 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,15 @@
+ ---
+ !binary "U0hBMQ==":
+   metadata.gz: !binary |-
+     YzNhZmEzMTFmZmE3NWJkOTViYjM1ODVmZjFiOWRkMjIwMDZmZGE4Nw==
+   data.tar.gz: !binary |-
+     ODI5ZDg2ZTI3OTFjNzA4NWUzMDcxNTIzMzY3NmViYmY1ODBhOTc4Ng==
+ SHA512:
+   metadata.gz: !binary |-
+     ZTMyMTZiOWEwNTI3MGM0ZGE5NGE5NTI4Y2JlYzI0MjY4OGU5NzA5NWQ3ZjRj
+     OTRhYjY1ZmY4ZTRkMDljZjBhMWYzOTZkZjkxYmZmNWQ3NTI0YWY4Nzg5NTY2
+     MzU0MDVlMjY4ODExYzgxODZlNzIyOTk1NDVhZjAwMzM4NTg4MDE=
+   data.tar.gz: !binary |-
+     MmZlMDUxODM5NTM0ZTJjMWE2MWYzYmJiZDIwNDgzZjgwYzE4ZTJkYTc0NDlh
+     ZGFjODU2N2ZmYzhkNTE5ZGU5OGJkZmI0NDUxMWRkMzdkZTJjOTVjOTJjYmQw
+     MTUwYTRmMDUyMGFiZDk2MzdmNmM3OGNmZTI3YmEzYWE3ZjRiMWI=
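
The digests above are hex checksums stored as base64-encoded text. A minimal verification sketch, assuming a `metadata.gz` extracted from the downloaded gem sits in the working directory (the local path is illustrative):

```ruby
require "base64"
require "digest/sha1"

# Decoding the SHA1 entry for metadata.gz yields the plain hex digest.
encoded  = "YzNhZmEzMTFmZmE3NWJkOTViYjM1ODVmZjFiOWRkMjIwMDZmZGE4Nw=="
expected = Base64.decode64(encoded) # => "c3afa311ffa75bd95bb3585ff1b9dd22006fda87"

# Compare against the digest of the local file (hypothetical path).
actual = Digest::SHA1.file("metadata.gz").hexdigest
puts(actual == expected ? "checksum OK" : "checksum mismatch")
```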
data/.gitignore ADDED
@@ -0,0 +1,4 @@
+ *.gem
+ Gemfile.lock
+ .bundle
+ vendor
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source 'http://rubygems.org'
+ gem 'rake'
+ gem 'gem_publisher'
+ gem 'archive-tar-minitar'
data/LICENSE ADDED
@@ -0,0 +1,13 @@
+ Copyright (c) 2012-2014 Elasticsearch <http://www.elasticsearch.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
data/Rakefile ADDED
@@ -0,0 +1,6 @@
+ @files = []
+
+ task :default do
+   system("rake -T")
+ end
+
data/lib/logstash/outputs/cloudwatch.rb ADDED
@@ -0,0 +1,351 @@
+ # encoding: utf-8
+ require "logstash/outputs/base"
+ require "logstash/namespace"
+ require "logstash/plugin_mixins/aws_config"
+
+ # This output lets you aggregate and send metric data to AWS CloudWatch
+ #
+ # #### Summary:
+ # This plugin is intended to be used on a logstash indexer agent (but that
+ # is not the only way, see below.) In the intended scenario, one cloudwatch
+ # output plugin is configured, on the logstash indexer node, with just AWS API
+ # credentials, and possibly a region and/or a namespace. The output looks
+ # for fields present in events, and when it finds them, it uses them to
+ # calculate aggregate statistics. If the `metricname` option is set in this
+ # output, then any events which pass through it will be aggregated & sent to
+ # CloudWatch, but that is not recommended. The intended use is to NOT set the
+ # metricname option here, and instead to add a `CW_metricname` field (and other
+ # fields) to only the events you want sent to CloudWatch.
+ #
+ # When events pass through this output they are queued for background
+ # aggregation and sending, which happens every minute by default. The
+ # queue has a maximum size, and when it is full aggregated statistics will be
+ # sent to CloudWatch ahead of schedule. Whenever this happens a warning
+ # message is written to logstash's log. If you see this you should increase
+ # the `queue_size` configuration option to avoid the extra API calls. The queue
+ # is emptied every time we send data to CloudWatch.
+ #
+ # Note: when logstash is stopped the queue is destroyed before it can be processed.
+ # This is a known limitation of logstash and will hopefully be addressed in a
+ # future version.
+ #
+ # #### Details:
+ # There are two ways to configure this plugin, and they can be used in
+ # combination: event fields & per-output defaults
+ #
+ # Event Field configuration...
+ # You add fields to your events in inputs & filters and this output reads
+ # those fields to aggregate events. The names of the fields read are
+ # configurable via the `field_*` options.
+ #
+ # Per-output defaults...
+ # You set universal defaults in this output plugin's configuration, and
+ # if an event does not have a field for that option then the default is
+ # used.
+ #
+ # Note that event fields take precedence over the per-output defaults.
+ #
+ # At a minimum events must have a "metric name" to be sent to CloudWatch.
+ # This can be achieved either by providing a default here OR by adding a
+ # `CW_metricname` field. By default, if no other configuration is provided
+ # besides a metric name, then events will be counted (Unit: Count, Value: 1)
+ # by their metric name (either a default or from their `CW_metricname` field).
+ #
+ # Other fields which can be added to events to modify the behavior of this
+ # plugin are `CW_namespace`, `CW_unit`, `CW_value`, and
+ # `CW_dimensions`. All of these field names are configurable in
+ # this output. You can also set per-output defaults for any of them.
+ # See below for details.
+ #
+ # Read more about [AWS CloudWatch](http://aws.amazon.com/cloudwatch/),
+ # and the specifics of the API endpoint this output uses,
+ # [PutMetricData](http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html)
+ class LogStash::Outputs::CloudWatch < LogStash::Outputs::Base
+   include LogStash::PluginMixins::AwsConfig
+
+   config_name "cloudwatch"
+   milestone 1
+
+   # Constants
+   # aggregate_key members
+   DIMENSIONS = "dimensions"
+   TIMESTAMP = "timestamp"
+   METRIC = "metric"
+   COUNT = "count"
+   UNIT = "unit"
+   SUM = "sum"
+   MIN = "min"
+   MAX = "max"
+   # Units
+   COUNT_UNIT = "Count"
+   NONE = "None"
+
+   # How often to send data to CloudWatch
+   # This does not affect the event timestamps, events will always have their
+   # actual timestamp (to-the-minute) sent to CloudWatch.
+   #
+   # We only call the API if there is data to send.
+   #
+   # See the Rufus Scheduler docs for an [explanation of allowed values](https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler)
+   config :timeframe, :validate => :string, :default => "1m"
+
+   # How many events to queue before forcing a call to the CloudWatch API ahead of `timeframe` schedule
+   # Set this to the number of events-per-timeframe you will be sending to CloudWatch to avoid extra API calls
+   config :queue_size, :validate => :number, :default => 10000
+
+   # The default namespace to use for events which do not have a `CW_namespace` field
+   config :namespace, :validate => :string, :default => "Logstash"
+
+   # The name of the field used to set a different namespace per event
+   # Note: Only one namespace can be sent to CloudWatch per API call
+   # so setting different namespaces will increase the number of API calls
+   # and those cost money.
+   config :field_namespace, :validate => :string, :default => "CW_namespace"
+
+   # The default metric name to use for events which do not have a `CW_metricname` field.
+   # Beware: If this is provided then all events which pass through this output will be aggregated and
+   # sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you
+   # will probably want to also restrict events from passing through this output using event
+   # type, tag, and field matching
+   config :metricname, :validate => :string
+
+   # The name of the field used to set the metric name on an event
+   # The author of this plugin recommends adding this field to events in inputs &
+   # filters rather than using the per-output default setting so that one output
+   # plugin on your logstash indexer can serve all events (which of course had
+   # fields set on your logstash shippers.)
+   config :field_metricname, :validate => :string, :default => "CW_metricname"
+
+   VALID_UNITS = ["Seconds", "Microseconds", "Milliseconds", "Bytes",
+                  "Kilobytes", "Megabytes", "Gigabytes", "Terabytes",
+                  "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits",
+                  "Percent", COUNT_UNIT, "Bytes/Second", "Kilobytes/Second",
+                  "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second",
+                  "Bits/Second", "Kilobits/Second", "Megabits/Second",
+                  "Gigabits/Second", "Terabits/Second", "Count/Second", NONE]
+
+   # The default unit to use for events which do not have a `CW_unit` field
+   # If you set this option you should probably set the "value" option along with it
+   config :unit, :validate => VALID_UNITS, :default => COUNT_UNIT
+
+   # The name of the field used to set the unit on an event metric
+   config :field_unit, :validate => :string, :default => "CW_unit"
+
+   # The default value to use for events which do not have a `CW_value` field
+   # If provided, this must be a string which can be converted to a float, for example...
+   # "1", "2.34", ".5", and "0.67"
+   # If you set this option you should probably set the `unit` option along with it
+   config :value, :validate => :string, :default => "1"
+
+   # The name of the field used to set the value (float) on an event metric
+   config :field_value, :validate => :string, :default => "CW_value"
+
+   # The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field
+   config :dimensions, :validate => :hash
+
+   # The name of the field used to set the dimensions on an event metric
+   # The field named here, if present in an event, must have an array of
+   # one or more key & value pairs, for example...
+   # `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]`
+   # or, equivalently...
+   # `add_field => [ "CW_dimensions", "Environment" ]`
+   # `add_field => [ "CW_dimensions", "prod" ]`
+   config :field_dimensions, :validate => :string, :default => "CW_dimensions"
+
+   public
+   def aws_service_endpoint(region)
+     return {
+       :cloud_watch_endpoint => "monitoring.#{region}.amazonaws.com"
+     }
+   end
+
+   public
+   def register
+     require "thread"
+     require "rufus/scheduler"
+     require "aws"
+
+     @cw = AWS::CloudWatch.new(aws_options_hash)
+
+     @event_queue = SizedQueue.new(@queue_size)
+     @scheduler = Rufus::Scheduler.start_new
+     @job = @scheduler.every @timeframe do
+       @logger.info("Scheduler Activated")
+       publish(aggregate({}))
+     end
+   end # def register
+
+   public
+   def receive(event)
+     return unless output?(event)
+
+     if event == LogStash::SHUTDOWN
+       @job.trigger()
+       @job.unschedule()
+       @logger.info("CloudWatch aggregator thread shutdown.")
+       finished
+       return
+     end
+
+     return unless (event[@field_metricname] || @metricname)
+
+     if (@event_queue.length >= @event_queue.max)
+       @job.trigger
+       @logger.warn("Posted to AWS CloudWatch ahead of schedule. If you see this often, consider increasing the cloudwatch queue_size option.")
+     end
+
+     @logger.info("Queueing event", :event => event)
+     @event_queue << event
+   end # def receive
+
+   private
+   def publish(aggregates)
+     aggregates.each do |namespace, data|
+       @logger.info("Namespace, data: ", :namespace => namespace, :data => data)
+       metric_data = []
+       data.each do |aggregate_key, stats|
+         new_data = {
+           :metric_name => aggregate_key[METRIC],
+           :timestamp => aggregate_key[TIMESTAMP],
+           :unit => aggregate_key[UNIT],
+           :statistic_values => {
+             :sample_count => stats[COUNT],
+             :sum => stats[SUM],
+             :minimum => stats[MIN],
+             :maximum => stats[MAX],
+           }
+         }
+         dims = aggregate_key[DIMENSIONS]
+         if (dims.is_a?(Array) && dims.length > 0 && (dims.length % 2) == 0)
+           new_data[:dimensions] = Array.new
+           i = 0
+           while (i < dims.length)
+             new_data[:dimensions] << { :name => dims[i], :value => dims[i + 1] }
+             i += 2
+           end
+         end
+         metric_data << new_data
+       end # data.each
+
+       begin
+         @cw.put_metric_data(
+           :namespace => namespace,
+           :metric_data => metric_data
+         )
+         @logger.info("Sent data to AWS CloudWatch OK", :namespace => namespace, :metric_data => metric_data)
+       rescue Exception => e
+         @logger.warn("Failed to send to AWS CloudWatch", :exception => e, :namespace => namespace, :metric_data => metric_data)
+         break
+       end
+     end # aggregates.each
+     return aggregates
+   end # def publish
+
+   private
+   def aggregate(aggregates)
+     @logger.info("QUEUE SIZE ", :queuesize => @event_queue.size)
+     while !@event_queue.empty? do
+       begin
+         count(aggregates, @event_queue.pop(true))
+       rescue Exception => e
+         @logger.warn("Exception! Breaking count loop", :exception => e)
+         break
+       end
+     end
+     return aggregates
+   end # def aggregate
+
+   private
+   def count(aggregates, event)
+     # If the event doesn't declare a namespace, use the default
+     fnamespace = field(event, @field_namespace)
+     namespace = (fnamespace ? fnamespace : event.sprintf(@namespace))
+
+     funit = field(event, @field_unit)
+     unit = (funit ? funit : event.sprintf(@unit))
+
+     fvalue = field(event, @field_value)
+     value = (fvalue ? fvalue : event.sprintf(@value))
+
+     # We may get to this point with valid Units but missing value. Send zeros.
+     val = (!value) ? 0.0 : value.to_f
+
+     # Event provides exactly one (but not both) of value or unit
+     if ((fvalue == nil) ^ (funit == nil))
+       @logger.warn("Likely config error: event has one of #{@field_value} or #{@field_unit} fields but not both.", :event => event)
+     end
+
+     # If Unit is still not set or is invalid, warn about misconfiguration & use NONE
+     if (!VALID_UNITS.include?(unit))
+       @logger.warn("Likely config error: invalid or missing Units (#{unit}), using '#{NONE}' instead", :event => event)
+       unit = NONE
+     end
+
+     if (!aggregates[namespace])
+       aggregates[namespace] = {}
+     end
+
+     dims = event[@field_dimensions]
+     if (dims) # event provides dimensions
+       # validate the structure
+       if (!dims.is_a?(Array) || dims.length == 0 || (dims.length % 2) != 0)
+         @logger.warn("Likely config error: CloudWatch dimensions field (#{dims}) found which is not a positive- & even-length array. Ignoring it.", :event => event)
+         dims = nil
+       end
+       # Best case, we get here and exit the conditional because dims...
+       # - is an array
+       # - with positive length
+       # - and an even number of elements
+     elsif (@dimensions.is_a?(Hash)) # event did not provide dimensions, but the output has been configured with a default
+       dims = @dimensions.flatten.map { |d| event.sprintf(d) } # into the kind of array described just above
+     else
+       dims = nil
+     end
+
+     fmetric = field(event, @field_metricname)
+     aggregate_key = {
+       METRIC => (fmetric ? fmetric : event.sprintf(@metricname)),
+       DIMENSIONS => dims,
+       UNIT => unit,
+       TIMESTAMP => event.sprintf("%{+YYYY-MM-dd'T'HH:mm:00Z}")
+     }
+
+     if (!aggregates[namespace][aggregate_key])
+       aggregates[namespace][aggregate_key] = {}
+     end
+
+     if (!aggregates[namespace][aggregate_key][MAX] || val > aggregates[namespace][aggregate_key][MAX])
+       aggregates[namespace][aggregate_key][MAX] = val
+     end
+
+     if (!aggregates[namespace][aggregate_key][MIN] || val < aggregates[namespace][aggregate_key][MIN])
+       aggregates[namespace][aggregate_key][MIN] = val
+     end
+
+     if (!aggregates[namespace][aggregate_key][COUNT])
+       aggregates[namespace][aggregate_key][COUNT] = 1
+     else
+       aggregates[namespace][aggregate_key][COUNT] += 1
+     end
+
+     if (!aggregates[namespace][aggregate_key][SUM])
+       aggregates[namespace][aggregate_key][SUM] = val
+     else
+       aggregates[namespace][aggregate_key][SUM] += val
+     end
+   end # def count
+
+   private
+   def field(event, fieldname)
+     if !event[fieldname]
+       return nil
+     else
+       if event[fieldname].is_a?(Array)
+         return event[fieldname][0]
+       else
+         return event[fieldname]
+       end
+     end
+   end # def field
+
+ end # class LogStash::Outputs::CloudWatch
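
To make the aggregation scheme above concrete, here is a minimal standalone sketch (plain Ruby, no logstash required): events sharing a (metric, dimensions, unit, timestamp) key are folded into count/sum/min/max, which is the shape `put_metric_data` expects in `:statistic_values`. The event hashes and values below are hypothetical.

```ruby
# Hypothetical events, as they might arrive with CW_* fields already set.
events = [
  { "CW_metricname" => "Latency", "CW_unit" => "Milliseconds", "CW_value" => "12.5",
    "CW_dimensions" => ["Environment", "prod"] },
  { "CW_metricname" => "Latency", "CW_unit" => "Milliseconds", "CW_value" => "30.0",
    "CW_dimensions" => ["Environment", "prod"] },
]

# aggregates[key] holds the running statistics for one CloudWatch datum.
aggregates = Hash.new do |h, k|
  h[k] = { "count" => 0, "sum" => 0.0, "min" => nil, "max" => nil }
end

events.each do |event|
  key = {
    "metric"     => event["CW_metricname"],
    "dimensions" => event["CW_dimensions"],
    "unit"       => event["CW_unit"] || "Count",
    # Timestamps are truncated to the minute, as in count() above.
    "timestamp"  => Time.now.utc.strftime("%Y-%m-%dT%H:%M:00Z"),
  }
  val   = (event["CW_value"] || "1").to_f
  stats = aggregates[key]
  stats["count"] += 1
  stats["sum"]   += val
  stats["min"]    = [stats["min"], val].compact.min
  stats["max"]    = [stats["max"], val].compact.max
end

aggregates.each do |key, stats|
  # Pair up the flat dimensions array, as publish() does.
  dims = (key["dimensions"] || []).each_slice(2).map do |name, value|
    { :name => name, :value => value }
  end
  p :metric_name => key["metric"], :dimensions => dims, :statistic_values => stats
end
```

Running it yields a single aggregate (count 2, sum 42.5, min 12.5, max 30.0): one PutMetricData datum instead of two API payload entries.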
data/logstash-output-cloudwatch.gemspec ADDED
@@ -0,0 +1,29 @@
+ Gem::Specification.new do |s|
+
+   s.name            = 'logstash-output-cloudwatch'
+   s.version         = '0.1.0'
+   s.licenses        = ['Apache License (2.0)']
+   s.summary         = "This output lets you aggregate and send metric data to AWS CloudWatch"
+   s.description     = "This output lets you aggregate and send metric data to AWS CloudWatch"
+   s.authors         = ["Elasticsearch"]
+   s.email           = 'richard.pijnenburg@elasticsearch.com'
+   s.homepage        = "http://logstash.net/"
+   s.require_paths   = ["lib"]
+
+   # Files
+   s.files = `git ls-files`.split($\) + ::Dir.glob('vendor/*')
+
+   # Tests
+   s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+   # Special flag to let us know this is actually a logstash plugin
+   s.metadata = { "logstash_plugin" => "true", "group" => "output" }
+
+   # Gem dependencies
+   s.add_runtime_dependency 'logstash', '>= 1.4.0', '< 2.0.0'
+   s.add_runtime_dependency 'logstash-mixin-aws'
+   s.add_runtime_dependency 'rufus-scheduler', [ '~> 2.0.24' ]
+   s.add_runtime_dependency 'aws-sdk'
+
+ end
+
data/rakelib/publish.rake ADDED
@@ -0,0 +1,9 @@
+ require "gem_publisher"
+
+ desc "Publish gem to RubyGems.org"
+ task :publish_gem do |t|
+   gem_file = Dir.glob(File.expand_path('../*.gemspec', File.dirname(__FILE__))).first
+   gem = GemPublisher.publish_if_updated(gem_file, :rubygems)
+   puts "Published #{gem}" if gem
+ end
+
data/rakelib/vendor.rake ADDED
@@ -0,0 +1,169 @@
+ require "net/http"
+ require "uri"
+ require "digest/sha1"
+
+ def vendor(*args)
+   return File.join("vendor", *args)
+ end
+
+ directory "vendor/" => ["vendor"] do |task, args|
+   mkdir task.name
+ end
+
+ def fetch(url, sha1, output)
+
+   puts "Downloading #{url}"
+   actual_sha1 = download(url, output)
+
+   if actual_sha1 != sha1
+     fail "SHA1 does not match (expected '#{sha1}' but got '#{actual_sha1}')"
+   end
+ end # def fetch
+
+ def file_fetch(url, sha1)
+   filename = File.basename(URI(url).path)
+   output = "vendor/#{filename}"
+   task output => [ "vendor/" ] do
+     begin
+       actual_sha1 = file_sha1(output)
+       if actual_sha1 != sha1
+         fetch(url, sha1, output)
+       end
+     rescue Errno::ENOENT
+       fetch(url, sha1, output)
+     end
+   end.invoke
+
+   return output
+ end
+
+ def file_sha1(path)
+   digest = Digest::SHA1.new
+   fd = File.new(path, "r")
+   while true
+     begin
+       digest << fd.sysread(16384)
+     rescue EOFError
+       break
+     end
+   end
+   return digest.hexdigest
+ ensure
+   fd.close if fd
+ end
+
+ def download(url, output)
+   uri = URI(url)
+   digest = Digest::SHA1.new
+   tmp = "#{output}.tmp"
+   Net::HTTP.start(uri.host, uri.port, :use_ssl => (uri.scheme == "https")) do |http|
+     request = Net::HTTP::Get.new(uri.path)
+     http.request(request) do |response|
+       fail "HTTP fetch failed for #{url}. #{response}" unless ["200", "301"].include?(response.code)
+       size = (response["content-length"] || -1).to_f
+       count = 0
+       File.open(tmp, "w") do |fd|
+         response.read_body do |chunk|
+           fd.write(chunk)
+           digest << chunk
+           if size > 0 && $stdout.tty?
+             count += chunk.bytesize
+             $stdout.write(sprintf("\r%0.2f%%", count / size * 100))
+           end
+         end
+       end
+       $stdout.write("\r \r") if $stdout.tty?
+     end
+   end
+
+   File.rename(tmp, output)
+
+   return digest.hexdigest
+ rescue SocketError => e
+   puts "Failure while downloading #{url}: #{e}"
+   raise
+ ensure
+   File.unlink(tmp) if File.exist?(tmp)
+ end # def download
+
+ def untar(tarball, &block)
+   require "archive/tar/minitar"
+   require "zlib"
+   tgz = Zlib::GzipReader.new(File.open(tarball))
+   tar = Archive::Tar::Minitar::Input.open(tgz)
+   tar.each do |entry|
+     path = block.call(entry)
+     next if path.nil?
+     parent = File.dirname(path)
+
+     mkdir_p parent unless File.directory?(parent)
+
+     # Skip this file if the output file is the same size
+     if entry.directory?
+       mkdir path unless File.directory?(path)
+     else
+       entry_mode = entry.instance_eval { @mode } & 0777
+       if File.exist?(path)
+         stat = File.stat(path)
+         # TODO(sissel): Submit a patch to archive-tar-minitar upstream to
+         # expose headers in the entry.
+         entry_size = entry.instance_eval { @size }
+         # If file sizes are same, skip writing.
+         next if stat.size == entry_size && (stat.mode & 0777) == entry_mode
+       end
+       puts "Extracting #{entry.full_name} from #{tarball} #{entry_mode.to_s(8)}"
+       File.open(path, "w") do |fd|
+         # eof? check lets us skip empty files. Necessary because the API provided by
+         # Archive::Tar::Minitar::Reader::EntryStream only mostly acts like an
+         # IO object. Something about empty files in this EntryStream causes
+         # IO.copy_stream to throw "can't convert nil into String" on JRuby
+         # TODO(sissel): File a bug about this.
+         while !entry.eof?
+           chunk = entry.read(16384)
+           fd.write(chunk)
+         end
+         #IO.copy_stream(entry, fd)
+       end
+       File.chmod(entry_mode, path)
+     end
+   end
+   tar.close
+   File.unlink(tarball) if File.file?(tarball)
+ end # def untar
+
+ def ungz(file)
+
+   outpath = file.gsub('.gz', '')
+   tgz = Zlib::GzipReader.new(File.open(file))
+   begin
+     File.open(outpath, "w") do |out|
+       IO::copy_stream(tgz, out)
+     end
+     File.unlink(file)
+   rescue
+     File.unlink(outpath) if File.file?(outpath)
+     raise
+   end
+   tgz.close
+ end
+
+ desc "Process any vendor files required for this plugin"
+ task "vendor" do |task, args|
+
+   @files.each do |file|
+     download = file_fetch(file['url'], file['sha1'])
+     if download =~ /\.tar\.gz$/
+       prefix = download.gsub('.tar.gz', '').gsub('vendor/', '')
+       untar(download) do |entry|
+         out = entry.full_name.split("/").last
+         unless file['files'].nil?
+           next unless file['files'].include?(entry.full_name.gsub(prefix, ''))
+         end
+         File.join('vendor', out)
+       end
+     elsif download =~ /\.gz$/
+       ungz(download)
+     end
+   end
+
+ end
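
The `vendor` task reads a class-level `@files` list, which is empty in this gem's Rakefile, so the task is a no-op here. For illustration only, an entry with the keys the task actually reads (`url`, `sha1`, and the optional `files` allowlist) might look like this; the URL and digest are hypothetical:

```ruby
@files = [
  {
    'url'   => 'https://example.com/collectd-5.4.0.tar.gz', # hypothetical URL
    'sha1'  => '0123456789abcdef0123456789abcdef01234567',  # hypothetical digest
    # Optional: extract only these paths (relative to the tarball prefix).
    'files' => ['/src/types.db'],
  },
]
```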
data/spec/outputs/cloudwatch_spec.rb ADDED
@@ -0,0 +1,18 @@
+ require "spec_helper"
+ require "logstash/plugin"
+ require "logstash/json"
+
+ describe "outputs/cloudwatch" do
+
+
+   output = LogStash::Plugin.lookup("output", "cloudwatch").new
+
+   it "should register" do
+     expect { output.register }.to_not raise_error
+   end
+
+   it "should respond correctly to a receive call" do
+     event = LogStash::Event.new
+     expect { output.receive(event) }.to_not raise_error
+   end
+ end
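
Note that the second example only exercises the early-return path: an empty event carries no `CW_metricname`, so `receive` drops it before queueing. A hedged sketch of a case that exercises the queueing path, assuming the same spec setup (and that `register` has already run), could look like:

```ruby
it "should queue events that carry a metric name" do
  # CW_metricname makes receive() enqueue the event rather than return early.
  event = LogStash::Event.new("CW_metricname" => "Errors")
  expect { output.receive(event) }.to_not raise_error
end
```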
metadata ADDED
@@ -0,0 +1,117 @@
+ --- !ruby/object:Gem::Specification
+ name: logstash-output-cloudwatch
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: ruby
+ authors:
+ - Elasticsearch
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2014-11-06 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: logstash
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: 1.4.0
+     - - <
+       - !ruby/object:Gem::Version
+         version: 2.0.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: 1.4.0
+     - - <
+       - !ruby/object:Gem::Version
+         version: 2.0.0
+ - !ruby/object:Gem::Dependency
+   name: logstash-mixin-aws
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: rufus-scheduler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 2.0.24
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 2.0.24
+ - !ruby/object:Gem::Dependency
+   name: aws-sdk
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ description: This output lets you aggregate and send metric data to AWS CloudWatch
+ email: richard.pijnenburg@elasticsearch.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - .gitignore
+ - Gemfile
+ - LICENSE
+ - Rakefile
+ - lib/logstash/outputs/cloudwatch.rb
+ - logstash-output-cloudwatch.gemspec
+ - rakelib/publish.rake
+ - rakelib/vendor.rake
+ - spec/outputs/cloudwatch_spec.rb
+ homepage: http://logstash.net/
+ licenses:
+ - Apache License (2.0)
+ metadata:
+   logstash_plugin: 'true'
+   group: output
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.4.1
+ signing_key:
+ specification_version: 4
+ summary: This output lets you aggregate and send metric data to AWS CloudWatch
+ test_files:
+ - spec/outputs/cloudwatch_spec.rb