logstash-integration-aws 0.1.0.pre
- checksums.yaml +7 -0
- data/CHANGELOG.PRE.MERGE.md +658 -0
- data/CHANGELOG.md +15 -0
- data/CONTRIBUTORS +40 -0
- data/Gemfile +11 -0
- data/LICENSE +202 -0
- data/NOTICE.TXT +5 -0
- data/README.md +205 -0
- data/docs/codec-cloudfront.asciidoc +53 -0
- data/docs/codec-cloudtrail.asciidoc +45 -0
- data/docs/index.asciidoc +38 -0
- data/docs/input-cloudwatch.asciidoc +320 -0
- data/docs/input-s3.asciidoc +346 -0
- data/docs/input-sqs.asciidoc +287 -0
- data/docs/output-cloudwatch.asciidoc +321 -0
- data/docs/output-s3.asciidoc +442 -0
- data/docs/output-sns.asciidoc +166 -0
- data/docs/output-sqs.asciidoc +242 -0
- data/lib/logstash/codecs/cloudfront.rb +84 -0
- data/lib/logstash/codecs/cloudtrail.rb +47 -0
- data/lib/logstash/inputs/cloudwatch.rb +338 -0
- data/lib/logstash/inputs/s3.rb +466 -0
- data/lib/logstash/inputs/sqs.rb +196 -0
- data/lib/logstash/outputs/cloudwatch.rb +346 -0
- data/lib/logstash/outputs/s3/file_repository.rb +121 -0
- data/lib/logstash/outputs/s3/path_validator.rb +18 -0
- data/lib/logstash/outputs/s3/size_and_time_rotation_policy.rb +24 -0
- data/lib/logstash/outputs/s3/size_rotation_policy.rb +26 -0
- data/lib/logstash/outputs/s3/temporary_file.rb +71 -0
- data/lib/logstash/outputs/s3/temporary_file_factory.rb +129 -0
- data/lib/logstash/outputs/s3/time_rotation_policy.rb +26 -0
- data/lib/logstash/outputs/s3/uploader.rb +74 -0
- data/lib/logstash/outputs/s3/writable_directory_validator.rb +17 -0
- data/lib/logstash/outputs/s3/write_bucket_permission_validator.rb +60 -0
- data/lib/logstash/outputs/s3.rb +405 -0
- data/lib/logstash/outputs/sns.rb +133 -0
- data/lib/logstash/outputs/sqs.rb +167 -0
- data/lib/logstash/plugin_mixins/aws_config/generic.rb +54 -0
- data/lib/logstash/plugin_mixins/aws_config/v2.rb +93 -0
- data/lib/logstash/plugin_mixins/aws_config.rb +8 -0
- data/logstash-integration-aws.gemspec +52 -0
- data/spec/codecs/cloudfront_spec.rb +92 -0
- data/spec/codecs/cloudtrail_spec.rb +56 -0
- data/spec/fixtures/aws_credentials_file_sample_test.yml +2 -0
- data/spec/fixtures/aws_temporary_credentials_file_sample_test.yml +3 -0
- data/spec/fixtures/cloudfront.log +4 -0
- data/spec/fixtures/compressed.log.gee.zip +0 -0
- data/spec/fixtures/compressed.log.gz +0 -0
- data/spec/fixtures/compressed.log.gzip +0 -0
- data/spec/fixtures/invalid_utf8.gbk.log +2 -0
- data/spec/fixtures/json.log +2 -0
- data/spec/fixtures/json_with_message.log +2 -0
- data/spec/fixtures/multiline.log +6 -0
- data/spec/fixtures/multiple_compressed_streams.gz +0 -0
- data/spec/fixtures/uncompressed.log +2 -0
- data/spec/inputs/cloudwatch_spec.rb +85 -0
- data/spec/inputs/s3_spec.rb +610 -0
- data/spec/inputs/sincedb_spec.rb +17 -0
- data/spec/inputs/sqs_spec.rb +324 -0
- data/spec/integration/cloudwatch_spec.rb +25 -0
- data/spec/integration/dynamic_prefix_spec.rb +92 -0
- data/spec/integration/gzip_file_spec.rb +62 -0
- data/spec/integration/gzip_size_rotation_spec.rb +63 -0
- data/spec/integration/outputs/sqs_spec.rb +98 -0
- data/spec/integration/restore_from_crash_spec.rb +67 -0
- data/spec/integration/s3_spec.rb +66 -0
- data/spec/integration/size_rotation_spec.rb +59 -0
- data/spec/integration/sqs_spec.rb +110 -0
- data/spec/integration/stress_test_spec.rb +60 -0
- data/spec/integration/time_based_rotation_with_constant_write_spec.rb +60 -0
- data/spec/integration/time_based_rotation_with_stale_write_spec.rb +64 -0
- data/spec/integration/upload_current_file_on_shutdown_spec.rb +51 -0
- data/spec/outputs/cloudwatch_spec.rb +38 -0
- data/spec/outputs/s3/file_repository_spec.rb +143 -0
- data/spec/outputs/s3/size_and_time_rotation_policy_spec.rb +77 -0
- data/spec/outputs/s3/size_rotation_policy_spec.rb +41 -0
- data/spec/outputs/s3/temporary_file_factory_spec.rb +89 -0
- data/spec/outputs/s3/temporary_file_spec.rb +47 -0
- data/spec/outputs/s3/time_rotation_policy_spec.rb +60 -0
- data/spec/outputs/s3/uploader_spec.rb +69 -0
- data/spec/outputs/s3/writable_directory_validator_spec.rb +40 -0
- data/spec/outputs/s3/write_bucket_permission_validator_spec.rb +49 -0
- data/spec/outputs/s3_spec.rb +232 -0
- data/spec/outputs/sns_spec.rb +160 -0
- data/spec/plugin_mixin/aws_config_spec.rb +217 -0
- data/spec/spec_helper.rb +8 -0
- data/spec/support/helpers.rb +119 -0
- data/spec/unit/outputs/sqs_spec.rb +247 -0
- metadata +467 -0
data/lib/logstash/outputs/sqs.rb
@@ -0,0 +1,167 @@
# encoding: utf-8

require 'aws-sdk-sqs'
require 'logstash/errors'
require 'logstash/namespace'
require 'logstash/outputs/base'
require 'logstash/plugin_mixins/aws_config'

# Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue.
#
# SQS is a simple, scalable queue system that is part of the Amazon Web
# Services suite of tools. Although SQS is similar to other queuing systems
# such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and
# requires that you have an AWS account. See http://aws.amazon.com/sqs/ for
# more details on how SQS works, what the pricing schedule looks like and how
# to set up a queue.
#
# The "consumer" identity must have the following permissions on the queue:
#
# * `sqs:GetQueueUrl`
# * `sqs:SendMessage`
# * `sqs:SendMessageBatch`
#
# Typically, you should set up an IAM policy, create a user and apply the IAM
# policy to the user. See http://aws.amazon.com/iam/ for more details on
# setting up AWS identities. A sample policy is as follows:
#
# [source,json]
# {
#   "Version": "2012-10-17",
#   "Statement": [
#     {
#       "Effect": "Allow",
#       "Action": [
#         "sqs:GetQueueUrl",
#         "sqs:SendMessage",
#         "sqs:SendMessageBatch"
#       ],
#       "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue"
#     }
#   ]
# }
#
# ==== Batch Publishing
# This output publishes messages to SQS in batches in order to optimize event
# throughput and increase performance. This is done using the
# [`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html)
# API. When publishing messages to SQS in batches, the following service limits
# must be respected (see
# [Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)):
#
# * The maximum allowed individual message size is 256KiB.
# * The maximum total payload size (i.e. the sum of the sizes of all
#   individual messages within a batch) is also 256KiB.
#
# This plugin will dynamically adjust the size of the batch published to SQS in
# order to ensure that the total payload size does not exceed 256KiB.
#
# WARNING: This output cannot currently handle messages larger than 256KiB. Any
# single message exceeding this size will be dropped.
#
class LogStash::Outputs::SQS < LogStash::Outputs::Base
  include LogStash::PluginMixins::AwsConfig::V2

  config_name 'sqs'
  default :codec, 'json'

  concurrency :shared

  # The number of events to be sent in each batch. Set this to `1` to disable
  # the batch sending of messages.
  config :batch_events, :validate => :number, :default => 10

  # The maximum number of bytes for any message sent to SQS. Messages exceeding
  # this size will be dropped. See
  # http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html.
  config :message_max_size, :validate => :bytes, :default => '256KiB'

  # The name of the target SQS queue. Note that this is just the name of the
  # queue, not the URL or ARN.
  config :queue, :validate => :string, :required => true

  # Account ID of the AWS account which owns the queue. Note that IAM
  # permissions need to be configured on both accounts for this to function.
  config :queue_owner_aws_account_id, :validate => :string, :required => false

  public
  def register
    @sqs = Aws::SQS::Client.new(aws_options_hash)

    if @batch_events > 10
      raise LogStash::ConfigurationError, 'The maximum batch size is 10 events'
    elsif @batch_events < 1
      raise LogStash::ConfigurationError, 'The batch size must be greater than 0'
    end

    begin
      params = { queue_name: @queue }
      params[:queue_owner_aws_account_id] = @queue_owner_aws_account_id if @queue_owner_aws_account_id

      @logger.debug('Connecting to SQS queue', params.merge(region: region))
      @queue_url = @sqs.get_queue_url(params)[:queue_url]
      @logger.info('Connected to SQS queue successfully', params.merge(region: region))
    rescue Aws::SQS::Errors::ServiceError => e
      @logger.error('Failed to connect to SQS', :error => e)
      raise LogStash::ConfigurationError, 'Verify the SQS queue name and your credentials'
    end
  end

  public
  def multi_receive_encoded(encoded_events)
    if @batch_events > 1
      multi_receive_encoded_batch(encoded_events)
    else
      multi_receive_encoded_single(encoded_events)
    end
  end

  private
  def multi_receive_encoded_batch(encoded_events)
    bytes = 0
    entries = []

    # Split the events into multiple batches to ensure that no single batch
    # exceeds `@message_max_size` bytes.
    encoded_events.each_with_index do |encoded_event, index|
      event, encoded = encoded_event

      if encoded.bytesize > @message_max_size
        @logger.warn('Message exceeds maximum length and will be dropped', :message_size => encoded.bytesize)
        next
      end

      if entries.size >= @batch_events or (bytes + encoded.bytesize) > @message_max_size
        send_message_batch(entries)

        bytes = 0
        entries = []
      end

      bytes += encoded.bytesize
      entries.push(:id => index.to_s, :message_body => encoded)
    end

    send_message_batch(entries) unless entries.empty?
  end

  private
  def multi_receive_encoded_single(encoded_events)
    encoded_events.each do |encoded_event|
      event, encoded = encoded_event

      if encoded.bytesize > @message_max_size
        @logger.warn('Message exceeds maximum length and will be dropped', :message_size => encoded.bytesize)
        next
      end

      @sqs.send_message(:queue_url => @queue_url, :message_body => encoded)
    end
  end

  private
  def send_message_batch(entries)
    @logger.debug("Publishing #{entries.size} messages to SQS", :queue_url => @queue_url, :entries => entries)
    @sqs.send_message_batch(:queue_url => @queue_url, :entries => entries)
  end
end
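For orientation, a minimal sketch of exercising this output in isolation, assuming stubbed AWS responses (the same `Aws.config[:stub_responses]` trick the CloudWatch spec later in this diff uses) so that `register` never contacts a real queue; the queue name is a placeholder:

    require "logstash/outputs/sqs"

    Aws.config[:stub_responses] = true  # assumption: stub the SDK so get_queue_url is not a real call

    output = LogStash::Outputs::SQS.new(
      "queue"        => "my-sqs-queue",  # placeholder; name only, not URL or ARN
      "region"       => "us-east-1",
      "batch_events" => 10               # values outside 1..10 raise LogStash::ConfigurationError
    )
    output.register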
data/lib/logstash/plugin_mixins/aws_config/generic.rb
@@ -0,0 +1,54 @@
module LogStash::PluginMixins::AwsConfig::Generic
  def self.included(base)
    base.extend(self)
    base.generic_aws_config
  end

  def generic_aws_config
    # The AWS Region
    config :region, :validate => :string, :default => LogStash::PluginMixins::AwsConfig::US_EAST_1

    # This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
    #
    # 1. Static configuration, using `access_key_id` and `secret_access_key` params in the logstash plugin config
    # 2. External credentials file specified by `aws_credentials_file`
    # 3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
    # 4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
    # 5. IAM Instance Profile (available when running inside EC2)
    config :access_key_id, :validate => :string

    # The AWS Secret Access Key
    config :secret_access_key, :validate => :password

    # The AWS Session token for temporary credentials
    config :session_token, :validate => :password

    # URI to the proxy server, if required
    config :proxy_uri, :validate => :string

    # Custom endpoint to connect to S3
    config :endpoint, :validate => :string

    # The AWS IAM Role to assume, if any.
    # This is used to generate temporary credentials, typically for cross-account access.
    # See https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html for more information.
    # When `role_arn` is set, AWS (`access_key_id`/`secret_access_key`) credentials still get used if they're configured.
    config :role_arn, :validate => :string

    # Session name to use when assuming an IAM role
    config :role_session_name, :validate => :string, :default => "logstash"

    # Path to YAML file containing a hash of AWS credentials.
    # This file will only be loaded if `access_key_id` and
    # `secret_access_key` aren't set. The contents of the
    # file should look like this:
    #
    # [source,ruby]
    # ----------------------------------
    # :access_key_id: "12345"
    # :secret_access_key: "54321"
    # ----------------------------------
    #
    config :aws_credentials_file, :validate => :string
  end
end
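Where the assume-role options above come together, a hedged configuration sketch (the role ARN and queue name are placeholders; any plugin that includes this mixin accepts the same options, and base `access_key_id`/`secret_access_key` credentials, if set, are still used for the STS call):

    output = LogStash::Outputs::SQS.new(
      "queue"             => "my-sqs-queue",
      "region"            => "us-east-1",
      "role_arn"          => "arn:aws:iam::123456789012:role/logstash-writer",  # placeholder ARN
      "role_session_name" => "logstash"                                         # the default, shown for clarity
    )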
data/lib/logstash/plugin_mixins/aws_config/v2.rb
@@ -0,0 +1,93 @@
# encoding: utf-8
require "logstash/plugin_mixins/aws_config/generic"

module LogStash::PluginMixins::AwsConfig::V2
  def self.included(base)
    base.extend(self)
    base.send(:include, LogStash::PluginMixins::AwsConfig::Generic)
  end

  public
  def aws_options_hash
    opts = {}

    opts[:http_proxy] = @proxy_uri if @proxy_uri

    if @role_arn
      credentials = assume_role(opts.dup)
      opts[:credentials] = credentials
    else
      credentials = aws_credentials
      opts[:credentials] = credentials if credentials
    end

    if self.respond_to?(:aws_service_endpoint)
      # used by CloudWatch to basically do the same as below (returns { region: region })
      opts.merge!(self.aws_service_endpoint(@region))
    else
      # NOTE: setting :region works with the aws sdk (resolves correct endpoint)
      opts[:region] = @region
    end

    opts[:endpoint] = @endpoint unless @endpoint.nil?

    if respond_to?(:additional_settings)
      opts = symbolize_keys_and_cast_true_false(additional_settings).merge(opts)
    end

    return opts
  end

  private

  def aws_credentials
    if @access_key_id && @secret_access_key
      Aws::Credentials.new(@access_key_id, @secret_access_key.value, @session_token ? @session_token.value : nil)
    elsif @access_key_id.nil? ^ @secret_access_key.nil?
      @logger.warn("Likely config error: Only one of access_key_id or secret_access_key was provided but not both.")
      secret_access_key = @secret_access_key ? @secret_access_key.value : nil
      Aws::Credentials.new(@access_key_id, secret_access_key, @session_token ? @session_token.value : nil)
    elsif @aws_credentials_file
      credentials_opts = YAML.load_file(@aws_credentials_file)
      credentials_opts.default_proc = lambda { |hash, key| hash.fetch(key.to_s, nil) }
      Aws::Credentials.new(credentials_opts[:access_key_id],
                           credentials_opts[:secret_access_key],
                           credentials_opts[:session_token])
    else
      nil # AWS client will read ENV or ~/.aws/credentials
    end
  end
  alias credentials aws_credentials

  def assume_role(opts = {})
    unless opts.key?(:credentials)
      credentials = aws_credentials
      opts[:credentials] = credentials if credentials
    end

    # for a regional endpoint :region is always required by AWS
    opts[:region] = @region

    Aws::AssumeRoleCredentials.new(
      :client => Aws::STS::Client.new(opts),
      :role_arn => @role_arn,
      :role_session_name => @role_session_name
    )
  end

  def symbolize_keys_and_cast_true_false(hash)
    case hash
    when Hash
      symbolized = {}
      hash.each { |key, value| symbolized[key.to_sym] = symbolize_keys_and_cast_true_false(value) }
      symbolized
    when 'true'
      true
    when 'false'
      false
    else
      hash
    end
  end

end
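A minimal sketch of how a plugin consumes this mixin, mirroring the SQS output earlier in this diff (the plugin class and name here are hypothetical):

    class LogStash::Outputs::MyAwsSink < LogStash::Outputs::Base
      include LogStash::PluginMixins::AwsConfig::V2  # also pulls in the Generic config options

      config_name 'my_aws_sink'  # hypothetical plugin name

      def register
        # aws_options_hash resolves credentials (static, file, assume-role, or
        # SDK defaults), region, proxy and custom endpoint into one hash.
        @client = Aws::SQS::Client.new(aws_options_hash)
      end
    end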
data/logstash-integration-aws.gemspec
@@ -0,0 +1,52 @@
Gem::Specification.new do |s|
  s.name = "logstash-integration-aws"
  s.version = "0.1.0.pre"
  s.licenses = ["Apache-2.0"]
  s.summary = "Collection of Logstash plugins that integrate with AWS"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
  s.authors = ["Elastic"]
  s.email = "info@elastic.co"
  s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
  s.metadata = {
    "logstash_plugin" => "true",
    "logstash_group" => "integration",
    "integration_plugins" => %w(
      logstash-codec-cloudfront
      logstash-codec-cloudtrail
      logstash-input-cloudwatch
      logstash-input-s3
      logstash-input-sqs
      logstash-mixin-aws
      logstash-output-cloudwatch
      logstash-output-s3
      logstash-output-sns
      logstash-output-sqs).join(",")
  }

  s.require_paths = ["lib"]
  s.files = Dir["lib/**/*", "spec/**/*", "*.gemspec", "*.md", "CONTRIBUTORS", "Gemfile", "LICENSE", "NOTICE.TXT", "VERSION", "docs/**/*"]
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  s.add_runtime_dependency "logstash-core-plugin-api", ">= 2.1.12", "<= 2.99"
  s.add_runtime_dependency "concurrent-ruby"
  s.add_runtime_dependency "logstash-codec-json"
  s.add_runtime_dependency "logstash-codec-plain"
  s.add_runtime_dependency "rufus-scheduler", ">= 3.0.9"
  s.add_runtime_dependency "stud", "~> 0.0.22"
  s.add_runtime_dependency "aws-sdk-core", "~> 3"
  s.add_runtime_dependency "aws-sdk-s3"
  s.add_runtime_dependency "aws-sdk-sqs"
  s.add_runtime_dependency "aws-sdk-sns"
  s.add_runtime_dependency "aws-sdk-cloudwatch"
  s.add_runtime_dependency "aws-sdk-cloudfront"
  s.add_runtime_dependency "aws-sdk-resourcegroups"

  s.add_development_dependency "logstash-codec-json_lines"
  s.add_development_dependency "logstash-codec-multiline"
  s.add_development_dependency "logstash-codec-json"
  s.add_development_dependency "logstash-codec-line"
  s.add_development_dependency "logstash-devutils"
  s.add_development_dependency "logstash-input-generator"
  s.add_development_dependency "timecop"
end
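For local development, a hedged sketch of pulling this pre-release into a Logstash Gemfile; in normal use the gem is installed with `bin/logstash-plugin install logstash-integration-aws`, per the description above:

    # Gemfile (assumption: a Logstash source checkout)
    gem "logstash-integration-aws", "0.1.0.pre"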
data/spec/codecs/cloudfront_spec.rb
@@ -0,0 +1,92 @@
# encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "logstash/codecs/cloudfront"
require "logstash/errors"
require "stringio"
require "zlib"

def compress_with_gzip(io)
  compressed = StringIO.new('', 'r+b')

  gzip = Zlib::GzipWriter.new(compressed)
  gzip.write(io.read)
  gzip.finish

  compressed.rewind

  compressed
end

describe LogStash::Codecs::Cloudfront do
  let!(:uncompressed_cloudfront_log) do
    # Using format from
    # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html
    str = StringIO.new

    str << "#Version: 1.0\n"
    str << "#Fields: date time x-edge-location c-ip x-event sc-bytes x-cf-status x-cf-client-id cs-uri-stem cs-uri-query c-referrer x-page-url c-user-agent x-sname x-sname-query x-file-ext x-sid\n"
    str << "2010-03-12 23:51:20 SEA4 192.0.2.147 connect 2014 OK bfd8a98bee0840d9b871b7f6ade9908f rtmp://shqshne4jdp4b6.cloudfront.net/cfx/st key=value http://player.longtailvideo.com/player.swf http://www.longtailvideo.com/support/jw-player-setup-wizard?example=204 LNX%2010,0,32,18 - - - -\n"
    str << "2010-03-12 23:51:21 SEA4 192.0.2.222 play 3914 OK bfd8a98bee0840d9b871b7f6ade9908f rtmp://shqshne4jdp4b6.cloudfront.net/cfx/st key=value http://player.longtailvideo.com/player.swf http://www.longtailvideo.com/support/jw-player-setup-wizard?example=204 LNX%2010,0,32,18 myvideo p=2&q=4 flv 1\n"

    str.rewind
    str
  end

  describe "#decode" do
    it "should create events from a gzip file" do
      events = []

      subject.decode(compress_with_gzip(uncompressed_cloudfront_log)) do |event|
        events << event
      end

      expect(events.size).to eq(2)
    end

    it 'should extract the metadata of the file' do
      events = []

      subject.decode(compress_with_gzip(uncompressed_cloudfront_log)) do |event|
        events << event
      end

      expect(events.first.get("cloudfront_version")).to eq("1.0")
      expect(events.first.get("cloudfront_fields")).to eq("date time x-edge-location c-ip x-event sc-bytes x-cf-status x-cf-client-id cs-uri-stem cs-uri-query c-referrer x-page-url c-user-agent x-sname x-sname-query x-file-ext x-sid")
    end
  end

  describe "#extract_version" do
    it "returns the version from a matched string" do
      line = "#Version: 1.0"

      expect(subject.extract_version(line)).to eq("1.0")
    end

    it "doesn't return anything if the version isn't matched" do
      line = "Bleh my string"
      expect(subject.extract_version(line)).to eq(nil)
    end

    it "doesn't match if #Version is not at the beginning of the string" do
      line = "2010-03-12 23:53:44 SEA4 192.0.2.4 stop 323914 OK bfd8a98bee0840d9b871b7f6ade9908f #Version: 1.0 Bleh blah"
      expect(subject.extract_version(line)).to eq(nil)
    end
  end

  describe "#extract_fields" do
    it "returns a string with all the fields" do
      line = "#Fields: date time x-edge-location c-ip x-event sc-bytes x-cf-status x-cf-client-id cs-uri-stem cs-uri-query c-referrer x-page-url c-user-agent x-sname x-sname-query x-file-ext x-sid"
      expect(subject.extract_fields(line)).to eq("date time x-edge-location c-ip x-event sc-bytes x-cf-status x-cf-client-id cs-uri-stem cs-uri-query c-referrer x-page-url c-user-agent x-sname x-sname-query x-file-ext x-sid")
    end

    it "doesn't return anything if we can't match the fields list" do
      line = "Bleh my string"
      expect(subject.extract_fields(line)).to eq(nil)
    end

    it "doesn't match if #Fields: is not at the beginning of the string" do
      line = "2010-03-12 23:53:44 SEA4 192.0.2.4 stop 323914 OK bfd8a98bee0840d9b871b7f6ade9908f #Fields: 1.0 Bleh blah"
      expect(subject.extract_fields(line)).to eq(nil)
    end
  end
end
data/spec/codecs/cloudtrail_spec.rb
@@ -0,0 +1,56 @@
require "logstash/devutils/rspec/spec_helper"
require "logstash/plugin"
require "logstash/codecs/cloudtrail"
require 'resolv'

describe LogStash::Codecs::CloudTrail do

  shared_examples_for "it handles valid ip addresses" do
    it 'should pass through valid ip addresses' do
      ip_addresses.each do |valid_ip_address|
        subject.decode("{\"Records\":[{\"sourceIpAddress\":\"#{valid_ip_address}\"}]}") do |event|
          expect(event.get("sourceIpAddress")).to eq(valid_ip_address)
          expect(event.get("sourceHost")).to be_nil
        end
      end
    end
  end

  describe '#decode' do
    it 'accepts data without a Records property' do
      expect { |b|
        subject.decode('{}', &b)
      }.not_to yield_control
    end

    it 'accepts records with null requestParameters' do
      expect { |b|
        subject.decode('{"Records":[{"requestParameters":null}]}', &b)
      }.to yield_control
    end

    context 'with ipv4 sourceIpAddress values' do
      let(:ip_addresses) { ["127.0.0.1", "8.8.8.8", "10.10.10.10", "100.100.100.100", "1.12.123.234"] }
      it_behaves_like 'it handles valid ip addresses'
    end

    context 'with ipv6 sourceIpAddress values' do
      let(:ip_addresses) { ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:db8:85a3::8a2e:370:7334", "::1", "::"] }
      it_behaves_like 'it handles valid ip addresses'
    end

    it 'accepts records with an invalid sourceIpAddress' do
      subject.decode('{"Records":[{"sourceIpAddress":"www.elastic.co"}]}') do |event|
        expect(event.get("sourceIpAddress")).to be_nil
        expect(event.get("sourceHost")).to eq("www.elastic.co")
      end
    end

    it 'accepts records with no sourceIpAddress' do
      subject.decode('{"Records":[{"sourceIpAddress":null}]}') do |event|
        expect(event.get("sourceIpAddress")).to be_nil
        expect(event.get("sourceHost")).to be_nil
      end
    end
  end
end
data/spec/fixtures/cloudfront.log
@@ -0,0 +1,4 @@
#Version: 1.0
#Fields: date time x-edge-location c-ip x-event sc-bytes x-cf-status x-cf-client-id cs-uri-stem cs-uri-query c-referrer x-page-url c-user-agent x-sname x-sname-query x-file-ext x-sid
2010-03-12 23:51:20 SEA4 192.0.2.147 connect 2014 OK bfd8a98bee0840d9b871b7f6ade9908f rtmp://shqshne4jdp4b6.cloudfront.net/cfx/st key=value http://player.longtailvideo.com/player.swf http://www.longtailvideo.com/support/jw-player-setup-wizard?example=204 LNX%2010,0,32,18 - - - -
2010-03-12 23:51:21 SEA4 192.0.2.222 play 3914 OK bfd8a98bee0840d9b871b7f6ade9908f rtmp://shqshne4jdp4b6.cloudfront.net/cfx/st key=value http://player.longtailvideo.com/player.swf http://www.longtailvideo.com/support/jw-player-setup-wizard?example=204 LNX%2010,0,32,18 myvideo p=2&q=4 flv 1
Binary fixture files (contents not shown): data/spec/fixtures/compressed.log.gee.zip, compressed.log.gz, compressed.log.gzip, multiple_compressed_streams.gz
data/spec/fixtures/uncompressed.log
@@ -0,0 +1,2 @@
2010-03-12 23:51:20 SEA4 192.0.2.147 connect 2014 OK bfd8a98bee0840d9b871b7f6ade9908f rtmp://shqshne4jdp4b6.cloudfront.net/cfx/st key=value http://player.longtailvideo.com/player.swf http://www.longtailvideo.com/support/jw-player-setup-wizard?example=204 LNX%2010,0,32,18 - - - -
2010-03-12 23:51:21 SEA4 192.0.2.222 play 3914 OK bfd8a98bee0840d9b871b7f6ade9908f rtmp://shqshne4jdp4b6.cloudfront.net/cfx/st key=value http://player.longtailvideo.com/player.swf http://www.longtailvideo.com/support/jw-player-setup-wizard?example=204 LNX%2010,0,32,18 myvideo p=2&q=4 flv 1
data/spec/inputs/cloudwatch_spec.rb
@@ -0,0 +1,85 @@
require 'logstash/devutils/rspec/spec_helper'
require 'logstash/devutils/rspec/shared_examples'
require 'logstash/inputs/cloudwatch'

describe LogStash::Inputs::CloudWatch do
  subject { LogStash::Inputs::CloudWatch.new(config) }
  let(:config) {
    {
      'access_key_id' => '1234',
      'secret_access_key' => 'secret',
      'metrics' => [ 'CPUUtilization' ],
      'region' => 'us-east-1'
    }
  }

  before do
    Aws.config[:stub_responses] = true
    Thread.abort_on_exception = true
  end

  shared_examples_for 'it requires filters' do
    context 'without filters' do
      it "raises an error" do
        expect { subject.register }.to raise_error(StandardError)
      end
    end

    context 'with filters' do
      let (:config) { super().merge('filters' => { 'tag:Monitoring' => 'Yes' })}

      it "registers successfully" do
        expect { subject.register }.to_not raise_error
      end
    end
  end

  shared_examples_for 'it does not require filters' do
    context 'without filters' do
      it "registers successfully" do
        expect { subject.register }.to_not raise_error
      end
    end

    context 'with filters' do
      let (:config) { super().merge('filters' => { 'tag:Monitoring' => 'Yes' })}

      it "registers successfully" do
        expect { subject.register }.to_not raise_error
      end
    end
  end

  describe 'shutdown' do
    let(:metrics) { double("metrics") }
    let(:config) { super().merge('namespace' => 'AWS/EC2') }

    before do
      allow(subject).to receive(:metrics_for).and_return(metrics)
      allow(metrics).to receive(:count).and_return(1)
      allow(metrics).to receive(:each).and_return(['DiskWriteBytes'])
    end

    it_behaves_like "an interruptible input plugin"
  end

  describe '#register' do

    context "EC2 namespace" do
      let(:config) { super().merge('namespace' => 'AWS/EC2') }
      it_behaves_like 'it does not require filters'
    end

    context "EBS namespace" do
      let(:config) { super().merge('namespace' => 'AWS/EBS') }
      it_behaves_like 'it requires filters'
    end

    context "RDS namespace" do
      let(:config) { super().merge('namespace' => 'AWS/RDS') }
      it_behaves_like 'it requires filters'
    end

  end
end
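Following the spec's configuration, a minimal sketch of registering the input by hand, assuming stubbed responses as in the `before` block (the EBS namespace requires `filters`, per the shared examples above):

    Aws.config[:stub_responses] = true

    input = LogStash::Inputs::CloudWatch.new(
      'access_key_id'     => '1234',      # dummy values, as in the spec
      'secret_access_key' => 'secret',
      'metrics'           => ['CPUUtilization'],
      'region'            => 'us-east-1',
      'namespace'         => 'AWS/EBS',
      'filters'           => { 'tag:Monitoring' => 'Yes' }
    )
    input.register   # would raise StandardError if filters were omitted for AWS/EBS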