logstash-output-amazon_es 0.1.0 → 0.1.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: f4be0ad347662212f575bffa969d9615f2adce96
-  data.tar.gz: 733cf9fc22f94f0e0abe2ffdd20ca3678a9d8efe
+  metadata.gz: fa08a9c4871b87099540ea32d0483dc5139a1cf4
+  data.tar.gz: 78aec4f345108386564cd42d66dad11c88ce8c6d
 SHA512:
-  metadata.gz: e2a50db3166e1c20d5bcb54e173d9d6b65114b97939b00aa0d063d10abc276d2c9156c6489e6349826272ed7a73d176eb48575ffd7a707e1df9186db86a0f550
-  data.tar.gz: 5ae7de589e20025120db13d9b9988c0ebcae2e985c071d9f2e44402df8aeb0ccc4af255ee406d2e6f74a44060f6b936ee0f6ad97ded6f2069a42a1f82025e3ab
+  metadata.gz: 35a4b6abc9a18a7140cbadefaeae8c9b641869621017ba310947573398f762c86b7ef02ace17cb52ec78dbba87985320141022bdb582b610942746ff85873970
+  data.tar.gz: 283f9c8fc7b6302a41dc029c5a3815a8c7faa29eb2df17e8a447b56bfc808a250add140d8002cfcbe221219973d5930059d87b26c00bebad70beec10250a8a24
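
For reference, these SHA1/SHA512 values are digests of the metadata.gz and data.tar.gz entries packed inside the released .gem archive. Below is a hedged Ruby sketch (not part of the gem) of how to reproduce them from a locally downloaded copy; the filename used is an assumption.

# Hedged sketch, not part of the gem: recompute the digests recorded in
# checksums.yaml from a .gem file on disk. A .gem is a plain tar archive
# whose entries include metadata.gz and data.tar.gz.
require "digest"
require "rubygems/package"

File.open("logstash-output-amazon_es-0.1.1.gem", "rb") do |gem_file|
  Gem::Package::TarReader.new(gem_file) do |tar|
    tar.each do |entry|
      next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
      data = entry.read
      puts "#{entry.full_name} SHA1:   #{Digest::SHA1.hexdigest(data)}"
      puts "#{entry.full_name} SHA512: #{Digest::SHA512.hexdigest(data)}"
    end
  end
end
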
data/README.md CHANGED
@@ -27,8 +27,8 @@ An example configuration:
   amazon_es {
     hosts => ["foo.us-east-1.es.amazonaws.com"]
     region => "us-east-1"
-    access_key => 'ACCESS_KEY' (Will be made optional in next release to support instance profiles)
-    secret_key => 'SECRET_KEY'
+    aws_access_key_id => 'ACCESS_KEY' (Will be made optional in next release to support instance profiles)
+    aws_secret_access_key => 'SECRET_KEY'
     index => "production-logs-%{+YYYY.MM.dd}"
   }
 }
@@ -52,7 +52,7 @@ An example configuration:
 * max_retries (number, default => 3) - Set max retry for each event
 * retry_max_items (number, default => 5000) - Set retry queue size for events that failed to send
 * retry_max_interval (number, default => 5) - Set max interval between bulk retries
-* index (string, default => "logstash-%{+YYYY.MM.dd}") - Elasticsearch index to write events into
+* index (string - all lowercase, default => "logstash-%{+YYYY.MM.dd}") - Elasticsearch index to write events into
 * flush_size (number, default => 500) - This setting controls how many events will be buffered before sending a batch of events in bulk API
 * idle_flush_time (number, default => 1) - The amount of time in seconds since last flush before a flush is forced.
   This setting helps ensure slow event rates don't get stuck in Logstash.
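
The README tweak above only adds a reminder that the index option must be all lowercase, since Elasticsearch rejects index names containing uppercase characters. A small, hypothetical Ruby sketch (not the plugin's own code) of how a dated pattern like the one in the example expands, and why case matters:

# Hedged illustration only; the plugin itself uses Logstash's event sprintf,
# not this helper. It handles just the %{+YYYY.MM.dd} form from the README.
require "time"

def expand_index(pattern, timestamp)
  pattern.sub("%{+YYYY.MM.dd}", timestamp.utc.strftime("%Y.%m.%d"))
end

name = expand_index("production-logs-%{+YYYY.MM.dd}", Time.parse("2015-10-14T12:00:00Z"))
raise "Elasticsearch index names must be lowercase: #{name}" if name =~ /[A-Z]/
puts name  # => production-logs-2015.10.14
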
data/lib/logstash/outputs/amazon_es.rb CHANGED
@@ -318,7 +318,7 @@ class LogStash::Outputs::AmazonES < LogStash::Outputs::Base
 
     bulk_response = @client.bulk(es_actions)
 
-    if bulk_response["errors"]
+    if bulk_response["errors"] && bulk_response["items"]
       actions_to_retry = []
 
       bulk_response['items'].each_with_index do |item,idx|
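
The one functional change in 0.1.1 is this guard: a bulk response can report "errors" without carrying an "items" array, and iterating over a missing "items" entry would raise and abort the flush. A minimal, hypothetical sketch of the failure mode and the fixed check follows; the helper name and the retryable status codes are assumptions, not the plugin's actual code.

# Hedged sketch, not the plugin's real retry path; it only shows why the
# extra `&& bulk_response["items"]` check matters.
def collect_retries(bulk_response, es_actions)
  retries = []
  # 0.1.0 tested bulk_response["errors"] alone and then called
  # bulk_response['items'].each_with_index, which is a NoMethodError on nil.
  if bulk_response["errors"] && bulk_response["items"]
    bulk_response["items"].each_with_index do |item, idx|
      # Each item is keyed by its action, e.g. {"index" => {"status" => 429}}.
      status = item.values.first["status"]
      retries << es_actions[idx] if [429, 503].include?(status)  # assumed retryable codes
    end
  end
  retries
end

# An error response without "items" no longer crashes; it simply retries nothing:
collect_retries({ "errors" => true }, [[:index, {}, { "message" => "hello" }]])  # => []
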
data/logstash-output-amazon_es.gemspec CHANGED
@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-output-amazon_es'
-  s.version = '0.1.0'
+  s.version = '0.1.1'
   s.licenses = ['apache-2.0']
   s.summary = "Logstash Output to Amazon Elasticsearch Service"
   s.description = "Output events to Amazon Elasticsearch Service with V4 signing"
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-output-amazon_es
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Amazon
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-10-01 00:00:00.000000000 Z
+date: 2015-10-14 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: concurrent-ruby