logstash-output-amazon_es 2.0.1-java → 6.4.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. checksums.yaml +5 -5
  2. data/CONTRIBUTORS +12 -0
  3. data/Gemfile +8 -0
  4. data/LICENSE +10 -199
  5. data/README.md +34 -65
  6. data/lib/logstash/outputs/amazon_es.rb +218 -423
  7. data/lib/logstash/outputs/amazon_es/common.rb +347 -0
  8. data/lib/logstash/outputs/amazon_es/common_configs.rb +141 -0
  9. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es2x.json +95 -0
  10. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es5x.json +46 -0
  11. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es6x.json +45 -0
  12. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es7x.json +46 -0
  13. data/lib/logstash/outputs/amazon_es/http_client.rb +359 -74
  14. data/lib/logstash/outputs/amazon_es/http_client/manticore_adapter.rb +169 -0
  15. data/lib/logstash/outputs/amazon_es/http_client/pool.rb +457 -0
  16. data/lib/logstash/outputs/amazon_es/http_client_builder.rb +164 -0
  17. data/lib/logstash/outputs/amazon_es/template_manager.rb +36 -0
  18. data/logstash-output-amazon_es.gemspec +13 -22
  19. data/spec/es_spec_helper.rb +37 -0
  20. data/spec/unit/http_client_builder_spec.rb +189 -0
  21. data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +105 -0
  22. data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +198 -0
  23. data/spec/unit/outputs/elasticsearch/http_client_spec.rb +222 -0
  24. data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +25 -0
  25. data/spec/unit/outputs/elasticsearch_spec.rb +615 -0
  26. data/spec/unit/outputs/error_whitelist_spec.rb +60 -0
  27. metadata +49 -110
  28. data/lib/logstash/outputs/amazon_es/aws_transport.rb +0 -109
  29. data/lib/logstash/outputs/amazon_es/aws_v4_signer.rb +0 -7
  30. data/lib/logstash/outputs/amazon_es/aws_v4_signer_impl.rb +0 -62
  31. data/lib/logstash/outputs/amazon_es/elasticsearch-template.json +0 -41
  32. data/spec/amazon_es_spec_helper.rb +0 -69
  33. data/spec/unit/outputs/amazon_es_spec.rb +0 -50
  34. data/spec/unit/outputs/elasticsearch/protocol_spec.rb +0 -36
  35. data/spec/unit/outputs/elasticsearch_proxy_spec.rb +0 -58
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
- SHA1:
- metadata.gz: fe28932780893f008536b1c504907e747d1c5505
- data.tar.gz: 3af463b9a92973d567b43e079aee1761ff8bc9be
+ SHA256:
+ metadata.gz: 3c010485bbf51026b39f574b1bf1f532f81dde611519c9048e44f5b3559edaca
+ data.tar.gz: eb34d6b24ff01e2856012ce75dc2fd92f641f8e926c191f447461dcdf97114a9
  SHA512:
- metadata.gz: fe91a7d8491a64030589e5767bdf23f7cc19098688d41626cc46aa7b56451ff7109eb2acc40a41587ff1ddb2dccce16e96e592012d911160b86967340e011bce
- data.tar.gz: 9bd5f852a9e5e09770b3159b751f8e7ad66e3cc6b8b52030e0a082463b903af6559de106522ef568e3edbabb70309968187483024225f8ef010c72974cd6bf62
+ metadata.gz: 79f7ca9c8188d8a9ffa6a68e0e882d005fc8f47137734a1d741c2df0fad92b830ae6415cbde3b4cc0c91989ff7e5e2f8d8694914f086c5e3f4adaa0e520054e7
+ data.tar.gz: 83f261d5982333ea68bfd161f8eed25388b98f8e1f03f6512eacabb6b4782987f374c4342bb3715efbfad01f414ef518b7dc9cc0be438a96cfbea6d855fc5336
data/CONTRIBUTORS CHANGED
@@ -0,0 +1,12 @@
+ The following is a list of people who have contributed ideas, code, bug
+ reports, or in general have helped logstash along its way.
+
+ Contributors:
+ * Frank Xu (qinyaox)
+ * Qingyu Zhou (zhoqingy)
+ * Ankit Malpani (malpani)
+
+ Note: If you've sent us patches, bug reports, or otherwise contributed to
+ Logstash, and you aren't on the list above and want to be, please let us know
+ and we'll make sure you're here. Contributions from folks like you are what make
+ open source awesome.
data/Gemfile CHANGED
@@ -1,3 +1,11 @@
  source 'https://rubygems.org'

  gemspec
+
+ logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
+ use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
+
+ if Dir.exist?(logstash_path) && use_logstash_source
+ gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
+ gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
+ end
data/LICENSE CHANGED
@@ -1,202 +1,13 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
+ Copyright (c) 2012-2018 Elasticsearch <http://www.elastic.co>

- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at

- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ http://www.apache.org/licenses/LICENSE-2.0

+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
data/README.md CHANGED
@@ -4,21 +4,6 @@ This is a plugin for [Logstash](https://github.com/elastic/logstash).

  It is fully free and fully open source. The license is Apache 2.0, meaning you are pretty much free to use it however you want in whatever way.

- # Setting Up
-
- ## Installation
- One command installation
- `bin/plugin install logstash-output-amazon_es`
-
- While we are in the process of getting this plugin fully integrated within logstash to make installation simpler,
- if above does not work, or you would like to patch code here is a workaround to install this plugin within your logstash:
-
- 1. Check out/clone this code from github
- 2. Build plugin using - `gem build logstash-output-amazon_es.gemspec` ( this works with jruby and rubygem versions > 1.9)
- 3. Install plugin using `<logstash-home>/bin/plugin install logstash-output-amazon_es-0.2.0-java.gem` (or the non java variant)
- 4. For 2.3 support, please use '<logstash-home>/bin/logstash-plugin install logstash-output-amazon_es-1.0-java.gem'
- 5. For 5.2 support, please use '<logstash-home>/bin/logstash-plugin install logstash-output-amazon_es-2.0.0-java.gem'
-
  ## Configuration for Amazon Elasticsearch Output plugin

  To run the Logstash output Amazon Elasticsearch plugin simply add a configuration following the below documentation.
@@ -29,12 +14,12 @@ An example configuration:
  amazon_es {
  hosts => ["foo.us-east-1.es.amazonaws.com"]
  region => "us-east-1"
- # aws_access_key_id, aws_secret_access_key optional if instance profile is configured
- aws_access_key_id => 'ACCESS_KEY'
- aws_secret_access_key => 'SECRET_KEY'
- index => "production-logs-%{+YYYY.MM.dd}"
- }
- }
+ # aws_access_key_id, aws_secret_access_key optional if instance profile is configured
+ aws_access_key_id => 'ACCESS_KEY'
+ aws_secret_access_key => 'SECRET_KEY'
+ index => "production-logs-%{+YYYY.MM.dd}"
+ }
+ }

  * Required Parameters
  * hosts (array of string) - Amazon Elasticsearch domain endpoint. eg ["foo.us-east-1.es.amazonaws.com"]
@@ -42,27 +27,15 @@ An example configuration:

  * Optional Parameters
  * Credential parameters
- * aws_access_key_id, :validate => :string - Optional AWS Access key
+ * aws_access_key_id, :validate => :string - Optional AWS Access key
  * aws_secret_access_key, :validate => :string - Optional AWS Secret Key
- The credential resolution logic can be described as follows:
- - User passed aws_access_key_id and aws_secret_access_key in aes configuration
- - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
+ The credential resolution logic can be described as follows:
+ - User passed aws_access_key_id and aws_secret_access_key in aes configuration
+ - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
  (RECOMMENDED since they are recognized by all the AWS SDKs and CLI except for .NET),
  or AWS_ACCESS_KEY and AWS_SECRET_KEY (only recognized by Java SDK)
- - Credential profiles file at the default location (~/.aws/credentials) shared by all AWS SDKs and the AWS CLI
- - Instance profile credentials delivered through the Amazon EC2 metadata service
- * Retry Parameters
- * max_retries (number, default => 3) - Set max retry for each event
- * retry_max_items (number, default => 5000) - Set retry queue size for events that failed to send
- * retry_max_interval (number, default => 5) - Set max interval between bulk retries
- * index (string - all lowercase, default => "logstash-%{+YYYY.MM.dd}") - Elasticsearch index to write events into
- * flush_size (number , default => 500) - This setting controls how many events will be buffered before sending a batch of events in bulk API
- * idle_flush_time (number, default => 1) - The amount of time in seconds since last flush before a flush is forced.
- This setting helps ensure slow event rates don't get stuck in Logstash.
- For example, if your `flush_size` is 100, and you have received 10 events,
- and it has been more than `idle_flush_time` seconds since the last flush,
- Logstash will flush those 10 events automatically.
- This helps keep both fast and slow log streams moving along in near-real-time.
+ - Credential profiles file at the default location (~/.aws/credentials) shared by all AWS SDKs and the AWS CLI
+ - Instance profile credentials delivered through the Amazon EC2 metadata service
  * template (path) - You can set the path to your own template here, if you so desire. If not set, the included template will be used.
  * template_name (string, default => "logstash") - defines how the template is named inside Elasticsearch
  * port (string, default 443) - Amazon Elasticsearch Service listens on port 443 - https (default) and 80 - http. Tweak this for custom proxy.
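The credential resolution order documented above mirrors the AWS SDK's default provider chain. A minimal Ruby sketch, assuming the aws-sdk-core gem; the `resolve_credentials` helper is illustrative and not part of this plugin:

```ruby
require "aws-sdk-core"

# Illustrative only: mirrors the documented lookup order.
def resolve_credentials(access_key_id, secret_access_key)
  # 1. Keys passed explicitly in the amazon_es config block take precedence.
  if access_key_id && secret_access_key
    return Aws::Credentials.new(access_key_id, secret_access_key)
  end
  # 2-4. Otherwise defer to the SDK's default chain: environment variables,
  # the shared ~/.aws/credentials file, then EC2 instance profile credentials.
  Aws::CredentialProviderChain.new.resolve
end
```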
@@ -81,7 +54,7 @@ Need help? Try #logstash on freenode IRC or the https://discuss.elastic.co/c/log

  ## Developing

- ### 1. Plugin Development and Testing
+ ### 1. Plugin Developement and Testing

  #### Code
  - To get started, you'll need JRuby with the Bundler gem installed.
@@ -107,17 +80,6 @@ bundle install
  bundle exec rspec
  ```

- - Run integration tests
-
- Dependencies: [Docker](http://docker.com)
-
- Before the test suite is run, we will load and run an
- Elasticsearch instance within a docker container. This container
- will be cleaned up when suite has finished.
-
- ```sh
- bundle exec rspec --tag integration
- ```

  ### 2. Running your unpublished Plugin in Logstash
@@ -129,7 +91,12 @@ gem "logstash-filter-awesome", :path => "/your/local/logstash-filter-awesome"
  ```
  - Install plugin
  ```sh
+ # Logstash 2.3 and higher
+ bin/logstash-plugin install --no-verify
+
+ # Prior to Logstash 2.3
  bin/plugin install --no-verify
+
  ```
  - Run Logstash with your plugin
  ```sh
@@ -138,6 +105,15 @@ bin/logstash -e 'filter {awesome {}}'
  At this point any modifications to the plugin code will be applied to this local Logstash setup. After modifying the plugin, simply rerun Logstash.

  #### 2.2 Run in an installed Logstash
+ Before build your gemfile, please make sure use JRuby. Here is how you can know your local ruby version:
+ ```sh
+ rvm list
+ ```
+ Please make sure you current using jruby. Here is how you can change to jruby
+ ```sh
+ rvm jruby
+ ```
+

  You can use the same **2.1** method to run your plugin in an installed Logstash by editing its `Gemfile` and pointing the `:path` to your local plugin development directory or you can build the gem and install it using:
@@ -147,7 +123,12 @@ gem build logstash-filter-awesome.gemspec
  ```
  - Install the plugin from the Logstash home
  ```sh
- bin/plugin install /your/local/plugin/logstash-filter-awesome.gem
+ # Logstash 2.3 and higher
+ bin/logstash-plugin install --no-verify
+
+ # Prior to Logstash 2.3
+ bin/plugin install --no-verify
+
  ```
  - Start Logstash and proceed to test the plugin
@@ -155,19 +136,7 @@ bin/plugin install /your/local/plugin/logstash-filter-awesome.gem

  All contributions are welcome: ideas, patches, documentation, bug reports, complaints, and even something you drew up on a napkin.

- Programming is not a required skill. Whatever you've seen about open source and maintainers or community members saying "send patches or die" - you will not see that here.
+ Programming is not a required skill. Whatever you've seen about open source and maintainers or community members saying "send patches or die" - you will not see that here.

  It is more important to the community that you are able to contribute.

- For more information about contributing, see the [CONTRIBUTING](https://github.com/elastic/logstash/blob/master/CONTRIBUTING.md) file.
-
- ## Building the Logstash output plugin with Docker
-
- **Prerequisites:**
-
- - [Docker Engine](https://www.docker.com/products/docker-engine) >= 1.9.1
- - [Docker Compose](https://docs.docker.com/compose/) >= 1.6.0
-
- docker-compose up
-
- This will result in a newly created binary inside the host-mounted volume `${PWD}` named `logstash-output-amazon_es-<VERSION>-java.gem`. Where `<VERSION>` is defined as value of `s.version` in [logstash-output-amazon_es.gemspec](logstash-output-amazon_es.gemspec) file.
data/lib/logstash/outputs/amazon_es.rb CHANGED
@@ -2,117 +2,130 @@
  require "logstash/namespace"
  require "logstash/environment"
  require "logstash/outputs/base"
- require "logstash/outputs/amazon_es/http_client"
  require "logstash/json"
  require "concurrent"
  require "stud/buffer"
- require "socket"
- require "thread"
- require "uri"
-
- # This output plugin emits data to Amazon Elasticsearch with support for signing requests using AWS V4 Signatures
+ require "socket" # for Socket.gethostname
+ require "thread" # for safe queueing
+ require "uri" # for escaping user input
+ require "forwardable"
+
+ # .Compatibility Note
+ # [NOTE]
+ # ================================================================================
+ # Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+ # called `http.content_type.required`. If this option is set to `true`, and you
+ # are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+ # plugin to version 6.2.5 or higher.
+ #
+ # ================================================================================
+ #
+ # This plugin is the recommended method of storing logs in Elasticsearch.
+ # If you plan on using the Kibana web interface, you'll want to use this output.
+ #
+ # This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+ # We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+ # yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+ # to upgrade Logstash in lock-step.
+ #
+ # You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
+ #
+ # ==== Template management for Elasticsearch 5.x
+ # Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+ # Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+ # behavior.
  #
+ # ** Users installing ES 5.x and LS 5.x **
+ # This change will not affect you and you will continue to use the ES defaults.
  #
- # The configuration and experience is similar to logstash-output-elasticsearch plugin and we have added Signature V4 support for the same
- # Some of the default configurations like connection timeouts have been tuned for optimal performance with Amazon Elasticsearch
+ # ** Users upgrading from LS 2.x to LS 5.x with ES 5.x **
+ # LS will not force upgrade the template, if `logstash` template already exists. This means you will still use
+ # `.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+ # the new template is installed.
  #
  # ==== Retry Policy
  #
- # This plugin uses the same retry policy as logstash-output-elasticsearch, It uses bulk API to optimize its
- # imports into Elasticsearch.. These requests may experience either partial or total failures.
- # Events are retried if they fail due to either a network error or the status codes
- # 429 (the server is busy), 409 (Version Conflict), or 503 (temporary overloading/maintenance).
+ # The retry policy has changed significantly in the 2.2.0 release.
+ # This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+ # either partial or total failures.
  #
- # The retry policy's logic can be described as follows:
+ # The following errors are retried infinitely:
  #
- # - Block and retry all events in the bulk response that experience transient network exceptions until
- # a successful submission is received by Elasticsearch.
- # - Retry the subset of sent events which resulted in ES errors of a retryable nature.
- # - Events which returned retryable error codes will be pushed onto a separate queue for
- # retrying events. Events in this queue will be retried a maximum of 5 times by default (configurable through :max_retries).
- # The size of this queue is capped by the value set in :retry_max_items.
- # - Events from the retry queue are submitted again when the queue reaches its max size or when
- # the max interval time is reached. The max interval time is configurable via :retry_max_interval.
- # - Events which are not retryable or have reached their max retry count are logged to stderr.
-
- class LogStash::Outputs::AmazonES < LogStash::Outputs::Base
- attr_reader :client
-
- include Stud::Buffer
- RETRYABLE_CODES = [409, 429, 503]
- SUCCESS_CODES = [200, 201]
-
- config_name "amazon_es"
-
- # The index to write events to. This can be dynamic using the `%{foo}` syntax.
- # The default value will partition your indices by day so you can more easily
- # delete old data or only search specific date ranges.
- # Indexes may not contain uppercase characters.
- # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}
- config :index, :validate => :string, :default => "logstash-%{+YYYY.MM.dd}"
-
- # The index type to write events to. Generally you should try to write only
- # similar events to the same 'type'. String expansion `%{foo}` works here.
- #
- # Deprecated in favor of `document_type` field.
- config :index_type, :validate => :string, :deprecated => "Please use the 'document_type' setting instead. It has the same effect, but is more appropriately named."
-
- # The document type to write events to. Generally you should try to write only
- # similar events to the same 'type'. String expansion `%{foo}` works here.
- # Unless you set 'document_type', the event 'type' will be used if it exists
- # otherwise the document type will be assigned the value of 'logs'
- config :document_type, :validate => :string
-
- # Starting in Logstash 1.3 (unless you set option `manage_template` to false)
- # a default mapping template for Elasticsearch will be applied, if you do not
- # already have one set to match the index pattern defined (default of
- # `logstash-%{+YYYY.MM.dd}`), minus any variables. For example, in this case
- # the template will be applied to all indices starting with `logstash-*`
- #
- # If you have dynamic templating (e.g. creating indices based on field names)
- # then you should set `manage_template` to false and use the REST API to upload
- # your templates manually.
- config :manage_template, :validate => :boolean, :default => true
-
- # This configuration option defines how the template is named inside Elasticsearch.
- # Note that if you have used the template management features and subsequently
- # change this, you will need to prune the old template manually, e.g.
- #
- # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
- #
- # where `OldTemplateName` is whatever the former setting was.
- config :template_name, :validate => :string, :default => "logstash"
+ # - Network errors (inability to connect)
+ # - 429 (Too many requests) and
+ # - 503 (Service unavailable) errors
+ #
+ # NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+ # It is more performant for Elasticsearch to retry these exceptions than this plugin.
+ #
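The retry note above reduces to a status-code check per bulk-response item. A minimal Ruby sketch of that classification, with illustrative names that are not the plugin's internals:

```ruby
# Network errors are retried before any status check; among HTTP statuses,
# only 429 and 503 are retried (indefinitely), while 409 is not.
RETRYABLE_STATUSES = [429, 503].freeze

def retryable?(status)
  RETRYABLE_STATUSES.include?(status)
end

retryable?(503) # => true  -- resubmitted until it succeeds
retryable?(409) # => false -- raise `retry_on_conflict` on updates instead
```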
+ # ==== Batch Sizes ====
+ # This plugin attempts to send batches of events as a single request. However, if
+ # a request exceeds 20MB we will break it up until multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+ #
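The 20MB rule above amounts to a greedy chunker over serialized bulk payloads. A sketch under assumed names (`TARGET_BULK_BYTES`, `batch`), not the plugin's actual implementation:

```ruby
TARGET_BULK_BYTES = 20 * 1024 * 1024 # 20MB

# Greedily pack payloads into batches no larger than the target; a single
# oversized document is still sent, alone, as its own request.
def batch(payloads)
  batches, current, size = [], [], 0
  payloads.each do |p|
    if !current.empty? && size + p.bytesize > TARGET_BULK_BYTES
      batches << current
      current, size = [], 0
    end
    current << p
    size += p.bytesize
  end
  batches << current unless current.empty?
  batches
end
```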
+ # ==== DNS Caching
+ #
+ # This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+ # a global setting for the JVM.
+ #
+ # As an example, to set your DNS TTL to 1 second you would set
+ # the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+ #
+ # Keep in mind that a connection with keepalive enabled will
+ # not reevaluate its DNS value while the keepalive is in effect.
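For completeness, the same TTL can also be set programmatically from JRuby (which Logstash runs on), since `networkaddress.cache.ttl` is a `java.security.Security` property. A minimal sketch, assuming it runs before the JVM performs its first lookup:

```ruby
require "java"

# Equivalent of -Dnetworkaddress.cache.ttl=1: cache successful DNS
# lookups for one second instead of the JVM default.
java.security.Security.set_property("networkaddress.cache.ttl", "1")
```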
+ #
+ # ==== HTTP Compression
+ #
+ # This plugin supports request and response compression. Response compression is enabled by default and
+ # for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+ # it to send back compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+ # Elasticsearch[https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http] to take advantage of response compression when using this plugin
+ #
+ # For requests compression, regardless of the Elasticsearch version, users have to enable `http_compression`
+ # setting in their Logstash config file.
+ #
+ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
+ declare_threadsafe!

- # You can set the path to your own template here, if you so desire.
- # If not set, the included template will be used.
- config :template, :validate => :path
+ require "logstash/outputs/amazon_es/http_client"
+ require "logstash/outputs/amazon_es/http_client_builder"
+ require "logstash/outputs/amazon_es/common_configs"
+ require "logstash/outputs/amazon_es/common"

- # Overwrite the current template with whatever is configured
- # in the `template` and `template_name` directives.
- config :template_overwrite, :validate => :boolean, :default => false
+ # Protocol agnostic (i.e. non-http, non-java specific) configs go here
+ include(LogStash::Outputs::ElasticSearch::CommonConfigs)

- # The document ID for the index. Useful for overwriting existing entries in
- # Elasticsearch with the same ID.
- config :document_id, :validate => :string
+ # Protocol agnostic methods
+ include(LogStash::Outputs::ElasticSearch::Common)

- # A routing override to be applied to all processed events.
- # This can be dynamic using the `%{foo}` syntax.
- config :routing, :validate => :string
+ config_name "amazon_es"

+ # The Elasticsearch action to perform. Valid actions are:
+ #
+ # - index: indexes a document (an event from Logstash).
+ # - delete: deletes a document by id (An id is required for this action)
+ # - create: indexes a document, fails if a document by that id already exists in the index.
+ # - update: updates a document by id. Update has a special case where you can upsert -- update a
+ # document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+ # in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+ # - A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+ # would use the foo field for the action
+ #
+ # For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
+ config :action, :validate => :string, :default => "index"

- # Set the endpoint of your Amazon Elasticsearch domain. This will always be array of size 1
- # ["foo.us-east-1.es.amazonaws.com"]
- config :hosts, :validate => :array
+ # Username to authenticate to a secure Elasticsearch cluster
+ config :user, :validate => :string
+ # Password to authenticate to a secure Elasticsearch cluster
+ config :password, :validate => :password

  # You can set the remote port as part of the host, or explicitly here as well
- config :port, :validate => :string, :default => 443
+ config :port, :validate => :number, :default => 443

  # Sets the protocol thats used to connect to elastisearch
  config :protocol, :validate => :string, :default => "https"

  #Signing specific details
  config :region, :validate => :string, :default => "us-east-1"
-
  # Credential resolution logic works as follows:
  #
  # - User passed aws_access_key_id and aws_secret_access_key in aes configuration
@@ -124,344 +137,126 @@ class LogStash::Outputs::AmazonES < LogStash::Outputs::Base
  config :aws_access_key_id, :validate => :string
  config :aws_secret_access_key, :validate => :string

-
- # This plugin uses the bulk index api for improved indexing performance.
- # To make efficient bulk api calls, we will buffer a certain number of
- # events before flushing that out to Elasticsearch. This setting
- # controls how many events will be buffered before sending a batch
- # of events.
- config :flush_size, :validate => :number, :default => 500
-
- # The amount of time since last flush before a flush is forced.
- #
- # This setting helps ensure slow event rates don't get stuck in Logstash.
- # For example, if your `flush_size` is 100, and you have received 10 events,
- # and it has been more than `idle_flush_time` seconds since the last flush,
- # Logstash will flush those 10 events automatically.
- #
- # This helps keep both fast and slow log streams moving along in
- # near-real-time.
- config :idle_flush_time, :validate => :number, :default => 1
-
- # The Elasticsearch action to perform. Valid actions are: `index`, `delete`.
- #
- # Use of this setting *REQUIRES* you also configure the `document_id` setting
- # because `delete` actions all require a document id.
- #
- # What does each action do?
- #
- # - index: indexes a document (an event from Logstash).
- # - delete: deletes a document by id
- # - create: indexes a document, fails if a document by that id already exists in the index.
- # - update: updates a document by id
- # following action is not supported by HTTP protocol
- #
- # For more details on actions, check out the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
- config :action, :validate => %w(index delete create update), :default => "index"
-
- # Username and password (only valid when protocol is HTTP; this setting works with HTTP or HTTPS auth)
- config :user, :validate => :string
- config :password, :validate => :password
-
- # HTTP Path at which the Elasticsearch server lives. Use this if you must run ES behind a proxy that remaps
+ # HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
  # the root path for the Elasticsearch HTTP API lives.
- config :path, :validate => :string, :default => "/"
-
- # Set max retry for each event
- config :max_retries, :validate => :number, :default => 3
-
- # Set retry policy for events that failed to send
- config :retry_max_items, :validate => :number, :default => 5000
-
- # Set max interval between bulk retries
- config :retry_max_interval, :validate => :number, :default => 5
-
- # Set the address of a forward HTTP proxy. Must be used with the 'http' protocol
- # Can be either a string, such as 'http://localhost:123' or a hash in the form
- # {host: 'proxy.org' port: 80 scheme: 'http'}
- # Note, this is NOT a SOCKS proxy, but a plain HTTP proxy
- config :proxy
-
- # Enable doc_as_upsert for update mode
- # create a new document with source if document_id doesn't exists
- config :doc_as_upsert, :validate => :boolean, :default => false
-
- # Set upsert content for update mode
- # create a new document with this parameter as json string if document_id doesn't exists
- config :upsert, :validate => :string, :default => ""
-
- public
- def register
- @hosts = Array(@hosts)
- # retry-specific variables
- @retry_flush_mutex = Mutex.new
- @retry_teardown_requested = Concurrent::AtomicBoolean.new(false)
- # needs flushing when interval
- @retry_queue_needs_flushing = ConditionVariable.new
- @retry_queue_not_full = ConditionVariable.new
- @retry_queue = Queue.new
- @submit_mutex = Mutex.new
-
- client_settings = {}
- common_options = {
- :client_settings => client_settings
- }
-
- client_settings[:path] = "/#{@path}/".gsub(/\/+/, "/") # Normalize slashes
- @logger.debug? && @logger.debug("Normalizing http path", :path => @path, :normalized => client_settings[:path])
-
- if @hosts.nil? || @hosts.empty?
- @logger.info("No 'host' set in elasticsearch output. Defaulting to localhost")
- @hosts = ["localhost"]
- end
-
- client_settings.merge! setup_proxy()
- common_options.merge! setup_basic_auth()
-
- # Update API setup
- update_options = {
- :upsert => @upsert,
- :doc_as_upsert => @doc_as_upsert
- }
- common_options.merge! update_options if @action == 'update'
-
- @client = LogStash::Outputs::AES::HttpClient.new(
- common_options.merge(:hosts => @hosts, :port => @port, :region => @region, :aws_access_key_id => @aws_access_key_id, :aws_secret_access_key => @aws_secret_access_key,:protocol => @protocol)
- )
-
- if @manage_template
- begin
- @logger.info("Automatic template management enabled", :manage_template => @manage_template.to_s)
- @client.template_install(@template_name, get_template, @template_overwrite)
- rescue => e
- @logger.error("Failed to install template: #{e.message}")
- end
- end
-
- @logger.info("New Elasticsearch output", :hosts => @hosts, :port => @port)
-
- @client_idx = 0
-
- buffer_initialize(
- :max_items => @flush_size,
- :max_interval => @idle_flush_time,
- :logger => @logger
- )
-
- @retry_timer_thread = Thread.new do
- loop do
- sleep(@retry_max_interval)
- @retry_flush_mutex.synchronize { @retry_queue_needs_flushing.signal }
- end
- end
-
- @retry_thread = Thread.new do
- while @retry_teardown_requested.false?
- @retry_flush_mutex.synchronize { @retry_queue_needs_flushing.wait(@retry_flush_mutex) }
- retry_flush
- end
- end
- end # def register
-
- public
- def get_template
- if @template.nil?
- @template = ::File.expand_path('amazon_es/elasticsearch-template.json', ::File.dirname(__FILE__))
- if !File.exists?(@template)
- raise "You must specify 'template => ...' in your elasticsearch output (I looked for '#{@template}')"
- end
- end
- template_json = IO.read(@template).gsub(/\n/,'')
- template = LogStash::Json.load(template_json)
- @logger.info("Using mapping template", :template => template)
- return template
- end # def get_template
-
- public
- def receive(event)
- return unless output?(event)
-
- # block until we have not maxed out our
- # retry queue. This is applying back-pressure
- # to slow down the receive-rate
- @retry_flush_mutex.synchronize {
- @retry_queue_not_full.wait(@retry_flush_mutex) while @retry_queue.size > @retry_max_items
- }
-
- event.set('[@metadata][retry_count]', 0)
-
- # Set the 'type' value for the index.
- type = if @document_type
- event.sprintf(@document_type)
- elsif @index_type # deprecated
- event.sprintf(@index_type)
- else
- event.get('type') || 'logs'
- end
-
- params = {
- :_id => @document_id ? event.sprintf(@document_id) : nil,
- :_index => event.sprintf(@index),
- :_type => type,
- :_routing => @routing ? event.sprintf(@routing) : nil
- }
-
- params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @action == 'update' && @upsert != ""
-
- buffer_receive([event.sprintf(@action), params, event])
- end # def receive
-
- public
- # The submit method can be called from both the
- # Stud::Buffer flush thread and from our own retry thread.
- def submit(actions)
- @submit_mutex.synchronize do
- es_actions = actions.map { |a, doc, event| [a, doc, event.to_hash] }
-
- bulk_response = @client.bulk(es_actions)
-
- if bulk_response["errors"] && bulk_response["items"]
- actions_to_retry = []
-
- bulk_response['items'].each_with_index do |item,idx|
- action = es_actions[idx]
- action_type, props = item.first # These are all hashes with one value, so we destructure them here
-
- status = props['status']
- error = props['error']
-
- if RETRYABLE_CODES.include?(status)
- @logger.warn "retrying failed action with response code: #{status}"
- actions_to_retry << action
- elsif not SUCCESS_CODES.include?(status)
- @logger.warn "failed action", status: status, error: error, action: action
- end
- end
-
- retry_push(actions_to_retry) unless actions_to_retry.empty?
- end
- end
- end
-
- # When there are exceptions raised upon submission, we raise an exception so that
- # Stud::Buffer will retry to flush
- public
- def flush(actions, teardown = false)
- begin
- submit(actions)
- rescue Manticore::SocketException => e
- # If we can't even connect to the server let's just print out the URL (:hosts is actually a URL)
- # and let the user sort it out from there
- @logger.error(
- "Attempted to send a bulk request to Elasticsearch configured at '#{@client.client_options[:hosts]}',"+
- " but Elasticsearch appears to be unreachable or down!",
- :client_config => @client.client_options,
- :error_message => e.message
- )
- @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
- rescue => e
- # For all other errors print out full connection issues
- @logger.error(
- "Attempted to send a bulk request to Elasticsearch configured at '#{@client.client_options[:hosts]}'," +
- " but an error occurred and it failed! Are you sure you can reach elasticsearch from this machine using " +
- "the configuration provided?",
- :client_config => @client.client_options,
- :error_message => e.message,
- :error_class => e.class.name,
- :backtrace => e.backtrace
- )
-
- @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
-
- raise e
- end
- end # def flush
-
- public
- def teardown
-
- @retry_teardown_requested.make_true
- # First, make sure retry_timer_thread is stopped
- # to ensure we do not signal a retry based on
- # the retry interval.
- Thread.kill(@retry_timer_thread)
- @retry_timer_thread.join
- # Signal flushing in the case that #retry_flush is in
- # the process of waiting for a signal.
- @retry_flush_mutex.synchronize { @retry_queue_needs_flushing.signal }
- # Now, #retry_flush is ensured to not be in a state of
- # waiting and can be safely joined into the main thread
- # for further final execution of an in-process remaining call.
- @retry_thread.join
-
- # execute any final actions along with a proceeding retry for any
- # final actions that did not succeed.
- buffer_flush(:final => true)
- retry_flush
- end
-
- private
- def setup_proxy
- return {} unless @proxy
-
- # Symbolize keys
- proxy = if @proxy.is_a?(Hash)
- Hash[@proxy.map {|k,v| [k.to_sym, v]}]
- elsif @proxy.is_a?(String)
- @proxy
- else
- raise LogStash::ConfigurationError, "Expected 'proxy' to be a string or hash, not '#{@proxy}''!"
- end
-
- return {:proxy => proxy}
- end
-
- private
- def setup_basic_auth
- return {} unless @user && @password
-
- {
- :user => ::URI.escape(@user, "@:"),
- :password => ::URI.escape(@password.value, "@:")
- }
- end
-
-
- private
- # in charge of submitting any actions in @retry_queue that need to be
- # retried
- #
- # This method is not called concurrently. It is only called by @retry_thread
- # and once that thread is ended during the teardown process, a final call
- # to this method is done upon teardown in the main thread.
- def retry_flush()
- unless @retry_queue.empty?
- buffer = @retry_queue.size.times.map do
- next_action, next_doc, next_event = @retry_queue.pop
- next_event.set('[@metadata][retry_count]', next_event.get('[@metadata][retry_count]') + 1)
-
- if next_event.get('[@metadata][retry_count]') > @max_retries
- @logger.error "too many attempts at sending event. dropping: #{next_event}"
- nil
- else
- [next_action, next_doc, next_event]
- end
- end.compact
-
- submit(buffer) unless buffer.empty?
- end
-
- @retry_flush_mutex.synchronize {
- @retry_queue_not_full.signal if @retry_queue.size < @retry_max_items
- }
+ # Note that if you use paths as components of URLs in the 'hosts' field you may
+ # not also set this field. That will raise an error at startup
+ config :path, :validate => :string
+
+ # HTTP Path to perform the _bulk requests to
+ # this defaults to a concatenation of the path parameter and "_bulk"
+ config :bulk_path, :validate => :string
+
+ # Pass a set of key value pairs as the URL query string. This query string is added
+ # to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+ # urls that already have query strings, the one specified here will be appended.
+ config :parameters, :validate => :hash
+
+ # Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+ # is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+ # If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'
+ config :ssl, :validate => :boolean
+
+ # Option to validate the server's certificate. Disabling this severely compromises security.
+ # For more information on disabling certificate verification please read
+ # https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+ config :ssl_certificate_verification, :validate => :boolean, :default => true
+
+ # The .cer or .pem file to validate the server's certificate
+ config :cacert, :validate => :path
+
+ # The JKS truststore to validate the server's certificate.
+ # Use either `:truststore` or `:cacert`
+ config :truststore, :validate => :path
+
+ # Set the truststore password
+ config :truststore_password, :validate => :password
+
+ # The keystore used to present a certificate to the server.
+ # It can be either .jks or .p12
+ config :keystore, :validate => :path
+
+ # Set the keystore password
+ config :keystore_password, :validate => :password
+
+ # This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+ # Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+ # this with master nodes, you probably want to disable HTTP on them by setting
+ # `http.enabled` to false in their amazon_es.yml. You can either use the `sniffing` option or
+ # manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+ config :sniffing, :validate => :boolean, :default => false
+
+ # How long to wait, in seconds, between sniffing attempts
+ config :sniffing_delay, :validate => :number, :default => 5
+
+ # HTTP Path to be used for the sniffing requests
+ # the default value is computed by concatenating the path value and "_nodes/http"
+ # if sniffing_path is set it will be used as an absolute path
+ # do not use full URL here, only paths, e.g. "/sniff/_nodes/http"
+ config :sniffing_path, :validate => :string
+
+ # Set the address of a forward HTTP proxy.
+ # This used to accept hashes as arguments but now only accepts
+ # arguments of the URI type to prevent leaking credentials.
+ config :proxy, :validate => :uri
+
+ # Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If
+ # a timeout occurs, the request will be retried.
+ config :timeout, :validate => :number, :default => 60
+
+ # Set the Elasticsearch errors in the whitelist that you don't want to log.
+ # A useful example is when you want to skip all 409 errors
+ # which are `document_already_exists_exception`.
+ config :failure_type_logging_whitelist, :validate => :array, :default => []
+
+ # While the output tries to reuse connections efficiently we have a maximum.
+ # This sets the maximum number of open connections the output will create.
+ # Setting this too low may mean frequently closing / opening connections
+ # which is bad.
+ config :pool_max, :validate => :number, :default => 1000
+
+ # While the output tries to reuse connections efficiently we have a maximum per endpoint.
+ # This sets the maximum number of open connections per endpoint the output will create.
+ # Setting this too low may mean frequently closing / opening connections
+ # which is bad.
+ config :pool_max_per_route, :validate => :number, :default => 100
+
+ # HTTP Path where a HEAD request is sent when a backend is marked down
+ # the request is sent in the background to see if it has come back again
+ # before it is once again eligible to service requests.
+ # If you have custom firewall rules you may need to change this
+ config :healthcheck_path, :validate => :string
+
+ # How frequently, in seconds, to wait between resurrection attempts.
+ # Resurrection is the process by which backend endpoints marked 'down' are checked
+ # to see if they have come back to life
+ config :resurrect_delay, :validate => :number, :default => 5
+
+ # How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+ # You may want to set this lower, if you get connection errors regularly
+ # Quoting the Apache commons docs (this client is based Apache Commmons):
+ # 'Defines period of inactivity in milliseconds after which persistent connections must
+ # be re-validated prior to being leased to the consumer. Non-positive value passed to
+ # this method disables connection validation. This check helps detect connections that
+ # have become stale (half-closed) while kept inactive in the pool.'
+ # See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+ config :validate_after_inactivity, :validate => :number, :default => 10000
+
+ # Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond
+ config :http_compression, :validate => :boolean, :default => false
+
+ # Custom Headers to send on each request to amazon_es nodes
+ config :custom_headers, :validate => :hash, :default => {}
+
+ def build_client
+ params["metric"] = metric
+ @client ||= ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
  end

- private
- def retry_push(actions)
- Array(actions).each{|action| @retry_queue << action}
- @retry_flush_mutex.synchronize {
- @retry_queue_needs_flushing.signal if @retry_queue.size >= @retry_max_items
- }
+ def close
+ @stopping.make_true
+ stop_template_installer
+ @client.close if @client
  end

  @@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-amazon_es-/ }
@@ -471,4 +266,4 @@ class LogStash::Outputs::AmazonES < LogStash::Outputs::Base
  require "logstash/outputs/amazon_es/#{name}"
  end

- end
+ end # class LogStash::Outputs::Elasticsearch