logstash-output-elasticsearch 0.1.8-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +4 -0
- data/Gemfile +3 -0
- data/LICENSE +13 -0
- data/Rakefile +1 -0
- data/lib/logstash/outputs/elasticsearch.rb +476 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template.json +42 -0
- data/lib/logstash/outputs/elasticsearch/protocol.rb +253 -0
- data/logstash-output-elasticsearch.gemspec +42 -0
- data/spec/outputs/elasticsearch_spec.rb +517 -0
- metadata +206 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: b3af73cf207e18362bc27fce6f46c595025ff1dd
  data.tar.gz: 1ea5ff2ddc327ef163d699235c1560f1317d01f9
SHA512:
  metadata.gz: f66ef2f7593309cdc979b67ad320f1d625728a3bbf15f44a08bf1c85ce47f58049aeb0b617f03d74adc20b10b80512b36a0fddb4ebb5406480d7a1ea6968a53a
  data.tar.gz: 7c87108ef268afd1f854575d2934f82c2c2e907c3e23c14a490379b02ec81ef5124e589afab077755db8bbd95ec6eced5fccbbd3ca4e15fc14e297002a0439a4
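
These digests let you verify a downloaded artifact before trusting it. A minimal Ruby sketch, assuming the raw `.gem` archive has been untarred (a gem is a plain tar) so that `checksums.yaml`, `metadata.gz`, and `data.tar.gz` sit in the current directory; only the file names come from the release above, the rest is illustrative:

    require "digest"
    require "yaml"

    # Compare each archive member against the SHA512 entries in checksums.yaml.
    checksums = YAML.load_file("checksums.yaml")
    %w[metadata.gz data.tar.gz].each do |name|
      actual = Digest::SHA512.file(name).hexdigest
      abort("#{name}: checksum mismatch") unless actual == checksums["SHA512"][name]
      puts "#{name}: OK"
    end
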
data/.gitignore
ADDED
data/Gemfile
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,13 @@
Copyright (c) 2012-2014 Elasticsearch <http://www.elasticsearch.org>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
data/Rakefile
ADDED
@@ -0,0 +1 @@
require "logstash/devutils/rake"
data/lib/logstash/outputs/elasticsearch.rb
ADDED
@@ -0,0 +1,476 @@
# encoding: utf-8
require "logstash/namespace"
require "logstash/environment"
require "logstash/outputs/base"
require "logstash/json"
require "stud/buffer"
require "socket" # for Socket.gethostname
require "uri" # for escaping user input
require 'logstash-output-elasticsearch_jars.rb'

# This output lets you store logs in Elasticsearch and is the most recommended
# output for Logstash. If you plan on using the Kibana web interface, you'll
# need to use this output.
#
# *VERSION NOTE*: Your Elasticsearch cluster must be running Elasticsearch
# 1.0.0 or later.
#
# If you want to set other Elasticsearch options that are not exposed directly
# as configuration options, there are two methods:
#
# * Create an `elasticsearch.yml` file in the $PWD of the Logstash process
# * Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`)
#
# With the default `protocol` setting ("node"), this plugin will join your
# Elasticsearch cluster as a client node, so it will show up in Elasticsearch's
# cluster status.
#
# You can learn more about Elasticsearch at <http://www.elasticsearch.org>
#
# ## Operational Notes
#
# If using the default `protocol` setting ("node"), your firewalls might need
# to permit port 9300 in *both* directions (from Logstash to Elasticsearch, and
# Elasticsearch to Logstash)
class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
  include Stud::Buffer

  config_name "elasticsearch"
  milestone 3

  # The index to write events to. This can be dynamic using the `%{foo}` syntax.
  # The default value will partition your indices by day so you can more easily
  # delete old data or only search specific date ranges.
  # Indexes may not contain uppercase characters.
  # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}
  config :index, :validate => :string, :default => "logstash-%{+YYYY.MM.dd}"

  # The index type to write events to. Generally you should try to write only
  # similar events to the same 'type'. String expansion `%{foo}` works here.
  config :index_type, :validate => :string

  # Starting in Logstash 1.3 (unless you set option `manage_template` to false)
  # a default mapping template for Elasticsearch will be applied, if you do not
  # already have one set to match the index pattern defined (default of
  # `logstash-%{+YYYY.MM.dd}`), minus any variables. For example, in this case
  # the template will be applied to all indices starting with `logstash-*`
  #
  # If you have dynamic templating (e.g. creating indices based on field names)
  # then you should set `manage_template` to false and use the REST API to upload
  # your templates manually.
  config :manage_template, :validate => :boolean, :default => true

  # This configuration option defines how the template is named inside Elasticsearch.
  # Note that if you have used the template management features and subsequently
  # change this, you will need to prune the old template manually, e.g.
  #
  # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
  #
  # where `OldTemplateName` is whatever the former setting was.
  config :template_name, :validate => :string, :default => "logstash"

  # You can set the path to your own template here, if you so desire.
  # If not set, the included template will be used.
  config :template, :validate => :path

  # Overwrite the current template with whatever is configured
  # in the `template` and `template_name` directives.
  config :template_overwrite, :validate => :boolean, :default => false

  # The document ID for the index. Useful for overwriting existing entries in
  # Elasticsearch with the same ID.
  config :document_id, :validate => :string, :default => nil

  # The name of your cluster if you set it on the Elasticsearch side. Useful
  # for discovery.
  config :cluster, :validate => :string

  # The hostname or IP address of the host to use for Elasticsearch unicast discovery
  # This is only required if the normal multicast/cluster discovery stuff won't
  # work in your environment.
  #
  # `"127.0.0.1"`
  # `["127.0.0.1:9300","127.0.0.2:9300"]`
  config :host, :validate => :array

  # The port for Elasticsearch transport to use.
  #
  # If you do not set this, the following defaults are used:
  # * `protocol => http` - port 9200
  # * `protocol => transport` - port 9300-9305
  # * `protocol => node` - port 9300-9305
  config :port, :validate => :string

  # The name/address of the host to bind to for Elasticsearch clustering
  config :bind_host, :validate => :string

  # This is only valid for the 'node' protocol.
  #
  # The port for the node to listen on.
  config :bind_port, :validate => :number

  # Run the Elasticsearch server embedded in this process.
  # This option is useful if you want to run a single Logstash process that
  # handles log processing and indexing; it saves you from needing to run
  # a separate Elasticsearch process.
  config :embedded, :validate => :boolean, :default => false

  # If you are running the embedded Elasticsearch server, you can set the http
  # port it listens on here; it is not common to need this setting changed from
  # default.
  config :embedded_http_port, :validate => :string, :default => "9200-9300"

  # This setting no longer does anything. It exists to keep config validation
  # from failing. It will be removed in future versions.
  config :max_inflight_requests, :validate => :number, :default => 50, :deprecated => true

  # The node name Elasticsearch will use when joining a cluster.
  #
  # By default, this is generated internally by the ES client.
  config :node_name, :validate => :string

  # This plugin uses the bulk index api for improved indexing performance.
  # To make efficient bulk api calls, we will buffer a certain number of
  # events before flushing that out to Elasticsearch. This setting
  # controls how many events will be buffered before sending a batch
  # of events.
  config :flush_size, :validate => :number, :default => 5000

  # The amount of time since last flush before a flush is forced.
  #
  # This setting helps ensure slow event rates don't get stuck in Logstash.
  # For example, if your `flush_size` is 100, and you have received 10 events,
  # and it has been more than `idle_flush_time` seconds since the last flush,
  # Logstash will flush those 10 events automatically.
  #
  # This helps keep both fast and slow log streams moving along in
  # near-real-time.
  config :idle_flush_time, :validate => :number, :default => 1

  # Choose the protocol used to talk to Elasticsearch.
  #
  # The 'node' protocol will connect to the cluster as a normal Elasticsearch
  # node (but will not store data). This allows you to use things like
  # multicast discovery. If you use the `node` protocol, you must permit
  # bidirectional communication on the port 9300 (or whichever port you have
  # configured).
  #
  # The 'transport' protocol will connect to the host you specify and will
  # not show up as a 'node' in the Elasticsearch cluster. This is useful
  # in situations where you cannot permit connections outbound from the
  # Elasticsearch cluster to this Logstash server.
  #
  # The 'http' protocol will use the Elasticsearch REST/HTTP interface to talk
  # to elasticsearch.
  #
  # All protocols will use bulk requests when talking to Elasticsearch.
  #
  # The default `protocol` setting under java/jruby is "node". The default
  # `protocol` on non-java rubies is "http"
  config :protocol, :validate => [ "node", "transport", "http" ]

  # The Elasticsearch action to perform. Valid actions are: `index`, `delete`.
  #
  # Use of this setting *REQUIRES* you also configure the `document_id` setting
  # because `delete` actions all require a document id.
  #
  # What does each action do?
  #
  # - index: indexes a document (an event from Logstash).
  # - delete: deletes a document by id
  #
  # For more details on actions, check out the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
  config :action, :validate => :string, :default => "index"

  # Username and password (HTTP only)
  config :user, :validate => :string
  config :password, :validate => :password

  # SSL Configurations (HTTP only)
  #
  # Enable SSL
  config :ssl, :validate => :boolean, :default => false

  # The .cer or .pem file to validate the server's certificate
  config :cacert, :validate => :path

  # The JKS truststore to validate the server's certificate
  # Use either `:truststore` or `:cacert`
  config :truststore, :validate => :path

  # Set the truststore password
  config :truststore_password, :validate => :password
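
  # An illustrative pipeline snippet tying the options above together;
  # the host name and the values shown are placeholders, not defaults:
  #
  #     output {
  #       elasticsearch {
  #         protocol => "http"
  #         host => ["es1.example.com:9200"]
  #         index => "logstash-%{+YYYY.MM.dd}"
  #         flush_size => 5000
  #         idle_flush_time => 1
  #       }
  #     }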

  # helper function to replace placeholders
  # in index names to wildcards
  # example:
  # "logs-%{YYYY}" -> "logs-*"
  def wildcard_substitute(name)
    name.gsub(/%\{[^}]+\}/, "*")
  end

  public
  def register
    client_settings = {}

    if @protocol.nil?
      @protocol = LogStash::Environment.jruby? ? "node" : "http"
    end

    if ["node", "transport"].include?(@protocol)
      # Node or TransportClient; requires JRuby
      raise(LogStash::PluginLoadingError, "This configuration requires JRuby. If you are not using JRuby, you must set 'protocol' to 'http'. For example: output { elasticsearch { protocol => \"http\" } }") unless LogStash::Environment.jruby?

      client_settings["cluster.name"] = @cluster if @cluster
      client_settings["network.host"] = @bind_host if @bind_host
      client_settings["transport.tcp.port"] = @bind_port if @bind_port

      if @node_name
        client_settings["node.name"] = @node_name
      else
        client_settings["node.name"] = "logstash-#{Socket.gethostname}-#{$$}-#{object_id}"
      end

      @@plugins.each do |plugin|
        name = plugin.name.split('-')[-1]
        client_settings.merge!(LogStash::Outputs::ElasticSearch.const_get(name.capitalize).create_client_config(self))
      end
    end

    require "logstash/outputs/elasticsearch/protocol"

    if @port.nil?
      @port = case @protocol
        when "http"; "9200"
        when "transport", "node"; "9300-9305"
      end
    end

    if @host.nil? && @protocol == "http"
      @logger.info("No 'host' set in elasticsearch output. Defaulting to localhost")
      @host = ["localhost"]
    end

    client_settings.merge! setup_ssl()

    common_options = {
      :protocol => @protocol,
      :client_settings => client_settings
    }

    common_options.merge! setup_basic_auth()

    client_class = case @protocol
      when "transport"
        LogStash::Outputs::Elasticsearch::Protocols::TransportClient
      when "node"
        LogStash::Outputs::Elasticsearch::Protocols::NodeClient
      when /http/
        LogStash::Outputs::Elasticsearch::Protocols::HTTPClient
    end

    if @embedded
      raise(LogStash::ConfigurationError, "The 'embedded => true' setting is only valid for the elasticsearch output under JRuby. You are running #{RUBY_DESCRIPTION}") unless LogStash::Environment.jruby?
      # LogStash::Environment.load_elasticsearch_jars!

      # Default @host with embedded to localhost. This should help avoid
      # newbies tripping on ubuntu and other distros that have a default
      # firewall that blocks multicast.
      @host ||= ["localhost"]

      # Start Elasticsearch local.
      start_local_elasticsearch
    end

    @client = Array.new

    if protocol == "node" or @host.nil? # if @protocol is "node" or @host is not set
      options = {
        :host => @host,
        :port => @port,
      }.merge(common_options)
      @client << client_class.new(options)
    else # if @protocol in ["transport","http"]
      @host.each do |host|
        (_host,_port) = host.split ":"
        options = {
          :host => _host,
          :port => _port || @port,
        }.merge(common_options)
        @logger.info "Create client to elasticsearch server on #{_host}:#{_port}"
        @client << client_class.new(options)
      end # @host.each
    end

    if @manage_template
      for client in @client
        begin
          @logger.info("Automatic template management enabled", :manage_template => @manage_template.to_s)
          client.template_install(@template_name, get_template, @template_overwrite)
          break
        rescue => e
          @logger.error("Failed to install template: #{e.message}")
        end
      end # for @client loop
    end # if @manage_templates

    @logger.info("New Elasticsearch output", :cluster => @cluster,
                 :host => @host, :port => @port, :embedded => @embedded,
                 :protocol => @protocol)

    @client_idx = 0
    @current_client = @client[@client_idx]

    buffer_initialize(
      :max_items => @flush_size,
      :max_interval => @idle_flush_time,
      :logger => @logger
    )
  end # def register

  protected
  def shift_client
    @client_idx = (@client_idx+1) % @client.length
    @current_client = @client[@client_idx]
    @logger.debug? and @logger.debug("Switched current elasticsearch client to ##{@client_idx} at #{@host[@client_idx]}")
  end

  private
  def setup_ssl
    return {} unless @ssl
    if @protocol != "http"
      raise(LogStash::ConfigurationError, "SSL is not supported for '#{@protocol}'. Change the protocol to 'http' if you need SSL.")
    end
    @protocol = "https"
    if @cacert && @truststore
      raise(LogStash::ConfigurationError, "Use either \"cacert\" or \"truststore\" when configuring the CA certificate") if @truststore
    end
    ssl_options = {}
    if @cacert then
      @truststore, ssl_options[:truststore_password] = generate_jks @cacert
    elsif @truststore
      ssl_options[:truststore_password] = @truststore_password.value if @truststore_password
    end
    ssl_options[:truststore] = @truststore
    { ssl: ssl_options }
  end

  private
  def setup_basic_auth
    return {} unless @user && @password

    if @protocol =~ /http/
      {
        :user => ::URI.escape(@user, "@:"),
        :password => ::URI.escape(@password.value, "@:")
      }
    else
      raise(LogStash::ConfigurationError, "User and password parameters are not supported for '#{@protocol}'. Change the protocol to 'http' if you need them.")
    end
  end

  public
  def get_template
    if @template.nil?
      @template = ::File.expand_path('elasticsearch/elasticsearch-template.json', ::File.dirname(__FILE__))
      if !File.exists?(@template)
        raise "You must specify 'template => ...' in your elasticsearch output (I looked for '#{@template}')"
      end
    end
    template_json = IO.read(@template).gsub(/\n/,'')
    template = LogStash::Json.load(template_json)
    template['template'] = wildcard_substitute(@index)
    @logger.info("Using mapping template", :template => template)
    return template
  end # def get_template

  protected
  def start_local_elasticsearch
    @logger.info("Starting embedded Elasticsearch local node.")
    builder = org.elasticsearch.node.NodeBuilder.nodeBuilder
    # Disable 'local only' - LOGSTASH-277
    #builder.local(true)
    builder.settings.put("cluster.name", @cluster) if @cluster
    builder.settings.put("node.name", @node_name) if @node_name
    builder.settings.put("network.host", @bind_host) if @bind_host
    builder.settings.put("http.port", @embedded_http_port)

    @embedded_elasticsearch = builder.node
    @embedded_elasticsearch.start
  end # def start_local_elasticsearch

  private
  def generate_jks cert_path

    require 'securerandom'
    require 'tempfile'
    require 'java'
    import java.io.FileInputStream
    import java.io.FileOutputStream
    import java.security.KeyStore
    import java.security.cert.CertificateFactory

    jks = java.io.File.createTempFile("cert", ".jks")

    ks = KeyStore.getInstance "JKS"
    ks.load nil, nil
    cf = CertificateFactory.getInstance "X.509"
    cert = cf.generateCertificate FileInputStream.new(cert_path)
    ks.setCertificateEntry "cacert", cert
    pwd = SecureRandom.urlsafe_base64(9)
    ks.store FileOutputStream.new(jks), pwd.to_java.toCharArray
    [jks.path, pwd]
  end
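
  # The conversion above is roughly equivalent to this keytool invocation
  # (illustrative paths; the real password is generated at random):
  #
  #   keytool -importcert -noprompt -alias cacert \
  #     -file ca.pem -keystore cert.jks -storepass <password>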

  public
  def receive(event)
    return unless output?(event)

    # Set the 'type' value for the index.
    if @index_type
      type = event.sprintf(@index_type)
    else
      type = event["type"] || "logs"
    end

    index = event.sprintf(@index)

    document_id = @document_id ? event.sprintf(@document_id) : nil
    buffer_receive([event.sprintf(@action), { :_id => document_id, :_index => index, :_type => type }, event.to_hash])
  end # def receive
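
  # Each buffered entry above is a three-element array: the action name,
  # the bulk metadata, and the event body. With the defaults (`action =>
  # "index"`, no `document_id`) an entry looks roughly like this
  # (illustrative values):
  #
  #   ["index",
  #    { :_id => nil, :_index => "logstash-2014.01.01", :_type => "logs" },
  #    { "message" => "...", "@timestamp" => "..." }]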

  def flush(actions, teardown=false)
    begin
      @logger.debug? and @logger.debug "Sending bulk of actions to client[#{@client_idx}]: #{@host[@client_idx]}"
      @current_client.bulk(actions)
    rescue => e
      @logger.error "Got error to send bulk of actions to elasticsearch server at #{@host[@client_idx]} : #{e.message}"
      raise e
    ensure
      unless @protocol == "node"
        @logger.debug? and @logger.debug "Shifting current elasticsearch client"
        shift_client
      end
    end
    # TODO(sissel): Handle errors. Since bulk requests could mostly succeed
    # (aka partially fail), we need to figure out what documents need to be
    # retried.
    #
    # In the worst case, a failing flush (exception) will incur a retry from Stud::Buffer.
  end # def flush

  def teardown
    if @cacert # remove temporary jks store created from the cacert
      File.delete(@truststore)
    end
    buffer_flush(:final => true)
  end

  @@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-elasticsearch-/ }

  @@plugins.each do |plugin|
    name = plugin.name.split('-')[-1]
    require "logstash/outputs/elasticsearch/#{name}"
  end

end # class LogStash::Outputs::Elasticsearch
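
The `@@plugins` scan at the end of the class loads any installed gem named `logstash-output-elasticsearch-*` and, for the node and transport protocols, asks it for extra client settings via `create_client_config`. A minimal sketch of what such an extension could look like, inferred from that call site; the gem name `foo`, the file path, and the returned setting are hypothetical:

    # lib/logstash/outputs/elasticsearch/foo.rb in a hypothetical gem
    # named "logstash-output-elasticsearch-foo"
    class LogStash::Outputs::ElasticSearch
      # Resolved in register via const_get(name.capitalize) => Foo
      module Foo
        # Receives the output plugin instance; the returned hash is
        # merged into the node/transport client settings.
        def self.create_client_config(plugin)
          { "discovery.zen.ping.multicast.enabled" => false } # example setting
        end
      end
    end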