logstash-output-elasticsearch 0.1.8-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +4 -0
- data/Gemfile +3 -0
- data/LICENSE +13 -0
- data/Rakefile +1 -0
- data/lib/logstash/outputs/elasticsearch.rb +476 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template.json +42 -0
- data/lib/logstash/outputs/elasticsearch/protocol.rb +253 -0
- data/logstash-output-elasticsearch.gemspec +42 -0
- data/spec/outputs/elasticsearch_spec.rb +517 -0
- metadata +206 -0
@@ -0,0 +1,42 @@
|
|
1
|
+
{
|
2
|
+
"template" : "logstash-*",
|
3
|
+
"settings" : {
|
4
|
+
"index.refresh_interval" : "5s"
|
5
|
+
},
|
6
|
+
"mappings" : {
|
7
|
+
"_default_" : {
|
8
|
+
"_all" : {"enabled" : true},
|
9
|
+
"dynamic_templates" : [ {
|
10
|
+
"message_field" : {
|
11
|
+
"match" : "message",
|
12
|
+
"match_mapping_type" : "string",
|
13
|
+
"mapping" : {
|
14
|
+
"type" : "string", "index" : "analyzed", "omit_norms" : true
|
15
|
+
}
|
16
|
+
}
|
17
|
+
}, {
|
18
|
+
"string_fields" : {
|
19
|
+
"match" : "*",
|
20
|
+
"match_mapping_type" : "string",
|
21
|
+
"mapping" : {
|
22
|
+
"type" : "string", "index" : "analyzed", "omit_norms" : true,
|
23
|
+
"fields" : {
|
24
|
+
"raw" : {"type": "string", "index" : "not_analyzed", "ignore_above" : 256}
|
25
|
+
}
|
26
|
+
}
|
27
|
+
}
|
28
|
+
} ],
|
29
|
+
"properties" : {
|
30
|
+
"@version": { "type": "string", "index": "not_analyzed" },
|
31
|
+
"geoip" : {
|
32
|
+
"type" : "object",
|
33
|
+
"dynamic": true,
|
34
|
+
"path": "full",
|
35
|
+
"properties" : {
|
36
|
+
"location" : { "type" : "geo_point" }
|
37
|
+
}
|
38
|
+
}
|
39
|
+
}
|
40
|
+
}
|
41
|
+
}
|
42
|
+
}
|
@@ -0,0 +1,253 @@
|
|
1
|
+
require "logstash/outputs/elasticsearch"
|
2
|
+
require "cabin"
|
3
|
+
|
4
|
+
module LogStash::Outputs::Elasticsearch
|
5
|
+
module Protocols
|
6
|
+
class Base
  private

  # Common behaviour shared by all protocol adapters. Subclasses must
  # implement #build_client, #bulk, #template_exists? and #template_put.
  def initialize(options={})
    # host(s), port, cluster
    @logger = Cabin::Channel.get
  end

  # Lazily build and memoize the underlying client.
  def client
    return @client if @client
    @client = build_client(@options)
    return @client
  end # def client

  # Install the named index template unless it already exists
  # (pass force=true to overwrite an existing template).
  def template_install(name, template, force=false)
    if template_exists?(name) && !force
      @logger.debug("Found existing Elasticsearch template. Skipping template management", :name => name)
      return
    end
    template_put(name, template)
  end

  # Do a bulk request with the given actions.
  #
  # 'actions' is expected to be an array of bulk requests as string json
  # values.
  #
  # Each 'action' becomes a single line in the bulk api call. For more
  # details on the format of each.
  def bulk(actions)
    # NotImplementedError is Ruby's standard exception for abstract
    # methods; the original raised the undefined constant NotImplemented,
    # which would itself fail with a NameError instead.
    raise NotImplementedError, "You must implement this yourself"
    # bulk([
    # '{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }',
    # '{ "field1" : "value1" }'
    #])
  end

  public(:initialize, :template_install)
end
|
45
|
+
|
46
|
+
class HTTPClient < Base
  private

  DEFAULT_OPTIONS = {
    :port => 9200
  }

  # Build an HTTP protocol adapter on top of the elasticsearch-ruby gem,
  # using the manticore transport.
  def initialize(options={})
    super
    require "elasticsearch" # gem 'elasticsearch-ruby'
    require "base64" # Base64 is used below for the basic-auth header
    # manticore http transport
    require "elasticsearch/transport/transport/http/manticore"
    @options = DEFAULT_OPTIONS.merge(options)
    @client = client
  end

  # Construct the elasticsearch-ruby client from our option hash.
  def build_client(options)
    uri = "#{options[:protocol]}://#{options[:host]}:#{options[:port]}"

    client_options = {
      :host => [uri],
      # Default to an empty hash so the :ssl delete below cannot raise
      # NoMethodError when no :client_settings were provided.
      :transport_options => options[:client_settings] || {}
    }
    client_options[:transport_class] = ::Elasticsearch::Transport::Transport::HTTP::Manticore
    client_options[:ssl] = client_options[:transport_options].delete(:ssl)

    if options[:user] && options[:password] then
      token = Base64.strict_encode64(options[:user] + ":" + options[:password])
      client_options[:headers] = { "Authorization" => "Basic #{token}" }
    end

    Elasticsearch::Client.new client_options
  end

  # Send the given [action, metadata, source] tuples as one _bulk call.
  def bulk(actions)
    @client.bulk(:body => actions.collect do |action, args, source|
      if source
        next [ { action => args }, source ]
      else
        next { action => args }
      end
    end.flatten)
  end # def bulk

  def template_exists?(name)
    @client.indices.get_template(:name => name)
    return true
  rescue Elasticsearch::Transport::Transport::Errors::NotFound
    return false
  end # def template_exists?

  def template_put(name, template)
    @client.indices.put_template(:name => name, :body => template)
  end # template_put

  public(:bulk)
end # class HTTPClient
|
103
|
+
|
104
|
+
class NodeClient < Base
  private

  DEFAULT_OPTIONS = {
    :port => 9300,
  }

  # JRuby node-client adapter: joins the cluster as a client-only node.
  def initialize(options={})
    super
    require "java"
    @options = DEFAULT_OPTIONS.merge(options)
    setup(@options)
    @client = client
  end # def initialize

  # Expose the settings builder (TransportClient#build_client reads it).
  def settings
    return @settings
  end

  # Build node settings: unicast discovery when hosts are configured,
  # client-only mode, HTTP disabled, plus any raw :client_settings.
  def setup(options={})
    @settings = org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder
    if options[:host]
      @settings.put("discovery.zen.ping.multicast.enabled", false)
      @settings.put("discovery.zen.ping.unicast.hosts", hosts(options))
    end

    @settings.put("node.client", true)
    @settings.put("http.enabled", false)

    if options[:client_settings]
      options[:client_settings].each do |key, value|
        @settings.put(key, value)
      end
    end

    return @settings
  end

  # Render options[:host]/options[:port] as a comma-separated unicast
  # host list, e.g. "a:9300,b:9300". A "host:port" entry keeps its own
  # port; a port range such as "9300-9302" expands to one entry per port.
  def hosts(options)
    # http://www.elasticsearch.org/guide/reference/modules/discovery/zen/
    # Normalizing with Array() lets a single host string and an array of
    # hosts share one code path; previously the two branches duplicated
    # the exact same logic. Behaviour is unchanged.
    result = Array(options[:host]).collect do |host|
      if host.to_s =~ /^.+:.+$/
        # For host in format: host:port, ignore options[:port]
        host
      elsif options[:port].to_s =~ /^\d+-\d+$/
        # Port ranges are 'host[port1-port2]' according to
        # http://www.elasticsearch.org/guide/reference/modules/discovery/zen/
        # However, it seems to only query the first port.
        # So generate our own list of unicast hosts to scan.
        Range.new(*options[:port].split("-")).collect { |p| "#{host}:#{p}" }
      else
        "#{host}:#{options[:port]}"
      end
    end
    result.flatten.join(",")
  end # def hosts

  def build_client(options)
    nodebuilder = org.elasticsearch.node.NodeBuilder.nodeBuilder
    return nodebuilder.settings(@settings).node.client
  end # def build_client

  # Execute the given tuples as one Java bulk request.
  def bulk(actions)
    # Actions an array of [ action, action_metadata, source ]
    prep = @client.prepareBulk
    actions.each do |action, args, source|
      prep.add(build_request(action, args, source))
    end
    response = prep.execute.actionGet()

    # TODO(sissel): What format should the response be in?
  end # def bulk

  # Translate one bulk tuple into the matching Java request object.
  # NOTE(review): an unrecognized action leaves `request` nil, so the
  # request.type call below would raise NoMethodError — confirm callers
  # only ever pass "index" or "delete".
  def build_request(action, args, source)
    case action
    when "index"
      request = org.elasticsearch.action.index.IndexRequest.new(args[:_index])
      request.id(args[:_id]) if args[:_id]
      request.source(source)
    when "delete"
      request = org.elasticsearch.action.delete.DeleteRequest.new(args[:_index])
      request.id(args[:_id])
    #when "update"
    #when "create"
    end # case action

    request.type(args[:_type]) if args[:_type]
    return request
  end # def build_request

  def template_exists?(name)
    request = org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder.new(@client.admin.indices, name)
    response = request.get
    return !response.getIndexTemplates.isEmpty
  end # def template_exists?

  def template_put(name, template)
    request = org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder.new(@client.admin.indices, name)
    request.setSource(LogStash::Json.dump(template))

    # execute the request and get the response, if it fails, we'll get an exception.
    request.get
  end # template_put

  public(:initialize, :bulk)
end # class NodeClient
|
228
|
+
|
229
|
+
class TransportClient < NodeClient
  private

  # Build a Java TransportClient and, when a host was configured, point
  # it at that single transport address (host + integer port).
  def build_client(options)
    transport = org.elasticsearch.client.transport.TransportClient.new(settings.build)

    if options[:host]
      address = org.elasticsearch.common.transport.InetSocketTransportAddress.new(
        options[:host], options[:port].to_i
      )
      transport.addTransportAddress(address)
    end

    transport
  end # def build_client
end # class TransportClient
|
245
|
+
end # module Protocols
|
246
|
+
|
247
|
+
# Empty placeholder marker classes for the request kinds the protocol
# adapters issue. They carry no behaviour and are not referenced by the
# visible code in this file — presumably reserved for future use.
module Requests
  class GetIndexTemplates; end
  class Bulk; end
  class Index; end
  class Delete; end
end
|
253
|
+
end
|
@@ -0,0 +1,42 @@
|
|
1
|
+
# Gem specification for the logstash-output-elasticsearch plugin.
Gem::Specification.new do |s|

  s.name = 'logstash-output-elasticsearch'
  s.version = '0.1.8'
  # Use the SPDX identifier so RubyGems recognizes the license; the
  # free-form 'Apache License (2.0)' string triggers a validation warning.
  s.licenses = ['Apache-2.0']
  s.summary = "Logstash Output to Elasticsearch"
  s.description = "Output events to elasticsearch"
  s.authors = ["Elasticsearch"]
  s.email = 'info@elasticsearch.com'
  s.homepage = "http://logstash.net/"
  s.require_paths = ["lib"]

  # Files
  s.files = `git ls-files`.split($\)

  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "output" }

  # Jar dependencies
  s.requirements << "jar 'org.elasticsearch:elasticsearch', '1.4.0'"

  # Gem dependencies
  s.add_runtime_dependency 'elasticsearch', ['>= 1.0.6', '~> 1.0']
  s.add_runtime_dependency 'stud', ['>= 0.0.17', '~> 0.0']
  s.add_runtime_dependency 'cabin', ['~> 0.6']
  s.add_runtime_dependency 'logstash', '>= 1.4.0', '< 2.0.0'
  s.add_runtime_dependency 'jar-dependencies'

  s.add_development_dependency 'ftw', ['>= 0.0.40', '~> 0']
  s.add_development_dependency 'logstash-input-generator'


  # manticore is a JRuby-only HTTP client, so only the java platform gem
  # depends on it.
  if RUBY_PLATFORM == 'java'
    s.platform = RUBY_PLATFORM
    s.add_runtime_dependency "manticore", '~> 0.3'
  end

  s.add_development_dependency 'logstash-devutils'
end
|
@@ -0,0 +1,517 @@
|
|
1
|
+
require "logstash/devutils/rspec/spec_helper"
|
2
|
+
require "ftw"
|
3
|
+
require "logstash/plugin"
|
4
|
+
require "logstash/json"
|
5
|
+
|
6
|
+
describe "outputs/elasticsearch" do
|
7
|
+
|
8
|
+
it "should register" do
|
9
|
+
output = LogStash::Plugin.lookup("output", "elasticsearch").new("embedded" => "false", "protocol" => "transport", "manage_template" => "false")
|
10
|
+
|
11
|
+
# register will try to load jars and raise if it cannot find jars
|
12
|
+
expect {output.register}.to_not raise_error
|
13
|
+
end
|
14
|
+
|
15
|
+
describe "ship lots of events w/ default index_type", :elasticsearch => true do
|
16
|
+
# Generate a random index name
|
17
|
+
index = 10.times.collect { rand(10).to_s }.join("")
|
18
|
+
type = 10.times.collect { rand(10).to_s }.join("")
|
19
|
+
|
20
|
+
# Write about 10000 events. Add jitter to increase likeliness of finding
|
21
|
+
# boundary-related bugs.
|
22
|
+
event_count = 10000 + rand(500)
|
23
|
+
flush_size = rand(200) + 1
|
24
|
+
|
25
|
+
config <<-CONFIG
|
26
|
+
input {
|
27
|
+
generator {
|
28
|
+
message => "hello world"
|
29
|
+
count => #{event_count}
|
30
|
+
type => "#{type}"
|
31
|
+
}
|
32
|
+
}
|
33
|
+
output {
|
34
|
+
elasticsearch {
|
35
|
+
host => "127.0.0.1"
|
36
|
+
index => "#{index}"
|
37
|
+
flush_size => #{flush_size}
|
38
|
+
}
|
39
|
+
}
|
40
|
+
CONFIG
|
41
|
+
|
42
|
+
agent do
|
43
|
+
# Try a few times to check if we have the correct number of events stored
|
44
|
+
# in ES.
|
45
|
+
#
|
46
|
+
# We try multiple times to allow final agent flushes as well as allowing
|
47
|
+
# elasticsearch to finish processing everything.
|
48
|
+
ftw = FTW::Agent.new
|
49
|
+
ftw.post!("http://localhost:9200/#{index}/_refresh")
|
50
|
+
|
51
|
+
# Wait until all events are available.
|
52
|
+
Stud::try(10.times) do
|
53
|
+
data = ""
|
54
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
|
55
|
+
response.read_body { |chunk| data << chunk }
|
56
|
+
result = LogStash::Json.load(data)
|
57
|
+
count = result["count"]
|
58
|
+
insist { count } == event_count
|
59
|
+
end
|
60
|
+
|
61
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_search?q=*&size=1000")
|
62
|
+
data = ""
|
63
|
+
response.read_body { |chunk| data << chunk }
|
64
|
+
result = LogStash::Json.load(data)
|
65
|
+
result["hits"]["hits"].each do |doc|
|
66
|
+
# With no 'index_type' set, the document type should be the type
|
67
|
+
# set on the input
|
68
|
+
insist { doc["_type"] } == type
|
69
|
+
insist { doc["_index"] } == index
|
70
|
+
insist { doc["_source"]["message"] } == "hello world"
|
71
|
+
end
|
72
|
+
end
|
73
|
+
end
|
74
|
+
|
75
|
+
describe "testing index_type", :elasticsearch => true do
|
76
|
+
describe "no type value" do
|
77
|
+
# Generate a random index name
|
78
|
+
index = 10.times.collect { rand(10).to_s }.join("")
|
79
|
+
event_count = 100 + rand(100)
|
80
|
+
flush_size = rand(200) + 1
|
81
|
+
|
82
|
+
config <<-CONFIG
|
83
|
+
input {
|
84
|
+
generator {
|
85
|
+
message => "hello world"
|
86
|
+
count => #{event_count}
|
87
|
+
}
|
88
|
+
}
|
89
|
+
output {
|
90
|
+
elasticsearch {
|
91
|
+
host => "127.0.0.1"
|
92
|
+
index => "#{index}"
|
93
|
+
flush_size => #{flush_size}
|
94
|
+
}
|
95
|
+
}
|
96
|
+
CONFIG
|
97
|
+
|
98
|
+
agent do
|
99
|
+
ftw = FTW::Agent.new
|
100
|
+
ftw.post!("http://localhost:9200/#{index}/_refresh")
|
101
|
+
|
102
|
+
# Wait until all events are available.
|
103
|
+
Stud::try(10.times) do
|
104
|
+
data = ""
|
105
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
|
106
|
+
response.read_body { |chunk| data << chunk }
|
107
|
+
result = LogStash::Json.load(data)
|
108
|
+
count = result["count"]
|
109
|
+
insist { count } == event_count
|
110
|
+
end
|
111
|
+
|
112
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_search?q=*&size=1000")
|
113
|
+
data = ""
|
114
|
+
response.read_body { |chunk| data << chunk }
|
115
|
+
result = LogStash::Json.load(data)
|
116
|
+
result["hits"]["hits"].each do |doc|
|
117
|
+
insist { doc["_type"] } == "logs"
|
118
|
+
end
|
119
|
+
end
|
120
|
+
end
|
121
|
+
|
122
|
+
describe "default event type value" do
|
123
|
+
# Generate a random index name
|
124
|
+
index = 10.times.collect { rand(10).to_s }.join("")
|
125
|
+
event_count = 100 + rand(100)
|
126
|
+
flush_size = rand(200) + 1
|
127
|
+
|
128
|
+
config <<-CONFIG
|
129
|
+
input {
|
130
|
+
generator {
|
131
|
+
message => "hello world"
|
132
|
+
count => #{event_count}
|
133
|
+
type => "generated"
|
134
|
+
}
|
135
|
+
}
|
136
|
+
output {
|
137
|
+
elasticsearch {
|
138
|
+
host => "127.0.0.1"
|
139
|
+
index => "#{index}"
|
140
|
+
flush_size => #{flush_size}
|
141
|
+
}
|
142
|
+
}
|
143
|
+
CONFIG
|
144
|
+
|
145
|
+
agent do
|
146
|
+
ftw = FTW::Agent.new
|
147
|
+
ftw.post!("http://localhost:9200/#{index}/_refresh")
|
148
|
+
|
149
|
+
# Wait until all events are available.
|
150
|
+
Stud::try(10.times) do
|
151
|
+
data = ""
|
152
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
|
153
|
+
response.read_body { |chunk| data << chunk }
|
154
|
+
result = LogStash::Json.load(data)
|
155
|
+
count = result["count"]
|
156
|
+
insist { count } == event_count
|
157
|
+
end
|
158
|
+
|
159
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_search?q=*&size=1000")
|
160
|
+
data = ""
|
161
|
+
response.read_body { |chunk| data << chunk }
|
162
|
+
result = LogStash::Json.load(data)
|
163
|
+
result["hits"]["hits"].each do |doc|
|
164
|
+
insist { doc["_type"] } == "generated"
|
165
|
+
end
|
166
|
+
end
|
167
|
+
end
|
168
|
+
end
|
169
|
+
|
170
|
+
describe "action => ...", :elasticsearch => true do
|
171
|
+
index_name = 10.times.collect { rand(10).to_s }.join("")
|
172
|
+
|
173
|
+
config <<-CONFIG
|
174
|
+
input {
|
175
|
+
generator {
|
176
|
+
message => "hello world"
|
177
|
+
count => 100
|
178
|
+
}
|
179
|
+
}
|
180
|
+
output {
|
181
|
+
elasticsearch {
|
182
|
+
host => "127.0.0.1"
|
183
|
+
index => "#{index_name}"
|
184
|
+
}
|
185
|
+
}
|
186
|
+
CONFIG
|
187
|
+
|
188
|
+
|
189
|
+
agent do
|
190
|
+
ftw = FTW::Agent.new
|
191
|
+
ftw.post!("http://localhost:9200/#{index_name}/_refresh")
|
192
|
+
|
193
|
+
# Wait until all events are available.
|
194
|
+
Stud::try(10.times) do
|
195
|
+
data = ""
|
196
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index_name}/_count?q=*")
|
197
|
+
response.read_body { |chunk| data << chunk }
|
198
|
+
result = LogStash::Json.load(data)
|
199
|
+
count = result["count"]
|
200
|
+
insist { count } == 100
|
201
|
+
end
|
202
|
+
|
203
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index_name}/_search?q=*&size=1000")
|
204
|
+
data = ""
|
205
|
+
response.read_body { |chunk| data << chunk }
|
206
|
+
result = LogStash::Json.load(data)
|
207
|
+
result["hits"]["hits"].each do |doc|
|
208
|
+
insist { doc["_type"] } == "logs"
|
209
|
+
end
|
210
|
+
end
|
211
|
+
|
212
|
+
describe "default event type value", :elasticsearch => true do
|
213
|
+
# Generate a random index name
|
214
|
+
index = 10.times.collect { rand(10).to_s }.join("")
|
215
|
+
event_count = 100 + rand(100)
|
216
|
+
flush_size = rand(200) + 1
|
217
|
+
|
218
|
+
config <<-CONFIG
|
219
|
+
input {
|
220
|
+
generator {
|
221
|
+
message => "hello world"
|
222
|
+
count => #{event_count}
|
223
|
+
type => "generated"
|
224
|
+
}
|
225
|
+
}
|
226
|
+
output {
|
227
|
+
elasticsearch {
|
228
|
+
host => "127.0.0.1"
|
229
|
+
index => "#{index}"
|
230
|
+
flush_size => #{flush_size}
|
231
|
+
}
|
232
|
+
}
|
233
|
+
CONFIG
|
234
|
+
|
235
|
+
agent do
|
236
|
+
ftw = FTW::Agent.new
|
237
|
+
ftw.post!("http://localhost:9200/#{index}/_refresh")
|
238
|
+
|
239
|
+
# Wait until all events are available.
|
240
|
+
Stud::try(10.times) do
|
241
|
+
data = ""
|
242
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
|
243
|
+
response.read_body { |chunk| data << chunk }
|
244
|
+
result = LogStash::Json.load(data)
|
245
|
+
count = result["count"]
|
246
|
+
insist { count } == event_count
|
247
|
+
end
|
248
|
+
|
249
|
+
response = ftw.get!("http://127.0.0.1:9200/#{index}/_search?q=*&size=1000")
|
250
|
+
data = ""
|
251
|
+
response.read_body { |chunk| data << chunk }
|
252
|
+
result = LogStash::Json.load(data)
|
253
|
+
result["hits"]["hits"].each do |doc|
|
254
|
+
insist { doc["_type"] } == "generated"
|
255
|
+
end
|
256
|
+
end
|
257
|
+
end
|
258
|
+
end
|
259
|
+
|
260
|
+
# Verifies that a date placeholder (e.g. %{YYYY}) in the configured index
# name is rewritten to a wildcard in the installed index template.
describe "wildcard substitution in index templates", :todo => true do
  require "logstash/outputs/elasticsearch"

  let(:template) { '{"template" : "not important, will be updated by :index"}' }

  # Build output-plugin settings targeting the given index name.
  def settings_with_index(index)
    return {
      "manage_template" => true,
      "template_overwrite" => true,
      "protocol" => "http",
      "host" => "localhost",
      "index" => "#{index}"
    }
  end

  # Typo fix: example description previously read "substitude".
  it "should substitute placeholders" do
    IO.stub(:read).with(anything) { template }
    es_output = LogStash::Outputs::ElasticSearch.new(settings_with_index("index-%{YYYY}"))
    insist { es_output.get_template['template'] } == "index-*"
  end

  it "should do nothing to an index with no placeholder" do
    IO.stub(:read).with(anything) { template }
    es_output = LogStash::Outputs::ElasticSearch.new(settings_with_index("index"))
    insist { es_output.get_template['template'] } == "index"
  end
end
|
287
|
+
|
288
|
+
# Integration spec: installs the bundled index template against a live
# Elasticsearch for each protocol and checks the mapping behaviour it
# promises (.raw subfields, numeric dynamic mapping, geo_point, stopwords).
describe "index template expected behavior", :elasticsearch => true do
  ["node", "transport", "http"].each do |protocol|
    context "with protocol => #{protocol}" do
      subject do
        require "logstash/outputs/elasticsearch"
        settings = {
          "manage_template" => true,
          "template_overwrite" => true,
          "protocol" => protocol,
          "host" => "localhost"
        }
        next LogStash::Outputs::ElasticSearch.new(settings)
      end

      before :each do
        # Delete all templates first.
        require "elasticsearch"

        # Clean ES of data before we start.
        @es = Elasticsearch::Client.new
        @es.indices.delete_template(:name => "*")

        # This can fail if there are no indexes, ignore failure.
        @es.indices.delete(:index => "*") rescue nil

        subject.register

        subject.receive(LogStash::Event.new("message" => "sample message here"))
        subject.receive(LogStash::Event.new("somevalue" => 100))
        subject.receive(LogStash::Event.new("somevalue" => 10))
        subject.receive(LogStash::Event.new("somevalue" => 1))
        subject.receive(LogStash::Event.new("country" => "us"))
        subject.receive(LogStash::Event.new("country" => "at"))
        subject.receive(LogStash::Event.new("geoip" => { "location" => [ 0.0, 0.0 ] }))
        subject.buffer_flush(:final => true)
        @es.indices.refresh

        # Wait or fail until everything's indexed.
        Stud::try(20.times) do
          r = @es.search
          insist { r["hits"]["total"] } == 7
        end
      end

      it "permits phrase searching on string fields" do
        results = @es.search(:q => "message:\"sample message\"")
        insist { results["hits"]["total"] } == 1
        insist { results["hits"]["hits"][0]["_source"]["message"] } == "sample message here"
      end

      it "numbers dynamically map to a numeric type and permit range queries" do
        results = @es.search(:q => "somevalue:[5 TO 105]")
        insist { results["hits"]["total"] } == 2

        values = results["hits"]["hits"].collect { |r| r["_source"]["somevalue"] }
        insist { values }.include?(10)
        insist { values }.include?(100)
        reject { values }.include?(1)
      end

      # Typo fix: example description previously read "fro any string field".
      it "creates .raw field for any string field which is not_analyzed" do
        results = @es.search(:q => "message.raw:\"sample message here\"")
        insist { results["hits"]["total"] } == 1
        insist { results["hits"]["hits"][0]["_source"]["message"] } == "sample message here"

        # partial or terms should not work.
        results = @es.search(:q => "message.raw:\"sample\"")
        insist { results["hits"]["total"] } == 0
      end

      it "make [geoip][location] a geo_point" do
        results = @es.search(:body => { "filter" => { "geo_distance" => { "distance" => "1000km", "geoip.location" => { "lat" => 0.5, "lon" => 0.5 } } } })
        insist { results["hits"]["total"] } == 1
        insist { results["hits"]["hits"][0]["_source"]["geoip"]["location"] } == [ 0.0, 0.0 ]
      end

      it "should index stopwords like 'at' " do
        results = @es.search(:body => { "facets" => { "t" => { "terms" => { "field" => "country" } } } })["facets"]["t"]
        terms = results["terms"].collect { |t| t["term"] }

        insist { terms }.include?("us")

        # 'at' is a stopword, make sure stopwords are not ignored.
        insist { terms }.include?("at")
      end
    end
  end
end
|
376
|
+
|
377
|
+
# Unit spec for the protocol adapters' host-list normalization.
describe "elasticsearch protocol", :elasticsearch => true do
  # ElasticSearch related jars
  #LogStash::Environment.load_elasticsearch_jars!
  # Load elasticsearch protocol
  require "logstash/outputs/elasticsearch/protocol"

  describe "elasticsearch node client" do
    # Test ElasticSearch Node Client
    # Reference: http://www.elasticsearch.org/guide/reference/modules/discovery/zen/

    it "should support hosts in both string and array" do
      # Because we defined *hosts* method in NodeClient as private,
      # we use *obj.send :method,[args...]* to call method *hosts*
      client = LogStash::Outputs::Elasticsearch::Protocols::NodeClient.new

      # Node client should support host in string
      # Case 1: default :host in string
      insist { client.send :hosts, :host => "host",:port => 9300 } == "host:9300"
      # Case 2: :port =~ /^\d+-\d+$/ (a port range)
      insist { client.send :hosts, :host => "host",:port => "9300-9302"} == "host:9300,host:9301,host:9302"
      # Case 3: :host =~ /^.+:.+$/
      insist { client.send :hosts, :host => "host:9303",:port => 9300 } == "host:9303"
      # Case 4: :host =~ /^.+:.+$/ and :port =~ /^\d+-\d+$/
      insist { client.send :hosts, :host => "host:9303",:port => "9300-9302"} == "host:9303"

      # Node client should support host in array
      # Case 5: :host in array with single item
      insist { client.send :hosts, :host => ["host"],:port => 9300 } == ("host:9300")
      # Case 6: :host in array with more than one items
      insist { client.send :hosts, :host => ["host1","host2"],:port => 9300 } == "host1:9300,host2:9300"
      # Case 7: :host in array with more than one items and :port =~ /^\d+-\d+$/
      insist { client.send :hosts, :host => ["host1","host2"],:port => "9300-9302" } == "host1:9300,host1:9301,host1:9302,host2:9300,host2:9301,host2:9302"
      # Case 8: :host in array with more than one items and some :host =~ /^.+:.+$/
      insist { client.send :hosts, :host => ["host1","host2:9303"],:port => 9300 } == "host1:9300,host2:9303"
      # Case 9: :host in array with more than one items, :port =~ /^\d+-\d+$/ and some :host =~ /^.+:.+$/
      insist { client.send :hosts, :host => ["host1","host2:9303"],:port => "9300-9302" } == "host1:9300,host1:9301,host1:9302,host2:9303"
    end
  end
end
|
416
|
+
|
417
|
+
describe "Authentication option" do
|
418
|
+
["node", "transport"].each do |protocol|
|
419
|
+
context "with protocol => #{protocol}" do
|
420
|
+
subject do
|
421
|
+
require "logstash/outputs/elasticsearch"
|
422
|
+
settings = {
|
423
|
+
"protocol" => protocol,
|
424
|
+
"node_name" => "logstash",
|
425
|
+
"cluster" => "elasticsearch",
|
426
|
+
"host" => "node01",
|
427
|
+
"user" => "test",
|
428
|
+
"password" => "test"
|
429
|
+
}
|
430
|
+
next LogStash::Outputs::ElasticSearch.new(settings)
|
431
|
+
end
|
432
|
+
|
433
|
+
it "should fail in register" do
|
434
|
+
expect {subject.register}.to raise_error
|
435
|
+
end
|
436
|
+
end
|
437
|
+
end
|
438
|
+
end
|
439
|
+
|
440
|
+
describe "SSL option" do
|
441
|
+
["node", "transport"].each do |protocol|
|
442
|
+
context "with protocol => #{protocol}" do
|
443
|
+
subject do
|
444
|
+
require "logstash/outputs/elasticsearch"
|
445
|
+
settings = {
|
446
|
+
"protocol" => protocol,
|
447
|
+
"node_name" => "logstash",
|
448
|
+
"cluster" => "elasticsearch",
|
449
|
+
"host" => "node01",
|
450
|
+
"ssl" => true
|
451
|
+
}
|
452
|
+
next LogStash::Outputs::ElasticSearch.new(settings)
|
453
|
+
end
|
454
|
+
|
455
|
+
it "should fail in register" do
|
456
|
+
expect {subject.register}.to raise_error
|
457
|
+
end
|
458
|
+
end
|
459
|
+
end
|
460
|
+
end
|
461
|
+
|
462
|
+
describe "send messages to ElasticSearch using HTTPS", :elasticsearch_secure => true do
|
463
|
+
subject do
|
464
|
+
require "logstash/outputs/elasticsearch"
|
465
|
+
settings = {
|
466
|
+
"protocol" => "http",
|
467
|
+
"node_name" => "logstash",
|
468
|
+
"cluster" => "elasticsearch",
|
469
|
+
"host" => "node01",
|
470
|
+
"user" => "user",
|
471
|
+
"password" => "changeme",
|
472
|
+
"ssl" => true,
|
473
|
+
"cacert" => "/tmp/ca/certs/cacert.pem",
|
474
|
+
# or
|
475
|
+
#"truststore" => "/tmp/ca/truststore.jks",
|
476
|
+
#"truststore_password" => "testeteste"
|
477
|
+
}
|
478
|
+
next LogStash::Outputs::ElasticSearch.new(settings)
|
479
|
+
end
|
480
|
+
|
481
|
+
before :each do
|
482
|
+
subject.register
|
483
|
+
end
|
484
|
+
|
485
|
+
it "sends events to ES" do
|
486
|
+
expect {
|
487
|
+
subject.receive(LogStash::Event.new("message" => "sample message here"))
|
488
|
+
subject.buffer_flush(:final => true)
|
489
|
+
}.to_not raise_error
|
490
|
+
end
|
491
|
+
end
|
492
|
+
|
493
|
+
describe "connect using HTTP Authentication", :elasticsearch_secure => true do
|
494
|
+
subject do
|
495
|
+
require "logstash/outputs/elasticsearch"
|
496
|
+
settings = {
|
497
|
+
"protocol" => "http",
|
498
|
+
"cluster" => "elasticsearch",
|
499
|
+
"host" => "node01",
|
500
|
+
"user" => "user",
|
501
|
+
"password" => "changeme",
|
502
|
+
}
|
503
|
+
next LogStash::Outputs::ElasticSearch.new(settings)
|
504
|
+
end
|
505
|
+
|
506
|
+
before :each do
|
507
|
+
subject.register
|
508
|
+
end
|
509
|
+
|
510
|
+
it "sends events to ES" do
|
511
|
+
expect {
|
512
|
+
subject.receive(LogStash::Event.new("message" => "sample message here"))
|
513
|
+
subject.buffer_flush(:final => true)
|
514
|
+
}.to_not raise_error
|
515
|
+
end
|
516
|
+
end
|
517
|
+
end
|