logstash-output-elasticsearch 0.1.19-java → 0.2.0-java

This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 88af0f35fb188226ed8a3af96835a57aef17cde6
-  data.tar.gz: 4e826e7a0dc993a21ca18a894d59fc2a73670f3d
+  metadata.gz: 6a7d8af0601791a0fa557ff0e61b995aaa514858
+  data.tar.gz: 7dfd959db33857c44ccc0085709d5e0967a87add
 SHA512:
-  metadata.gz: 542687e54823d84d895ac468a484ba80f76ff7f31909e424d26f065db1d5b167c20950f7003b6722b39ef9d6eb6aed9a546c5366ef1d249c5daf0b1fc992180b
-  data.tar.gz: f86b66b834d212bbd43ad77b48c273e1e960ff57c216c3b1129edfc1591d3377e660753d084f262cfc391a3999c1b8ae380a7a1cb42460fccb4109c20e384f7c
+  metadata.gz: 3c6af511fd72b689d3fedbae44198bf4f0b2823856289cc541905f8ae74b1929897a251deb2bc0b598cb809df3575d6d5c45f48dcee9706b7cd62b4a78f786f6
+  data.tar.gz: b0d2e8d2d12341ee0c975339e34b7dbfcb8614c1457ca59905fe3b7728159e73ca9f3071213e7c4b62cd2f3f9bc273ea0fdb0eb834549f76b6ba28bc64635764
lib/logstash/outputs/elasticsearch/elasticsearch-template.json CHANGED
@@ -5,7 +5,7 @@
   },
   "mappings" : {
     "_default_" : {
-       "_all" : {"enabled" : true},
+       "_all" : {"enabled" : true, "omit_norms" : true},
        "dynamic_templates" : [ {
          "message_field" : {
            "match" : "message",
@@ -31,7 +31,6 @@
        "geoip"  : {
          "type" : "object",
          "dynamic": true,
-         "path": "full",
          "properties" : {
            "location" : { "type" : "geo_point" }
          }
lib/logstash/outputs/elasticsearch/protocol.rb CHANGED
@@ -228,19 +228,23 @@ module LogStash::Outputs::Elasticsearch
       when "index"
         request = org.elasticsearch.action.index.IndexRequest.new(args[:_index])
         request.id(args[:_id]) if args[:_id]
+        request.routing(args[:_routing]) if args[:_routing]
         request.source(source)
       when "delete"
         request = org.elasticsearch.action.delete.DeleteRequest.new(args[:_index])
         request.id(args[:_id])
+        request.routing(args[:_routing]) if args[:_routing]
       when "create"
         request = org.elasticsearch.action.index.IndexRequest.new(args[:_index])
         request.id(args[:_id]) if args[:_id]
+        request.routing(args[:_routing]) if args[:_routing]
         request.source(source)
         request.opType("create")
       when "create_unless_exists"
         unless args[:_id].nil?
           request = org.elasticsearch.action.index.IndexRequest.new(args[:_index])
           request.id(args[:_id])
+          request.routing(args[:_routing]) if args[:_routing]
           request.source(source)
           request.opType("create")
         else
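
Note: for context, a minimal sketch of how an action tuple reaches the request builders above. This is not part of the diff; the args values are hypothetical, and `source` is assumed to be in scope as in the surrounding method:

    # Hypothetical bulk action args, as assembled by the output's receive method:
    args = { :_id => "doc-1", :_index => "logstash-2015.04.07", :_type => "logs", :_routing => "user-42" }
    request = org.elasticsearch.action.index.IndexRequest.new(args[:_index])
    request.id(args[:_id]) if args[:_id]
    request.routing(args[:_routing]) if args[:_routing]  # new in 0.2.0; nil routing leaves the request untouched
    request.source(source)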
lib/logstash/outputs/elasticsearch.rb CHANGED
@@ -81,7 +81,11 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
 
   # The document ID for the index. Useful for overwriting existing entries in
   # Elasticsearch with the same ID.
-  config :document_id, :validate => :string, :default => nil
+  config :document_id, :validate => :string
+
+  # A routing override to be applied to all processed events.
+  # This can be dynamic using the `%{foo}` syntax.
+  config :routing, :validate => :string
 
   # The name of your cluster if you set it on the Elasticsearch side. Useful
   # for discovery.
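
Note: a minimal usage sketch of the new `routing` option (not part of the diff; the `user_id` field name is hypothetical). Like `index` and `document_id`, it accepts the event-interpolated `%{field}` syntax:

    output {
      elasticsearch {
        host    => "127.0.0.1"
        index   => "logstash-%{+YYYY.MM.dd}"
        routing => "%{user_id}"   # per-event routing key
      }
    }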
@@ -299,6 +303,7 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
 
     if @embedded
       raise(LogStash::ConfigurationError, "The 'embedded => true' setting is only valid for the elasticsearch output under JRuby. You are running #{RUBY_DESCRIPTION}") unless LogStash::Environment.jruby?
+      @logger.warn("The 'embedded => true' setting is enabled. This is not recommended for production use!!!")
       # LogStash::Environment.load_elasticsearch_jars!
 
       # Default @host with embedded to localhost. This should help avoid
@@ -399,7 +404,8 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     index = event.sprintf(@index)
 
     document_id = @document_id ? event.sprintf(@document_id) : nil
-    buffer_receive([event.sprintf(@action), { :_id => document_id, :_index => index, :_type => type }, event])
+    routing = @routing ? event.sprintf(@routing) : nil
+    buffer_receive([event.sprintf(@action), { :_id => document_id, :_index => index, :_type => type, :_routing => routing }, event])
   end # def receive
 
   public
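
Note: a minimal sketch (hypothetical event values, not from the diff) of the action tuple this buffers when `routing => "%{user_id}"` is configured:

    event = LogStash::Event.new("user_id" => "u42", "message" => "hello")
    event.sprintf("%{user_id}")  # => "u42"
    # The buffered action then looks like:
    # ["index", { :_id => nil, :_index => "logstash-2015.04.07", :_type => "logs", :_routing => "u42" }, event]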
logstash-output-elasticsearch.gemspec CHANGED
@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-output-elasticsearch'
-  s.version = '0.1.19'
+  s.version = '0.2.0'
   s.licenses = ['apache-2.0']
   s.summary = "Logstash Output to Elasticsearch"
   s.description = "Output events to elasticsearch"
@@ -20,7 +20,7 @@ Gem::Specification.new do |s|
   s.metadata = { "logstash_plugin" => "true", "logstash_group" => "output" }
 
   # Jar dependencies
-  s.requirements << "jar 'org.elasticsearch:elasticsearch', '1.4.0'"
+  s.requirements << "jar 'org.elasticsearch:elasticsearch', '1.5.0'"
 
   # Gem dependencies
   s.add_runtime_dependency 'concurrent-ruby'
spec/outputs/elasticsearch_spec.rb CHANGED
@@ -73,6 +73,230 @@ describe "outputs/elasticsearch" do
     end
   end
 
+  describe "ship lots of events w/ default index_type and fixed routing key using http protocol", :elasticsearch => true do
+    # Generate a random index name
+    index = 10.times.collect { rand(10).to_s }.join("")
+    type = 10.times.collect { rand(10).to_s }.join("")
+
+    # Write 900 events so that we can verify these have been routed correctly.
+    event_count = 900
+    flush_size = rand(200) + 1
+
+    config <<-CONFIG
+      input {
+        generator {
+          message => "hello world"
+          count => #{event_count}
+          type => "#{type}"
+        }
+      }
+      output {
+        elasticsearch {
+          host => "127.0.0.1"
+          index => "#{index}"
+          flush_size => #{flush_size}
+          routing => "test"
+          protocol => "http"
+        }
+      }
+    CONFIG
+
+    agent do
+      # Try a few times to check if we have the correct number of events stored
+      # in ES.
+      #
+      # We try multiple times to allow final agent flushes as well as allowing
+      # elasticsearch to finish processing everything.
+      ftw = FTW::Agent.new
+      ftw.post!("http://localhost:9200/#{index}/_refresh")
+
+      # Wait until all events are available.
+      Stud::try(10.times) do
+        data = ""
+        response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
+        response.read_body { |chunk| data << chunk }
+        result = LogStash::Json.load(data)
+        count = result["count"]
+        insist { count } == event_count
+      end
+
+      response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*&routing=test")
+      data = ""
+      response.read_body { |chunk| data << chunk }
+      result = LogStash::Json.load(data)
+      count = result["count"]
+      insist { count } == event_count
+    end
+  end
+
+  describe "ship lots of events w/ default index_type and dynamic routing key using http protocol", :elasticsearch => true do
+    # Generate a random index name
+    index = 10.times.collect { rand(10).to_s }.join("")
+    type = 10.times.collect { rand(10).to_s }.join("")
+
+    # Write 900 events so that we can verify these have been routed correctly.
+    event_count = 900
+    flush_size = rand(200) + 1
+
+    config <<-CONFIG
+      input {
+        generator {
+          message => "test"
+          count => #{event_count}
+          type => "#{type}"
+        }
+      }
+      output {
+        elasticsearch {
+          host => "127.0.0.1"
+          index => "#{index}"
+          flush_size => #{flush_size}
+          routing => "%{message}"
+          protocol => "http"
+        }
+      }
+    CONFIG
+
+    agent do
+      # Try a few times to check if we have the correct number of events stored
+      # in ES.
+      #
+      # We try multiple times to allow final agent flushes as well as allowing
+      # elasticsearch to finish processing everything.
+      ftw = FTW::Agent.new
+      ftw.post!("http://localhost:9200/#{index}/_refresh")
+
+      # Wait until all events are available.
+      Stud::try(10.times) do
+        data = ""
+        response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
+        response.read_body { |chunk| data << chunk }
+        result = LogStash::Json.load(data)
+        count = result["count"]
+        insist { count } == event_count
+      end
+
+      response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*&routing=test")
+      data = ""
+      response.read_body { |chunk| data << chunk }
+      result = LogStash::Json.load(data)
+      count = result["count"]
+      insist { count } == event_count
+    end
+  end
+
+  describe "ship lots of events w/ default index_type and fixed routing key using transport protocol", :elasticsearch => true do
+    # Generate a random index name
+    index = 10.times.collect { rand(10).to_s }.join("")
+    type = 10.times.collect { rand(10).to_s }.join("")
+
+    # Write 900 events so that we can verify these have been routed correctly.
+    event_count = 900
+    flush_size = rand(200) + 1
+
+    config <<-CONFIG
+      input {
+        generator {
+          message => "hello world"
+          count => #{event_count}
+          type => "#{type}"
+        }
+      }
+      output {
+        elasticsearch {
+          host => "127.0.0.1"
+          index => "#{index}"
+          flush_size => #{flush_size}
+          routing => "test"
+          protocol => "transport"
+        }
+      }
+    CONFIG
+
+    agent do
+      # Try a few times to check if we have the correct number of events stored
+      # in ES.
+      #
+      # We try multiple times to allow final agent flushes as well as allowing
+      # elasticsearch to finish processing everything.
+      ftw = FTW::Agent.new
+      ftw.post!("http://localhost:9200/#{index}/_refresh")
+
+      # Wait until all events are available.
+      Stud::try(10.times) do
+        data = ""
+        response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
+        response.read_body { |chunk| data << chunk }
+        result = LogStash::Json.load(data)
+        count = result["count"]
+        insist { count } == event_count
+      end
+
+      response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*&routing=test")
+      data = ""
+      response.read_body { |chunk| data << chunk }
+      result = LogStash::Json.load(data)
+      count = result["count"]
+      insist { count } == event_count
+    end
+  end
+
+  describe "ship lots of events w/ default index_type and fixed routing key using node protocol", :elasticsearch => true do
+    # Generate a random index name
+    index = 10.times.collect { rand(10).to_s }.join("")
+    type = 10.times.collect { rand(10).to_s }.join("")
+
+    # Write 900 events so that we can verify these have been routed correctly.
+    event_count = 900
+    flush_size = rand(200) + 1
+
+    config <<-CONFIG
+      input {
+        generator {
+          message => "hello world"
+          count => #{event_count}
+          type => "#{type}"
+        }
+      }
+      output {
+        elasticsearch {
+          host => "127.0.0.1"
+          index => "#{index}"
+          flush_size => #{flush_size}
+          routing => "test"
+          protocol => "node"
+        }
+      }
+    CONFIG
+
+    agent do
+      # Try a few times to check if we have the correct number of events stored
+      # in ES.
+      #
+      # We try multiple times to allow final agent flushes as well as allowing
+      # elasticsearch to finish processing everything.
+      ftw = FTW::Agent.new
+      ftw.post!("http://localhost:9200/#{index}/_refresh")
+
+      # Wait until all events are available.
+      Stud::try(10.times) do
+        data = ""
+        response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*")
+        response.read_body { |chunk| data << chunk }
+        result = LogStash::Json.load(data)
+        count = result["count"]
+        insist { count } == event_count
+      end
+
+      response = ftw.get!("http://127.0.0.1:9200/#{index}/_count?q=*&routing=test")
+      data = ""
+      response.read_body { |chunk| data << chunk }
+      result = LogStash::Json.load(data)
+      count = result["count"]
+      insist { count } == event_count
+    end
+  end
+
   describe "node client create actions", :elasticsearch => true do
     require "logstash/outputs/elasticsearch"
     require "elasticsearch"
@@ -475,9 +699,9 @@ describe "outputs/elasticsearch" do
   describe "failures in bulk class expected behavior", :elasticsearch => true do
     let(:template) { '{"template" : "not important, will be updated by :index"}' }
     let(:event1) { LogStash::Event.new("somevalue" => 100, "@timestamp" => "2014-11-17T20:37:17.223Z", "@metadata" => {"retry_count" => 0}) }
-    let(:action1) { ["index", {:_id=>nil, :_index=>"logstash-2014.11.17", :_type=>"logs"}, event1] }
+    let(:action1) { ["index", {:_id=>nil, :_routing=>nil, :_index=>"logstash-2014.11.17", :_type=>"logs"}, event1] }
     let(:event2) { LogStash::Event.new("geoip" => { "location" => [ 0.0, 0.0] }, "@timestamp" => "2014-11-17T20:37:17.223Z", "@metadata" => {"retry_count" => 0}) }
-    let(:action2) { ["index", {:_id=>nil, :_index=>"logstash-2014.11.17", :_type=>"logs"}, event2] }
+    let(:action2) { ["index", {:_id=>nil, :_routing=>nil, :_index=>"logstash-2014.11.17", :_type=>"logs"}, event2] }
     let(:invalid_event) { LogStash::Event.new("geoip" => { "location" => "notlatlon" }, "@timestamp" => "2014-11-17T20:37:17.223Z") }
     let(:max_retries) { 3 }
 
@@ -486,8 +710,6 @@ describe "outputs/elasticsearch" do
         .any_instance.stub(:bulk).and_return(*resp)
       LogStash::Outputs::Elasticsearch::Protocols::NodeClient
         .any_instance.stub(:bulk).and_return(*resp)
-      LogStash::Outputs::Elasticsearch::Protocols::TransportClient
-        .any_instance.stub(:bulk).and_return(*resp)
     end
 
     ["node", "transport", "http"].each do |protocol|
@@ -539,7 +761,7 @@ describe "outputs/elasticsearch" do
         end
         subject.register
         subject.receive(event1)
-        subject.buffer_flush(:final => true)
+        subject.teardown
       end
 
       it "should retry actions with response status of 503" do
@@ -587,9 +809,10 @@ describe "outputs/elasticsearch" do
         subject.register
         subject.receive(invalid_event)
         expect(subject).not_to receive(:retry_push)
-        subject.buffer_flush(:final => true)
+        subject.teardown
 
         @es.indices.refresh
+        sleep(5)
         Stud::try(10.times) do
           r = @es.search
           insist { r["hits"]["total"] } == 0
@@ -600,9 +823,10 @@ describe "outputs/elasticsearch" do
         subject.register
         subject.receive(event1)
         expect(subject).not_to receive(:retry_push)
-        subject.buffer_flush(:final => true)
+        subject.teardown
 
         @es.indices.refresh
+        sleep(5)
        Stud::try(10.times) do
           r = @es.search
           insist { r["hits"]["total"] } == 1
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-output-elasticsearch
 version: !ruby/object:Gem::Version
-  version: 0.1.19
+  version: 0.2.0
 platform: java
 authors:
 - Elasticsearch
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-03-26 00:00:00.000000000 Z
+date: 2015-04-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: concurrent-ruby
@@ -235,7 +235,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements:
-- jar 'org.elasticsearch:elasticsearch', '1.4.0'
+- jar 'org.elasticsearch:elasticsearch', '1.5.0'
 rubyforge_project:
 rubygems_version: 2.1.9
 signing_key: