logstash-output-elasticsearch 2.4.1-java → 2.4.2-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 9f8d5b26eb15f7a372ac43583a962009103f7a4a
4
- data.tar.gz: 0b98bd6bf747ec4c576ec7426d67d93a7dc583a9
3
+ metadata.gz: 22b9b67e7eada407308a8da6bb3ab1c9dabe4e11
4
+ data.tar.gz: e434712af48381f69dbd6eb75a3556614ea40e93
5
5
  SHA512:
6
- metadata.gz: dba81882f5f3fa79f7dd4b9b307f8a95dd53d5ec1876112226fe3e67a2991bad3a03260d704e13e905b194e9bdb05f7284453a1415b03631e9ac9a472fea78e7
7
- data.tar.gz: bc25879d936c938d8c21fe85cc98b45fd7f9671a2ea0cd3c2e7120ec910b5a8ea6719ec8160b45af87800b24810a2cf59572a9fa13980332a3eccd5ef02be3f5
6
+ metadata.gz: b4046883d73f45496ad777cf3e3708a619e4172a07d7c8c682e459a859eac562ce130346f0164de8b8477ce14080d3bfc1bf4b5584111ac861ee0eff961d4b89
7
+ data.tar.gz: 1ece9c0def2d670ebd7ffd49308c6838aa1b0c4f1c12a3aef226cda00ebf0f3e348673b68720cc396ad4ddd45f9fa7200d6017afc3867cd6bc5342c17a9c488a
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
1
+ ## 2.4.2
2
+ - Make flush_size actually cap the batch size in LS 2.2+
3
+
1
4
  ## 2.4.1
2
5
  - Used debug level instead of info when emitting flush log message
3
6
  - Updated docs about template
@@ -25,7 +25,9 @@ module LogStash; module Outputs; class ElasticSearch;
25
25
 
26
26
  # Receive an array of events and immediately attempt to index them (no buffering)
27
27
  def multi_receive(events)
28
- retrying_submit(events.map {|e| event_action_tuple(e) })
28
+ events.each_slice(@flush_size) do |slice|
29
+ retrying_submit(slice.map {|e| event_action_tuple(e) })
30
+ end
29
31
  end
30
32
 
31
33
  # Convert the event into a 3-tuple of action, params, and event
@@ -190,4 +192,4 @@ module LogStash; module Outputs; class ElasticSearch;
190
192
  raise e
191
193
  end
192
194
  end
193
- end; end; end
195
+ end; end; end
@@ -5,7 +5,9 @@ module LogStash; module Outputs; class ElasticSearch
5
5
  # The default value will partition your indices by day so you can more easily
6
6
  # delete old data or only search specific date ranges.
7
7
  # Indexes may not contain uppercase characters.
8
- # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}
8
+ # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
9
+ # LS uses Joda to format the index pattern from the event timestamp.
10
+ # Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
9
11
  mod.config :index, :validate => :string, :default => "logstash-%{+YYYY.MM.dd}"
10
12
 
11
13
  # The index type to write events to. Generally you should try to write only
@@ -44,15 +46,15 @@ module LogStash; module Outputs; class ElasticSearch
44
46
  # If not set, the included template will be used.
45
47
  mod.config :template, :validate => :path
46
48
 
47
- # The template_overwrite option will always overwrite the indicated template
48
- # in Elasticsearch with either the one indicated by template or the included one.
49
- # This option is set to false by default. If you always want to stay up to date
50
- # with the template provided by Logstash, this option could be very useful to you.
51
- # Likewise, if you have your own template file managed by puppet, for example, and
49
+ # The template_overwrite option will always overwrite the indicated template
50
+ # in Elasticsearch with either the one indicated by template or the included one.
51
+ # This option is set to false by default. If you always want to stay up to date
52
+ # with the template provided by Logstash, this option could be very useful to you.
53
+ # Likewise, if you have your own template file managed by puppet, for example, and
52
54
  # you wanted to be able to update it regularly, this option could help there as well.
53
- #
54
- # Please note that if you are using your own customized version of the Logstash
55
- # template (logstash), setting this to true will make Logstash to overwrite
55
+ #
56
+ # Please note that if you are using your own customized version of the Logstash
57
+ # template (logstash), setting this to true will make Logstash to overwrite
56
58
  # the "logstash" template (i.e. removing all customized settings)
57
59
  mod.config :template_overwrite, :validate => :boolean, :default => false
58
60
 
@@ -83,6 +85,15 @@ module LogStash; module Outputs; class ElasticSearch
83
85
  mod.config :port, :obsolete => "Please use the 'hosts' setting instead. Hosts entries can be in 'host:port' format."
84
86
 
85
87
  # This plugin uses the bulk index API for improved indexing performance.
88
+ # In Logstashes >= 2.2 this setting defines the maximum-sized bulk request Logstash will make.
89
+ # You may want to increase this to be in line with your pipeline's batch size.
90
+ # If you specify a number larger than the batch size of your pipeline it will have no effect,
91
+ # save for the case where a filter increases the size of an inflight batch by outputting
92
+ # events.
93
+ #
94
+ # In Logstashes <= 2.1 this plugin uses its own internal buffer of events.
95
+ # This config option sets that size. In these older logstashes this size may
96
+ # have a significant impact on heap usage, whereas in 2.2+ it will never increase it.
86
97
  # To make efficient bulk API calls, we will buffer a certain number of
87
98
  # events before flushing that out to Elasticsearch. This setting
88
99
  # controls how many events will be buffered before sending a batch
@@ -139,4 +150,4 @@ module LogStash; module Outputs; class ElasticSearch
139
150
  mod.config :retry_max_items, :validate => :number, :default => 500, :deprecated => true
140
151
  end
141
152
  end
142
- end end end
153
+ end end end
@@ -1,7 +1,7 @@
1
1
  Gem::Specification.new do |s|
2
2
 
3
3
  s.name = 'logstash-output-elasticsearch'
4
- s.version = '2.4.1'
4
+ s.version = '2.4.2'
5
5
  s.licenses = ['apache-2.0']
6
6
  s.summary = "Logstash Output to Elasticsearch"
7
7
  s.description = "Output events to elasticsearch"
@@ -92,6 +92,28 @@ describe "outputs/elasticsearch" do
92
92
  expect(manticore_host).to include("9202")
93
93
  end
94
94
  end
95
+
96
+ describe "#multi_receive" do
97
+ let(:events) { [double("one"), double("two"), double("three")] }
98
+ let(:events_tuples) { [double("one t"), double("two t"), double("three t")] }
99
+ let(:options) { super.merge("flush_size" => 2) }
100
+
101
+ before do
102
+ allow(eso).to receive(:retrying_submit).with(anything)
103
+ events.each_with_index do |e,i|
104
+ et = events_tuples[i]
105
+ allow(eso).to receive(:event_action_tuple).with(e).and_return(et)
106
+ end
107
+ eso.multi_receive(events)
108
+ end
109
+
110
+ it "should receive an array of events and invoke retrying_submit with them, split by flush_size" do
111
+ expect(eso).to have_received(:retrying_submit).with(events_tuples.slice(0,2))
112
+ expect(eso).to have_received(:retrying_submit).with(events_tuples.slice(2,3))
113
+ end
114
+
115
+ end
116
+
95
117
  end
96
118
 
97
119
  # TODO(sissel): Improve this. I'm not a fan of using message expectations (expect().to receive...)
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-output-elasticsearch
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.4.1
4
+ version: 2.4.2
5
5
  platform: java
6
6
  authors:
7
7
  - Elastic
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2016-01-29 00:00:00.000000000 Z
11
+ date: 2016-02-02 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: concurrent-ruby