logstash-output-elasticsearch 3.0.2-java → 4.1.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +16 -3
- data/Gemfile +1 -1
- data/lib/logstash/outputs/elasticsearch/common.rb +90 -58
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +12 -32
- data/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb +63 -0
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +378 -0
- data/lib/logstash/outputs/elasticsearch/http_client.rb +70 -64
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +15 -4
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +1 -1
- data/lib/logstash/outputs/elasticsearch.rb +27 -4
- data/logstash-output-elasticsearch.gemspec +3 -5
- data/spec/es_spec_helper.rb +1 -0
- data/spec/fixtures/5x_node_resp.json +2 -0
- data/spec/integration/outputs/create_spec.rb +2 -5
- data/spec/integration/outputs/index_spec.rb +1 -1
- data/spec/integration/outputs/parent_spec.rb +1 -3
- data/spec/integration/outputs/pipeline_spec.rb +1 -2
- data/spec/integration/outputs/retry_spec.rb +51 -49
- data/spec/integration/outputs/routing_spec.rb +1 -1
- data/spec/integration/outputs/secure_spec.rb +4 -8
- data/spec/integration/outputs/templates_spec.rb +12 -8
- data/spec/integration/outputs/update_spec.rb +13 -27
- data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +25 -0
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +142 -0
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +8 -22
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +5 -6
- data/spec/unit/outputs/elasticsearch_spec.rb +33 -30
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +10 -6
- metadata +72 -87
- data/lib/logstash/outputs/elasticsearch/buffer.rb +0 -124
- data/spec/unit/buffer_spec.rb +0 -118

data/lib/logstash/outputs/elasticsearch/http_client.rb CHANGED

@@ -1,12 +1,12 @@
 require "logstash/outputs/elasticsearch"
 require "cabin"
 require "base64"
-require
-require
+require 'logstash/outputs/elasticsearch/http_client/pool'
+require 'logstash/outputs/elasticsearch/http_client/manticore_adapter'
 
 module LogStash; module Outputs; class ElasticSearch;
   class HttpClient
-    attr_reader :client, :options, :
+    attr_reader :client, :options, :logger, :pool, :action_count, :recv_count
     # This is here in case we use DEFAULT_OPTIONS in the future
     # DEFAULT_OPTIONS = {
     #   :setting => value
@@ -17,71 +17,50 @@ module LogStash; module Outputs; class ElasticSearch;
       # Again, in case we use DEFAULT_OPTIONS in the future, uncomment this.
       # @options = DEFAULT_OPTIONS.merge(options)
       @options = options
-      @
+      @pool = build_pool(@options)
       # mutex to prevent requests and sniffing to access the
       # connection pool at the same time
-      @request_mutex = Mutex.new
-      start_sniffing!
     end
 
     def template_install(name, template, force=false)
-
-
-
-        return
-      end
-      template_put(name, template)
+      if template_exists?(name) && !force
+        @logger.debug("Found existing Elasticsearch template. Skipping template management", :name => name)
+        return
       end
+      template_put(name, template)
     end
 
     def bulk(actions)
-      @
-
-
-    def non_threadsafe_bulk(actions)
+      @action_count ||= 0
+      @action_count += actions.size
+
       return if actions.empty?
       bulk_body = actions.collect do |action, args, source|
         args, source = update_action_builder(args, source) if action == 'update'
 
         if source && action != 'delete'
-          next [ { action => args
+          next [ { action => args }, source ]
         else
           next { action => args }
         end
-      end.
-
-
-
-
-    def start_sniffing!
-      if options[:sniffing]
-        @sniffer_thread = Thread.new do
-          loop do
-            @request_mutex.synchronize { sniff! }
-            sleep (options[:sniffing_delay].to_f || 30)
-          end
-        end
+      end.
+        flatten.
+        reduce("") do |acc,line|
+          acc << LogStash::Json.dump(line)
+          acc << "\n"
        end
-    end
 
-
-      @
+      # Discard the URL
+      url, response = @pool.post("_bulk", nil, bulk_body)
+      LogStash::Json.load(response.body)
     end
 
-    def
-
-      hosts_by_name = client.transport.hosts.map {|h| h["name"]}.sort
-      @logger.debug({"count" => hosts_by_name.count, "hosts" => hosts_by_name})
-    rescue StandardError => e
-      @logger.error("Error while sniffing connection",
-                    :message => e.message,
-                    :class => e.class.name,
-                    :backtrace => e.backtrace)
+    def close
+      @pool.close
     end
 
     private
 
-    # Builds a client and returns an Elasticsearch::Client
     #
     # The `options` is a hash where the following symbol keys have meaning:
     #
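The rewritten `bulk` above serializes each `[action, args, source]` tuple into the newline-delimited JSON body that Elasticsearch's `_bulk` endpoint expects: one header line per action, plus a document line when there is a source. A minimal standalone sketch of that serialization, using stdlib `JSON` in place of `LogStash::Json` and made-up action tuples:

```ruby
require 'json' # stand-in for LogStash::Json in this sketch

# Made-up action tuples in the [action, args, source] shape the plugin uses
actions = [
  ["index",  { :_index => "logstash-2016.05.20", :_type => "logs" }, { "message" => "hello" }],
  ["delete", { :_index => "logstash-2016.05.20", :_type => "logs", :_id => "abc123" }, nil]
]

bulk_body = actions.collect do |action, args, source|
  if source && action != 'delete'
    next [ { action => args }, source ]  # header line followed by the document line
  else
    next { action => args }              # header line only (e.g. delete)
  end
end.flatten.reduce("") do |acc, line|
  acc << JSON.dump(line)
  acc << "\n"                            # _bulk is newline-delimited JSON
end

puts bulk_body
```

The whole string is then POSTed once via `@pool.post("_bulk", nil, bulk_body)`, so a batch costs a single HTTP request regardless of how many actions it contains.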
@@ -100,7 +79,7 @@ module LogStash; module Outputs; class ElasticSearch;
     # * `:path` - String. The leading path for prefixing Elasticsearch
     #   requests. This is sometimes used if you are proxying Elasticsearch access
     #   through a special http path, such as using mod_rewrite.
-    def
+    def build_pool(options)
       hosts = options[:hosts] || ["127.0.0.1"]
       client_settings = options[:client_settings] || {}
       timeout = options[:timeout] || 0
@@ -108,25 +87,51 @@ module LogStash; module Outputs; class ElasticSearch;
       host_ssl_opt = client_settings[:ssl].nil? ? nil : client_settings[:ssl][:enabled]
       urls = hosts.map {|host| host_to_url(host, host_ssl_opt, client_settings[:path])}
 
-
-      :
-      :
-      :
-          :socket_timeout => timeout,
-          :request_timeout => timeout,
-          :proxy => client_settings[:proxy]
-        },
-        :transport_class => ::Elasticsearch::Transport::Transport::HTTP::Manticore
+      adapter_options = {
+        :socket_timeout => timeout,
+        :request_timeout => timeout,
+        :proxy => client_settings[:proxy]
       }
 
-
-
-
+      # Having this explicitly set to nil is an error
+      if client_settings[:pool_max]
+        adapter_options[:pool_max] = client_settings[:pool_max]
+      end
+
+      # Having this explicitly set to nil is an error
+      if client_settings[:pool_max_per_route]
+        adapter_options[:pool_max_per_route] = client_settings[:pool_max_per_route]
       end
 
-
+      adapter_options[:ssl] = client_settings[:ssl] if client_settings[:ssl]
+
+      if options[:user]
+        adapter_options[:auth] = {
+          :user => options[:user],
+          :password => options[:password],
+          :eager => true
+        }
+      end
+
+      adapter_class = ::LogStash::Outputs::ElasticSearch::HttpClient::ManticoreAdapter
+      adapter = adapter_class.new(@logger, adapter_options)
+
+      pool_options = {
+        :sniffing => options[:sniffing],
+        :sniffer_delay => options[:sniffer_delay],
+        :healthcheck_path => options[:healthcheck_path],
+        :resurrect_delay => options[:resurrect_delay]
+      }
+
+      ssl_options = options[:client_settings] ? options[:client_settings][:ssl] : {}
+      pool_options[:scheme] = ssl_options && ssl_options[:enabled] ? 'https' : 'http'
+
+      if options[:user]
+        pool_options[:auth] = {:user => options[:user], :password => options[:password]}
+      end
 
-
+      pool_class = ::LogStash::Outputs::ElasticSearch::HttpClient::Pool
+      pool_class.new(@logger, adapter, urls, pool_options)
     end
 
     HOSTNAME_PORT_REGEX=/\A(?<hostname>([A-Za-z0-9\.\-]+)|\[[0-9A-Fa-f\:]+\])(:(?<port>\d+))?\Z/
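To make the split between adapter and pool concrete, here is a hedged sketch of the kind of options hash `build_pool` consumes. The key names come from the diff above; the values are illustrative, not plugin defaults:

```ruby
# Illustrative only: a hash shaped the way build_pool reads it.
options = {
  :hosts => ["es1.example.com:9200", "es2.example.com:9200"],
  :timeout => 60,                  # becomes :socket_timeout and :request_timeout on the adapter
  :user => "logstash",
  :password => "changeme",         # adapter gets eager basic auth; the pool gets plain :auth
  :sniffing => true,
  :sniffer_delay => 5,
  :healthcheck_path => "/",
  :resurrect_delay => 5,
  :client_settings => {
    :proxy => nil,
    :pool_max => 1000,             # only copied onto the adapter when non-nil
    :pool_max_per_route => 100,
    :ssl => { :enabled => false }  # also decides pool_options[:scheme]
  }
}

# client = LogStash::Outputs::ElasticSearch::HttpClient.new(options)
```

The Manticore adapter owns the low-level HTTP concerns (timeouts, proxy, TLS, connection limits), while the pool layered on top owns host selection, sniffing, health checks, and resurrection.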
@@ -183,18 +188,19 @@ module LogStash; module Outputs; class ElasticSearch;
         "this may be logged to disk thus leaking credentials. Use the 'user' and 'password' options respectively"
       end
 
-      url
+      url
     end
 
     def template_exists?(name)
-      @
-
-    rescue Elasticsearch::Transport::Transport::Errors::NotFound
-      return false
+      url, response = @pool.head("/_template/#{name}")
+      response.code >= 200 && response.code <= 299
     end
 
     def template_put(name, template)
-
+      path = "_template/#{name}"
+      logger.info("Installing elasticsearch template to #{path}")
+      url, response = @pool.put(path, nil, LogStash::Json.dump(template))
+      response
     end
 
     # Build a bulk item for an elasticsearch update action
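Template management now goes through the pool too: existence is a `HEAD` on `_template/<name>` judged by a 2xx status, and installation is a `PUT` of the dumped template. A quick usage sketch; `client` and the template hash are placeholders, not taken from the plugin:

```ruby
# Hypothetical usage of the public template API shown above.
template = { "template" => "logstash-*", "settings" => { "index.refresh_interval" => "5s" } }

client.template_install("logstash", template)        # no-op if a template named "logstash" already exists
client.template_install("logstash", template, true)  # force: always PUTs the template
```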

data/lib/logstash/outputs/elasticsearch/http_client_builder.rb CHANGED

@@ -1,14 +1,22 @@
 module LogStash; module Outputs; class ElasticSearch;
   module HttpClientBuilder
     def self.build(logger, hosts, params)
-      client_settings = {
+      client_settings = {
+        :pool_max => params["pool_max"],
+        :pool_max_per_route => params["pool_max_per_route"],
+      }
 
       common_options = {
         :client_settings => client_settings,
-        :
-        :
+        :resurrect_delay => params["resurrect_delay"],
+        :healthcheck_path => params["healthcheck_path"]
       }
 
+      if params["sniffing"]
+        common_options[:sniffing] = true
+        common_options[:sniffer_delay] = params["sniffing_delay"]
+      end
+
       common_options[:timeout] = params["timeout"] if params["timeout"]
 
       if params["path"]
@@ -22,7 +30,7 @@ module LogStash; module Outputs; class ElasticSearch;
       common_options.merge! setup_basic_auth(logger, params)
 
       # Update API setup
-      raise(
+      raise( LogStash::ConfigurationError,
         "doc_as_upsert and scripted_upsert are mutually exclusive."
       ) if params["doc_as_upsert"] and params["scripted_upsert"]
 
@@ -63,7 +71,10 @@ module LogStash; module Outputs; class ElasticSearch;
     end
 
     def self.setup_ssl(logger, params)
+      # If we have HTTPS hosts we act like SSL is enabled
+      params["ssl"] = true if params["hosts"].any? {|h| h.start_with?("https://")}
       return {} if params["ssl"].nil?
+
       return {:ssl => {:enabled => false}} if params["ssl"] == false
 
       cacert, truststore, truststore_password, keystore, keystore_password =
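A sketch of the params hash `HttpClientBuilder.build` now consumes; only keys that appear in the diff above are used, and the values are placeholders. Note the new behavior in `setup_ssl`: an `https://` host turns SSL on even when the `ssl` option was never set:

```ruby
# Hypothetical plugin params, as the output would hand them to the builder.
params = {
  "hosts" => ["https://es1.example.com:9200"],
  "sniffing" => true,
  "sniffing_delay" => 5,
  "resurrect_delay" => 5,
  "healthcheck_path" => "/",
  "pool_max" => 1000,
  "pool_max_per_route" => 100
}

# client = LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(logger, params["hosts"], params)
# Because the host starts with "https://", setup_ssl treats ssl as enabled.
```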

data/lib/logstash/outputs/elasticsearch.rb CHANGED

@@ -41,11 +41,13 @@ require "uri" # for escaping user input
 # a global setting for the JVM.
 #
 # As an example, to set your DNS TTL to 1 second you would set
-# the `LS_JAVA_OPTS` environment variable to `-
+# the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
 #
 # Keep in mind that a connection with keepalive enabled will
 # not reevaluate its DNS value while the keepalive is in effect.
 class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
+  declare_threadsafe!
+
   require "logstash/outputs/elasticsearch/http_client"
   require "logstash/outputs/elasticsearch/http_client_builder"
   require "logstash/outputs/elasticsearch/common_configs"
@@ -126,18 +128,39 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # Note, this is NOT a SOCKS proxy, but a plain HTTP proxy
   config :proxy
 
-  # Set the timeout for network operations and requests sent Elasticsearch. If
+  # Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If
   # a timeout occurs, the request will be retried.
   config :timeout, :validate => :number
 
+  # While the output tries to reuse connections efficiently we have a maximum.
+  # This sets the maximum number of open connections the output will create.
+  # Setting this too low may mean frequently closing / opening connections
+  # which is bad.
+  config :pool_max, :validate => :number, :default => 1000
+
+  # While the output tries to reuse connections efficiently we have a maximum per endpoint.
+  # This sets the maximum number of open connections per endpoint the output will create.
+  # Setting this too low may mean frequently closing / opening connections
+  # which is bad.
+  config :pool_max_per_route, :validate => :number, :default => 100
+
+  # When a backend is marked down a HEAD request will be sent to this path in the
+  # background to see if it has come back again before it is once again eligible
+  # to service requests. If you have custom firewall rules you may need to change
+  # this
+  config :healthcheck_path, :validate => :string, :default => "/"
+
+  # How frequently, in seconds, to wait between resurrection attempts.
+  # Resurrection is the process by which backend endpoints marked 'down' are checked
+  # to see if they have come back to life
+  config :resurrect_delay, :validate => :number, :default => 5
+
   def build_client
     @client = ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
   end
 
   def close
     @stopping.make_true
-    @client.stop_sniffing!
-    @buffer.stop
   end
 
   @@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-elasticsearch-/ }
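The new pool, health check, and resurrection knobs are ordinary plugin options. A hedged example of setting them the way the integration specs do (the host and values are illustrative; the defaults declared above are pool_max 1000, pool_max_per_route 100, healthcheck_path "/", resurrect_delay 5):

```ruby
require "logstash/outputs/elasticsearch"

settings = {
  "hosts" => "127.0.0.1",
  "pool_max" => 500,
  "pool_max_per_route" => 50,
  "healthcheck_path" => "/",
  "resurrect_delay" => 10
}

output = LogStash::Outputs::ElasticSearch.new(settings)
output.register # needs a reachable Elasticsearch; register builds the pooled HTTP client
```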

data/logstash-output-elasticsearch.gemspec CHANGED

@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-output-elasticsearch'
-  s.version = '
+  s.version = '4.1.0'
   s.licenses = ['apache-2.0']
   s.summary = "Logstash Output to Elasticsearch"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -19,13 +19,9 @@ Gem::Specification.new do |s|
   # Special flag to let us know this is actually a logstash plugin
   s.metadata = { "logstash_plugin" => "true", "logstash_group" => "output" }
 
-  # Gem dependencies
-  s.add_runtime_dependency 'concurrent-ruby'
-  s.add_runtime_dependency 'elasticsearch', ['>= 1.0.13', '~> 1.0']
   s.add_runtime_dependency 'stud', ['>= 0.0.17', '~> 0.0']
   s.add_runtime_dependency 'cabin', ['~> 0.6']
   s.add_runtime_dependency "logstash-core-plugin-api", "~> 2.0"
-
   s.add_development_dependency 'ftw', '~> 0.0.42'
   s.add_development_dependency 'logstash-codec-plain'
 
@@ -37,4 +33,6 @@ Gem::Specification.new do |s|
   s.add_development_dependency 'logstash-devutils'
   s.add_development_dependency 'longshoreman'
   s.add_development_dependency 'flores'
+  # Still used in some specs, we should remove this ASAP
+  s.add_development_dependency 'elasticsearch'
 end

data/spec/es_spec_helper.rb CHANGED

data/spec/fixtures/5x_node_resp.json ADDED

@@ -0,0 +1,2 @@
+
+
{"cluster_name":"elasticsearch","nodes":{"vW8V2o_KSnOa-i97FACWFw":{"name":"Blockbuster","transport_address":"127.0.0.1:9300","host":"127.0.0.1","ip":"127.0.0.1","version":"5.0.0-alpha2","build_hash":"e3126df","http_address":"127.0.0.1:9200","roles":["master","data","ingest"],"settings":{"client":{"type":"node"},"cluster":{"name":"elasticsearch"},"node":{"name":"Blockbuster"},"path":{"logs":"/Users/andrewvc/Downloads/elasticsearch-5.0.0-alpha2/logs","home":"/Users/andrewvc/Downloads/elasticsearch-5.0.0-alpha2"},"config":{"ignore_system_properties":"true"}},"os":{"refresh_interval_in_millis":1000,"name":"Mac OS X","arch":"x86_64","version":"10.11.4","available_processors":8,"allocated_processors":8},"process":{"refresh_interval_in_millis":1000,"id":19048,"mlockall":false},"jvm":{"pid":19048,"version":"1.8.0_51","vm_name":"Java HotSpot(TM) 64-Bit Server VM","vm_version":"25.51-b03","vm_vendor":"Oracle Corporation","start_time_in_millis":1463781724873,"mem":{"heap_init_in_bytes":268435456,"heap_max_in_bytes":1037959168,"non_heap_init_in_bytes":2555904,"non_heap_max_in_bytes":0,"direct_max_in_bytes":1037959168},"gc_collectors":["ParNew","ConcurrentMarkSweep"],"memory_pools":["Code Cache","Metaspace","Compressed Class Space","Par Eden Space","Par Survivor Space","CMS Old Gen"],"using_compressed_ordinary_object_pointers":"true"},"thread_pool":{"force_merge":{"type":"fixed","min":1,"max":1,"queue_size":-1},"fetch_shard_started":{"type":"scaling","min":1,"max":16,"keep_alive":"5m","queue_size":-1},"listener":{"type":"fixed","min":4,"max":4,"queue_size":-1},"index":{"type":"fixed","min":8,"max":8,"queue_size":200},"refresh":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1},"generic":{"type":"cached","keep_alive":"30s","queue_size":-1},"warmer":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1},"search":{"type":"fixed","min":13,"max":13,"queue_size":1000},"flush":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1},"fetch_shard_store":{"type":"scaling","min":1,"max":16,"keep_alive":"5m","queue_size":-1},"management":{"type":"scaling","min":1,"max":5,"keep_alive":"5m","queue_size":-1},"get":{"type":"fixed","min":8,"max":8,"queue_size":1000},"bulk":{"type":"fixed","min":8,"max":8,"queue_size":50},"snapshot":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1}},"transport":{"bound_address":["[fe80::1]:9300","[::1]:9300","127.0.0.1:9300"],"publish_address":"127.0.0.1:9300","profiles":{}},"http":{"bound_address":["[fe80::1]:9200","[::1]:9200","127.0.0.1:9200"],"publish_address":"127.0.0.1:9200","max_content_length_in_bytes":104857600},"plugins":[],"modules":[{"name":"ingest-grok","version":"5.0.0-alpha2","description":"Ingest processor that uses grok patterns to split text","classname":"org.elasticsearch.ingest.grok.IngestGrokPlugin"},{"name":"lang-expression","version":"5.0.0-alpha2","description":"Lucene expressions integration for Elasticsearch","classname":"org.elasticsearch.script.expression.ExpressionPlugin"},{"name":"lang-groovy","version":"5.0.0-alpha2","description":"Groovy scripting integration for Elasticsearch","classname":"org.elasticsearch.script.groovy.GroovyPlugin"},{"name":"lang-mustache","version":"5.0.0-alpha2","description":"Mustache scripting integration for Elasticsearch","classname":"org.elasticsearch.script.mustache.MustachePlugin"},{"name":"lang-painless","version":"5.0.0-alpha2","description":"An easy, safe and fast scripting language for 
Elasticsearch","classname":"org.elasticsearch.painless.PainlessPlugin"},{"name":"reindex","version":"5.0.0-alpha2","description":"The Reindex module adds APIs to reindex from one index to another or update documents in place.","classname":"org.elasticsearch.index.reindex.ReindexPlugin"}],"ingest":{"processors":[{"type":"append"},{"type":"convert"},{"type":"date"},{"type":"fail"},{"type":"foreach"},{"type":"grok"},{"type":"gsub"},{"type":"join"},{"type":"lowercase"},{"type":"remove"},{"type":"rename"},{"type":"set"},{"type":"split"},{"type":"trim"},{"type":"uppercase"}]}},"BIAIepXSTYufETY06CUpYw":{"name":"Spyne","transport_address":"127.0.0.1:9301","host":"127.0.0.1","ip":"127.0.0.1","version":"5.0.0-alpha2","build_hash":"e3126df","http_address":"127.0.0.1:9201","roles":["master","data","ingest"],"settings":{"client":{"type":"node"},"cluster":{"name":"elasticsearch"},"node":{"name":"Spyne"},"path":{"logs":"/Users/andrewvc/Downloads/elasticsearch-5.0.0-alpha2/logs","home":"/Users/andrewvc/Downloads/elasticsearch-5.0.0-alpha2"},"config":{"ignore_system_properties":"true"}},"os":{"refresh_interval_in_millis":1000,"name":"Mac OS X","arch":"x86_64","version":"10.11.4","available_processors":8,"allocated_processors":8},"process":{"refresh_interval_in_millis":1000,"id":19029,"mlockall":false},"jvm":{"pid":19029,"version":"1.8.0_51","vm_name":"Java HotSpot(TM) 64-Bit Server VM","vm_version":"25.51-b03","vm_vendor":"Oracle Corporation","start_time_in_millis":1463781692985,"mem":{"heap_init_in_bytes":268435456,"heap_max_in_bytes":1037959168,"non_heap_init_in_bytes":2555904,"non_heap_max_in_bytes":0,"direct_max_in_bytes":1037959168},"gc_collectors":["ParNew","ConcurrentMarkSweep"],"memory_pools":["Code Cache","Metaspace","Compressed Class Space","Par Eden Space","Par Survivor Space","CMS Old Gen"],"using_compressed_ordinary_object_pointers":"true"},"thread_pool":{"force_merge":{"type":"fixed","min":1,"max":1,"queue_size":-1},"fetch_shard_started":{"type":"scaling","min":1,"max":16,"keep_alive":"5m","queue_size":-1},"listener":{"type":"fixed","min":4,"max":4,"queue_size":-1},"index":{"type":"fixed","min":8,"max":8,"queue_size":200},"refresh":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1},"generic":{"type":"cached","keep_alive":"30s","queue_size":-1},"warmer":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1},"search":{"type":"fixed","min":13,"max":13,"queue_size":1000},"flush":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1},"fetch_shard_store":{"type":"scaling","min":1,"max":16,"keep_alive":"5m","queue_size":-1},"management":{"type":"scaling","min":1,"max":5,"keep_alive":"5m","queue_size":-1},"get":{"type":"fixed","min":8,"max":8,"queue_size":1000},"bulk":{"type":"fixed","min":8,"max":8,"queue_size":50},"snapshot":{"type":"scaling","min":1,"max":4,"keep_alive":"5m","queue_size":-1}},"transport":{"bound_address":["[fe80::1]:9301","[::1]:9301","127.0.0.1:9301"],"publish_address":"127.0.0.1:9301","profiles":{}},"http":{"bound_address":["[fe80::1]:9201","[::1]:9201","127.0.0.1:9201"],"publish_address":"127.0.0.1:9201","max_content_length_in_bytes":104857600},"plugins":[],"modules":[{"name":"ingest-grok","version":"5.0.0-alpha2","description":"Ingest processor that uses grok patterns to split text","classname":"org.elasticsearch.ingest.grok.IngestGrokPlugin"},{"name":"lang-expression","version":"5.0.0-alpha2","description":"Lucene expressions integration for 
Elasticsearch","classname":"org.elasticsearch.script.expression.ExpressionPlugin"},{"name":"lang-groovy","version":"5.0.0-alpha2","description":"Groovy scripting integration for Elasticsearch","classname":"org.elasticsearch.script.groovy.GroovyPlugin"},{"name":"lang-mustache","version":"5.0.0-alpha2","description":"Mustache scripting integration for Elasticsearch","classname":"org.elasticsearch.script.mustache.MustachePlugin"},{"name":"lang-painless","version":"5.0.0-alpha2","description":"An easy, safe and fast scripting language for Elasticsearch","classname":"org.elasticsearch.painless.PainlessPlugin"},{"name":"reindex","version":"5.0.0-alpha2","description":"The Reindex module adds APIs to reindex from one index to another or update documents in place.","classname":"org.elasticsearch.index.reindex.ReindexPlugin"}],"ingest":{"processors":[{"type":"append"},{"type":"convert"},{"type":"date"},{"type":"fail"},{"type":"foreach"},{"type":"grok"},{"type":"gsub"},{"type":"join"},{"type":"lowercase"},{"type":"remove"},{"type":"rename"},{"type":"set"},{"type":"split"},{"type":"trim"},{"type":"uppercase"}]}}}}

data/spec/integration/outputs/create_spec.rb CHANGED

@@ -2,7 +2,6 @@ require_relative "../../../spec/es_spec_helper"
 
 describe "client create actions", :integration => true do
   require "logstash/outputs/elasticsearch"
-  require "elasticsearch"
 
   def get_es_output(action, id = nil)
     settings = {
@@ -29,8 +28,7 @@ describe "client create actions", :integration => true do
   it "should create new documents with or without id" do
     subject = get_es_output("create", "id123")
     subject.register
-    subject.
-    subject.flush
+    subject.multi_receive([LogStash::Event.new("message" => "sample message here")])
     @es.indices.refresh
     # Wait or fail until everything's indexed.
     Stud::try(3.times) do
@@ -42,8 +40,7 @@ describe "client create actions", :integration => true do
   it "should create new documents without id" do
     subject = get_es_output("create")
     subject.register
-    subject.
-    subject.flush
+    subject.multi_receive([LogStash::Event.new("message" => "sample message here")])
    @es.indices.refresh
     # Wait or fail until everything's indexed.
     Stud::try(3.times) do
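The spec updates above (and in the files that follow) all make the same change: the per-event `receive` plus explicit `flush` of the 3.x buffered output is replaced by the batch-oriented `multi_receive`, which takes an array of events. A minimal before/after sketch, assuming `output` is a registered instance of the plugin:

```ruby
event = LogStash::Event.new("message" => "sample message here")

# 3.x style: each event was buffered, then written on flush
# output.receive(event)
# output.flush

# 4.x style: the whole batch is handed over in one call
output.multi_receive([event])
```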

data/spec/integration/outputs/index_spec.rb CHANGED

@@ -11,7 +11,7 @@ shared_examples "an indexer" do
   before do
     subject.register
     event_count.times do
-      subject.
+      subject.multi_receive([LogStash::Event.new("message" => "Hello World!", "type" => type)])
     end
   end
 

data/spec/integration/outputs/parent_spec.rb CHANGED

@@ -19,9 +19,7 @@ shared_examples "a parent indexer" do
     ftw.put!("#{index_url}/#{type}_parent/test", :body => pdoc.to_json)
 
     subject.register
-    event_count.times
-      subject.receive(LogStash::Event.new("link_to" => "test", "message" => "Hello World!", "type" => type))
-    end
+    subject.multi_receive(event_count.times.map { LogStash::Event.new("link_to" => "test", "message" => "Hello World!", "type" => type) })
   end
 
 

data/spec/integration/outputs/pipeline_spec.rb CHANGED

@@ -49,8 +49,7 @@ describe "Ingest pipeline execution behavior", :integration => true, :version_5x
     #@es.ingest.put_pipeline :id => 'apache_pipeline', :body => pipeline_defintion
 
     subject.register
-    subject.
-    subject.flush
+    subject.multi_receive([LogStash::Event.new("message" => '183.60.215.50 - - [01/Jun/2015:18:00:00 +0000] "GET /scripts/netcat-webserver HTTP/1.1" 200 182 "-" "Mozilla/5.0 (compatible; EasouSpider; +http://www.easou.com/search/spider.html)"')])
     @es.indices.refresh
 
     #Wait or fail until everything's indexed.

data/spec/integration/outputs/retry_spec.rb CHANGED

@@ -10,6 +10,8 @@ describe "failures in bulk class expected behavior", :integration => true do
   let(:invalid_event) { LogStash::Event.new("geoip" => { "location" => "notlatlon" }, "@timestamp" => "2014-11-17T20:37:17.223Z") }
 
   def mock_actions_with_response(*resp)
+    raise ArgumentError, "Cannot mock actions until subject is registered and has a client!" unless subject.client
+
     expanded_responses = resp.map do |resp|
       items = resp["statuses"] && resp["statuses"].map do |status|
         {"create" => {"status" => status, "error" => "Error for #{status}"}}
@@ -21,7 +23,7 @@ describe "failures in bulk class expected behavior", :integration => true do
       }
     end
 
-
+    allow(subject.client).to receive(:bulk).and_return(*expanded_responses)
   end
 
   subject! do
@@ -30,8 +32,8 @@ describe "failures in bulk class expected behavior", :integration => true do
       "index" => "logstash-2014.11.17",
      "template_overwrite" => true,
      "hosts" => get_host_port(),
-      "
-      "
+      "retry_max_interval" => 64,
+      "retry_initial_interval" => 2
     }
    next LogStash::Outputs::ElasticSearch.new(settings)
   end
@@ -39,6 +41,7 @@ describe "failures in bulk class expected behavior", :integration => true do
   before :each do
     # Delete all templates first.
     require "elasticsearch"
+    allow(Stud).to receive(:stoppable_sleep)
 
     # Clean ES of data before we start.
     @es = get_client
@@ -51,14 +54,11 @@ describe "failures in bulk class expected behavior", :integration => true do
     subject.close
   end
 
-  it "should
-    mock_actions_with_response({"errors" => false})
+  it "should retry exactly once if all bulk actions are successful" do
     expect(subject).to receive(:submit).with([action1, action2]).once.and_call_original
     subject.register
-
-    subject.
-    subject.flush
-    sleep(2)
+    mock_actions_with_response({"errors" => false})
+    subject.multi_receive([event1, event2])
   end
 
   it "retry exceptions within the submit body" do
@@ -73,25 +73,19 @@ describe "failures in bulk class expected behavior", :integration => true do
       end
     end
 
-    subject.
-    subject.flush
+    subject.multi_receive([event1])
   end
 
-  it "should retry actions with response status of 503" do
-    mock_actions_with_response({"errors" => true, "statuses" => [200, 200, 503, 503]},
-                               {"errors" => true, "statuses" => [200, 503]},
-                               {"errors" => false})
-    expect(subject).to receive(:submit).with([action1, action1, action1, action2]).ordered.once.and_call_original
+  it "should retry actions with response status of 503" do expect(subject).to receive(:submit).with([action1, action1, action1, action2]).ordered.once.and_call_original
     expect(subject).to receive(:submit).with([action1, action2]).ordered.once.and_call_original
     expect(subject).to receive(:submit).with([action2]).ordered.once.and_call_original
 
     subject.register
-
-
-
-
-    subject.
-    sleep(3)
+    mock_actions_with_response({"errors" => true, "statuses" => [200, 200, 503, 503]},
+                               {"errors" => true, "statuses" => [200, 503]},
+                               {"errors" => false})
+
+    subject.multi_receive([event1, event1, event1, event2])
   end
 
   it "should retry actions with response status of 429" do
@@ -101,63 +95,71 @@ describe "failures in bulk class expected behavior", :integration => true do
                                {"errors" => false})
     expect(subject).to receive(:submit).with([action1]).twice.and_call_original
 
-    subject.
-    subject.flush
-    sleep(3)
+    subject.multi_receive([event1])
   end
 
   it "should retry an event infinitely until a non retryable status occurs" do
+    expect(subject).to receive(:submit).with([action1]).exactly(6).times.and_call_original
+    subject.register
+
     mock_actions_with_response({"errors" => true, "statuses" => [429]},
                                {"errors" => true, "statuses" => [429]},
                                {"errors" => true, "statuses" => [429]},
                                {"errors" => true, "statuses" => [429]},
                                {"errors" => true, "statuses" => [429]},
                                {"errors" => true, "statuses" => [500]})
-
+
+    subject.multi_receive([event1])
+  end
+
+  it "should sleep for an exponentially increasing amount of time on each retry, capped by the max" do
+    [2, 4, 8, 16, 32, 64, 64].each_with_index do |interval,i|
+      expect(Stud).to receive(:stoppable_sleep).with(interval).ordered
+    end
+
     subject.register
-
-
-
+
+    mock_actions_with_response({"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [429]},
+                               {"errors" => true, "statuses" => [500]})
+
+    subject.multi_receive([event1])
   end
 
   it "non-retryable errors like mapping errors (400) should be dropped and not be retried (unfortunately)" do
     subject.register
-    subject.receive(invalid_event)
     expect(subject).to receive(:submit).once.and_call_original
+    subject.multi_receive([invalid_event])
     subject.close
 
     @es.indices.refresh
-
-
-    r = @es.search
-    insist { r["hits"]["total"] } == 0
-  end
+    r = @es.search
+    expect(r["hits"]["total"]).to eql(0)
   end
 
   it "successful requests should not be appended to retry queue" do
-    subject.register
-    subject.receive(event1)
     expect(subject).to receive(:submit).once.and_call_original
+
+    subject.register
+    subject.multi_receive([event1])
     subject.close
     @es.indices.refresh
-
-
-    r = @es.search
-    insist { r["hits"]["total"] } == 1
-  end
+    r = @es.search
+    expect(r["hits"]["total"]).to eql(1)
   end
 
   it "should only index proper events" do
     subject.register
-    subject.
-    subject.receive(event1)
+    subject.multi_receive([invalid_event, event1])
    subject.close
 
     @es.indices.refresh
-
-
-    r = @es.search
-    insist { r["hits"]["total"] } == 1
-  end
+    r = @es.search
+    expect(r["hits"]["total"]).to eql(1)
   end
 end
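The new backoff test pins the expected sleep sequence to `[2, 4, 8, 16, 32, 64, 64]` given `retry_initial_interval => 2` and `retry_max_interval => 64`. A small sketch of the doubling-with-cap rule implied by that expectation; this illustrates the behavior the spec asserts, not the plugin's actual retry code:

```ruby
# Compute the sleep intervals the spec expects: double each retry, capped at max.
def retry_intervals(initial, max, attempts)
  interval = initial
  attempts.times.map do
    current = interval
    interval = [interval * 2, max].min
    current
  end
end

retry_intervals(2, 64, 7) # => [2, 4, 8, 16, 32, 64, 64]
```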