logstash-output-amazon_es 2.0.1-java → 6.4.0-java

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. checksums.yaml +5 -5
  2. data/CONTRIBUTORS +12 -0
  3. data/Gemfile +8 -0
  4. data/LICENSE +10 -199
  5. data/README.md +34 -65
  6. data/lib/logstash/outputs/amazon_es.rb +218 -423
  7. data/lib/logstash/outputs/amazon_es/common.rb +347 -0
  8. data/lib/logstash/outputs/amazon_es/common_configs.rb +141 -0
  9. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es2x.json +95 -0
  10. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es5x.json +46 -0
  11. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es6x.json +45 -0
  12. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es7x.json +46 -0
  13. data/lib/logstash/outputs/amazon_es/http_client.rb +359 -74
  14. data/lib/logstash/outputs/amazon_es/http_client/manticore_adapter.rb +169 -0
  15. data/lib/logstash/outputs/amazon_es/http_client/pool.rb +457 -0
  16. data/lib/logstash/outputs/amazon_es/http_client_builder.rb +164 -0
  17. data/lib/logstash/outputs/amazon_es/template_manager.rb +36 -0
  18. data/logstash-output-amazon_es.gemspec +13 -22
  19. data/spec/es_spec_helper.rb +37 -0
  20. data/spec/unit/http_client_builder_spec.rb +189 -0
  21. data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +105 -0
  22. data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +198 -0
  23. data/spec/unit/outputs/elasticsearch/http_client_spec.rb +222 -0
  24. data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +25 -0
  25. data/spec/unit/outputs/elasticsearch_spec.rb +615 -0
  26. data/spec/unit/outputs/error_whitelist_spec.rb +60 -0
  27. metadata +49 -110
  28. data/lib/logstash/outputs/amazon_es/aws_transport.rb +0 -109
  29. data/lib/logstash/outputs/amazon_es/aws_v4_signer.rb +0 -7
  30. data/lib/logstash/outputs/amazon_es/aws_v4_signer_impl.rb +0 -62
  31. data/lib/logstash/outputs/amazon_es/elasticsearch-template.json +0 -41
  32. data/spec/amazon_es_spec_helper.rb +0 -69
  33. data/spec/unit/outputs/amazon_es_spec.rb +0 -50
  34. data/spec/unit/outputs/elasticsearch/protocol_spec.rb +0 -36
  35. data/spec/unit/outputs/elasticsearch_proxy_spec.rb +0 -58
data/lib/logstash/outputs/amazon_es/http_client/manticore_adapter.rb
@@ -0,0 +1,169 @@
+ require 'manticore'
+ require 'cgi'
+ require 'aws-sdk-core'
+ require 'uri'
+
+ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
+   DEFAULT_HEADERS = { "content-type" => "application/json" }
+
+   CredentialConfig = Struct.new(
+     :access_key_id,
+     :secret_access_key,
+     :session_token,
+     :profile,
+     :instance_profile_credentials_retries,
+     :instance_profile_credentials_timeout,
+     :region)
+
+   class ManticoreAdapter
+     attr_reader :manticore, :logger
+
+     def initialize(logger, options={})
+       @logger = logger
+       options = options.clone || {}
+       options[:ssl] = options[:ssl] || {}
+
+       # We manage our own retries directly, so let's disable them here
+       options[:automatic_retries] = 0
+       # We definitely don't need cookies
+       options[:cookies] = false
+
+       @client_params = {:headers => DEFAULT_HEADERS.merge(options[:headers] || {})}
+
+       @port = options[:port] || 9200
+       @protocol = options[:protocol] || 'http'
+       @region = options[:region] || 'us-east-1'
+       @aws_access_key_id = options[:aws_access_key_id] || nil
+       @aws_secret_access_key = options[:aws_secret_access_key] || nil
+       @session_token = options[:session_token] || nil
+       @profile = options[:profile] || 'default'
+       @instance_cred_retries = options[:instance_profile_credentials_retries] || 0
+       @instance_cred_timeout = options[:instance_profile_credentials_timeout] || 1
+
+       if options[:proxy]
+         options[:proxy] = manticore_proxy_hash(options[:proxy])
+       end
+
+       @manticore = ::Manticore::Client.new(options)
+     end
+
+     # Transform the proxy option to a hash. Manticore's support for non-hash
+     # proxy options is broken. This was fixed in https://github.com/cheald/manticore/commit/34a00cee57a56148629ed0a47c329181e7319af5
+     # but this is not yet released
+     def manticore_proxy_hash(proxy_uri)
+       [:scheme, :port, :user, :password, :path].reduce(:host => proxy_uri.host) do |acc, opt|
+         value = proxy_uri.send(opt)
+         acc[opt] = value unless value.nil? || (value.is_a?(String) && value.empty?)
+         acc
+       end
+     end
+
+     def client
+       @manticore
+     end
+
+
+
+     # Performs the request by invoking {Transport::Base#perform_request} with a block.
+     #
+     # @return [Response]
+     # @see Transport::Base#perform_request
+     #
+     def perform_request(url, method, path, params={}, body=nil)
+       # Perform 2-level deep merge on the params, so if the passed params and client params will both have hashes stored on a key they
+       # will be merged as well, instead of choosing just one of the values
+       params = (params || {}).merge(@client_params) { |key, oldval, newval|
+         (oldval.is_a?(Hash) && newval.is_a?(Hash)) ? oldval.merge(newval) : newval
+       }
+       params[:headers] = params[:headers].clone
+
+
+       params[:body] = body if body
+
+       if url.user
+         params[:auth] = {
+           :user => CGI.unescape(url.user),
+           # We have to unescape the password here since manticore won't do it
+           # for us unless it's part of the URL
+           :password => CGI.unescape(url.password),
+           :eager => true
+         }
+       end
+
+       request_uri = format_url(url, path)
+
+       if @protocol == "https"
+         url = URI::HTTPS.build({:host => URI(request_uri.to_s).host, :port => @port.to_s, :path => path})
+       else
+         url = URI::HTTP.build({:host => URI(request_uri.to_s).host, :port => @port.to_s, :path => path})
+       end
+
+
+       key = Seahorse::Client::Http::Request.new(options = {:endpoint => url, :http_method => method.to_s.upcase,
+                                                            :headers => params[:headers], :body => params[:body]})
+
+
+
+       credential_config = CredentialConfig.new(@aws_access_key_id, @aws_secret_access_key, @session_token, @profile, @instance_cred_retries, @instance_cred_timeout, @region)
+
+
+       credentials = Aws::CredentialProviderChain.new(credential_config).resolve
+       aws_signer = Aws::Signers::V4.new(credentials, 'es', @region)
+
+
+       signed_key = aws_signer.sign(key)
+       params[:headers] = params[:headers].merge(signed_key.headers)
+
+
+
+       resp = @manticore.send(method.downcase, request_uri.to_s, params)
+
+       # Manticore returns lazy responses by default
+       # We want to block for our usage, this will wait for the response
+       # to finish
+       resp.call
+       # 404s are excluded because they are valid codes in the case of
+       # template installation. We might need a better story around this later
+       # but for our current purposes this is correct
+       if resp.code < 200 || (resp.code > 299 && resp.code != 404)
+         raise ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(resp.code, request_uri, body, resp.body)
+       end
+
+       resp
+     end
+
+     def format_url(url, path_and_query=nil)
+       request_uri = url.clone
+
+       # We excise auth info from the URL in case manticore itself tries to stick
+       # sensitive data in a thrown exception or log data
+       request_uri.user = nil
+       request_uri.password = nil
+
+       return request_uri.to_s if path_and_query.nil?
+
+       parsed_path_and_query = java.net.URI.new(path_and_query)
+
+       query = request_uri.query
+       parsed_query = parsed_path_and_query.query
+
+       new_query_parts = [request_uri.query, parsed_path_and_query.query].select do |part|
+         part && !part.empty? # Skip empty nil and ""
+       end
+
+       request_uri.query = new_query_parts.join("&") unless new_query_parts.empty?
+
+       request_uri.path = "#{request_uri.path}/#{parsed_path_and_query.path}".gsub(/\/{2,}/, "/")
+
+       request_uri
+     end
+
+     def close
+       @manticore.close
+     end
+
+     def host_unreachable_exceptions
+       [::Manticore::Timeout, ::Manticore::SocketException, ::Manticore::ClientProtocolException, ::Manticore::ResolutionFailure, Manticore::SocketTimeout]
+     end
+   end
+ end; end; end; end
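The heart of the new adapter is the signing step in perform_request: the outgoing call is rebuilt as a Seahorse HTTP request, credentials are resolved through aws-sdk-core's provider chain, and the resulting SigV4 headers are merged back onto the Manticore request. Below is a minimal standalone sketch of that same flow, using the aws-sigv4 gem rather than the plugin's internal Aws::Signers::V4 path; the domain endpoint and environment-variable credentials are placeholders.

require 'aws-sigv4'   # standalone signer gem; the plugin itself goes through aws-sdk-core
require 'net/http'
require 'uri'

# Placeholder endpoint; substitute a real Amazon Elasticsearch Service domain.
endpoint = URI('https://my-domain.us-east-1.es.amazonaws.com/_cluster/health')

signer = Aws::Sigv4::Signer.new(
  service: 'es',
  region: 'us-east-1',
  access_key_id: ENV['AWS_ACCESS_KEY_ID'],
  secret_access_key: ENV['AWS_SECRET_ACCESS_KEY']
)

# Compute the authorization, x-amz-date and x-amz-content-sha256 headers
# for this exact method, URL, headers and body.
signature = signer.sign_request(
  http_method: 'GET',
  url: endpoint.to_s,
  headers: { 'content-type' => 'application/json' },
  body: ''
)

request = Net::HTTP::Get.new(endpoint)
request['content-type'] = 'application/json'
signature.headers.each { |name, value| request[name] = value }

response = Net::HTTP.start(endpoint.host, endpoint.port, use_ssl: true) do |http|
  http.request(request)
end
puts "#{response.code} #{response.body}"

The signed header set has to reach the server unchanged, which is why the adapter above merges signed_key.headers into params[:headers] immediately before handing the request to Manticore.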
data/lib/logstash/outputs/amazon_es/http_client/pool.rb
@@ -0,0 +1,457 @@
+ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
+   class Pool
+     class NoConnectionAvailableError < Error; end
+     class BadResponseCodeError < Error
+       attr_reader :url, :response_code, :request_body, :response_body
+
+       def initialize(response_code, url, request_body, response_body)
+         @response_code = response_code
+         @url = url
+         @request_body = request_body
+         @response_body = response_body
+       end
+
+       def message
+         "Got response code '#{response_code}' contacting Elasticsearch at URL '#{@url}'"
+       end
+     end
+     class HostUnreachableError < Error;
+       attr_reader :original_error, :url
+
+       def initialize(original_error, url)
+         @original_error = original_error
+         @url = url
+       end
+
+       def message
+         "Elasticsearch Unreachable: [#{@url}][#{original_error.class}] #{original_error.message}"
+       end
+     end
+
+     attr_reader :logger, :adapter, :sniffing, :sniffer_delay, :resurrect_delay, :healthcheck_path, :sniffing_path, :bulk_path
+
+     ROOT_URI_PATH = '/'.freeze
+
+     DEFAULT_OPTIONS = {
+       :healthcheck_path => ROOT_URI_PATH,
+       :sniffing_path => "/_nodes/http",
+       :bulk_path => "/_bulk",
+       :scheme => 'http',
+       :resurrect_delay => 5,
+       :sniffing => false,
+       :sniffer_delay => 10,
+     }.freeze
+
+     def initialize(logger, adapter, initial_urls=[], options={})
+       @logger = logger
+       @adapter = adapter
+       @metric = options[:metric]
+       @initial_urls = initial_urls
+
+       raise ArgumentError, "No URL Normalizer specified!" unless options[:url_normalizer]
+       @url_normalizer = options[:url_normalizer]
+       DEFAULT_OPTIONS.merge(options).tap do |merged|
+         @bulk_path = merged[:bulk_path]
+         @sniffing_path = merged[:sniffing_path]
+         @healthcheck_path = merged[:healthcheck_path]
+         @resurrect_delay = merged[:resurrect_delay]
+         @sniffing = merged[:sniffing]
+         @sniffer_delay = merged[:sniffer_delay]
+       end
+
+       # Used for all concurrent operations in this class
+       @state_mutex = Mutex.new
+
+       # Holds metadata about all URLs
+       @url_info = {}
+       @stopping = false
+     end
+
+     def start
+       update_urls(@initial_urls)
+       start_resurrectionist
+       start_sniffer if @sniffing
+     end
+
+     def close
+       @state_mutex.synchronize { @stopping = true }
+
+       logger.debug "Stopping sniffer"
+       stop_sniffer
+
+       logger.debug "Stopping resurrectionist"
+       stop_resurrectionist
+
+       logger.debug "Waiting for in use manticore connections"
+       wait_for_in_use_connections
+
+       logger.debug("Closing adapter #{@adapter}")
+       @adapter.close
+     end
+
+     def wait_for_in_use_connections
+       until in_use_connections.empty?
+         logger.info "Blocked on shutdown to in use connections #{@state_mutex.synchronize {@url_info}}"
+         sleep 1
+       end
+     end
+
+     def in_use_connections
+       @state_mutex.synchronize { @url_info.values.select {|v| v[:in_use] > 0 } }
+     end
+
+     def alive_urls_count
+       @state_mutex.synchronize { @url_info.values.select {|v| v[:state] == :alive }.count }
+     end
+
+     def url_info
+       @state_mutex.synchronize { @url_info }
+     end
+
+     def maximum_seen_major_version
+       @state_mutex.synchronize do
+         @maximum_seen_major_version
+       end
+     end
+
+     def urls
+       url_info.keys
+     end
+
+     def until_stopped(task_name, delay)
+       last_done = Time.now
+       until @state_mutex.synchronize { @stopping }
+         begin
+           now = Time.now
+           if (now - last_done) >= delay
+             last_done = now
+             yield
+           end
+           sleep 1
+         rescue => e
+           logger.warn(
+             "Error while performing #{task_name}",
+             :error_message => e.message,
+             :class => e.class.name,
+             :backtrace => e.backtrace
+           )
+         end
+       end
+     end
+
+     def start_sniffer
+       @sniffer = Thread.new do
+         until_stopped("sniffing", sniffer_delay) do
+           begin
+             sniff!
+           rescue NoConnectionAvailableError => e
+             @state_mutex.synchronize { # Synchronize around @url_info
+               logger.warn("Elasticsearch output attempted to sniff for new connections but cannot. No living connections are detected. Pool contains the following current URLs", :url_info => @url_info) }
+           end
+         end
+       end
+     end
+
+     # Sniffs the cluster then updates the internal URLs
+     def sniff!
+       update_urls(check_sniff)
+     end
+
+     ES1_SNIFF_RE_URL = /\[([^\/]*)?\/?([^:]*):([0-9]+)\]/
+     ES2_SNIFF_RE_URL = /([^\/]*)?\/?([^:]*):([0-9]+)/
+     # Sniffs and returns the results. Does not update internal URLs!
+     def check_sniff
+       _, url_meta, resp = perform_request(:get, @sniffing_path)
+       @metric.increment(:sniff_requests)
+       parsed = LogStash::Json.load(resp.body)
+       nodes = parsed['nodes']
+       if !nodes || nodes.empty?
+         @logger.warn("Sniff returned no nodes! Will not update hosts.")
+         return nil
+       else
+         case major_version(url_meta[:version])
+         when 5, 6
+           sniff_5x_and_above(nodes)
+         when 2, 1
+           sniff_2x_1x(nodes)
+         else
+           @logger.warn("Could not determine version for nodes in ES cluster!")
+           return nil
+         end
+       end
+     end
+
+     def major_version(version_string)
+       version_string.split('.').first.to_i
+     end
+
+     def sniff_5x_and_above(nodes)
+       nodes.map do |id, info|
+         # Skip master-only nodes
+         next if info["roles"] && info["roles"] == ["master"]
+
+         if info["http"]
+           uri = LogStash::Util::SafeURI.new(info["http"]["publish_address"])
+         end
+       end.compact
+     end
+
+     def sniff_2x_1x(nodes)
+       nodes.map do |id, info|
+         # TODO Make sure this works with shield. Does that listed
+         # stuff as 'https_address?'
+
+         addr_str = info['http_address'].to_s
+         next unless addr_str # Skip hosts with HTTP disabled
+
+         # Only connect to nodes that serve data
+         # this will skip connecting to client, tribe, and master only nodes
+         # Note that if 'attributes' is NOT set, then that's just a regular node
+         # with master + data + client enabled, so we allow that
+         attributes = info['attributes']
+         next if attributes && attributes['data'] == 'false'
+
+         matches = addr_str.match(ES1_SNIFF_RE_URL) || addr_str.match(ES2_SNIFF_RE_URL)
+         if matches
+           host = matches[1].empty? ? matches[2] : matches[1]
+           port = matches[3]
+           ::LogStash::Util::SafeURI.new("#{host}:#{port}")
+         end
+       end.compact
+     end
+
+     def stop_sniffer
+       @sniffer.join if @sniffer
+     end
+
+     def sniffer_alive?
+       @sniffer ? @sniffer.alive? : nil
+     end
+
+     def start_resurrectionist
+       @resurrectionist = Thread.new do
+         until_stopped("resurrection", @resurrect_delay) do
+           healthcheck!
+         end
+       end
+     end
+
+     def healthcheck!
+       # Try to keep locking granularity low such that we don't affect IO...
+       @state_mutex.synchronize { @url_info.select {|url, meta| meta[:state] != :alive } }.each do |url, meta|
+         begin
+           logger.info("Running health check to see if an Elasticsearch connection is working",
+                       :healthcheck_url => url, :path => @healthcheck_path)
+           response = perform_request_to_url(url, :head, @healthcheck_path)
+           # If no exception was raised it must have succeeded!
+           logger.warn("Restored connection to ES instance", :url => url.sanitized.to_s)
+           # We reconnected to this node, check its ES version
+           es_version = get_es_version(url)
+           @state_mutex.synchronize do
+             meta[:version] = es_version
+             major = major_version(es_version)
+             if !@maximum_seen_major_version
+               @logger.info("ES Output version determined", :es_version => major)
+               set_new_major_version(major)
+             elsif major > @maximum_seen_major_version
+               @logger.warn("Detected a node with a higher major version than previously observed. This could be the result of an amazon_es cluster upgrade.", :previous_major => @maximum_seen_major_version, :new_major => major, :node_url => url)
+               set_new_major_version(major)
+             end
+             meta[:state] = :alive
+           end
+         rescue HostUnreachableError, BadResponseCodeError => e
+           logger.warn("Attempted to resurrect connection to dead ES instance, but got an error.", url: url.sanitized.to_s, error_type: e.class, error: e.message)
+         end
+       end
+     end
+
+     def stop_resurrectionist
+       @resurrectionist.join if @resurrectionist
+     end
+
+     def resurrectionist_alive?
+       @resurrectionist ? @resurrectionist.alive? : nil
+     end
+
+     def perform_request(method, path, params={}, body=nil)
+       with_connection do |url, url_meta|
+         resp = perform_request_to_url(url, method, path, params, body)
+         [url, url_meta, resp]
+       end
+     end
+
+     [:get, :put, :post, :delete, :patch, :head].each do |method|
+       define_method(method) do |path, params={}, body=nil|
+         _, _, response = perform_request(method, path, params, body)
+         response
+       end
+     end
+
+     def perform_request_to_url(url, method, path, params={}, body=nil)
+       res = @adapter.perform_request(url, method, path, params, body)
+     rescue *@adapter.host_unreachable_exceptions => e
+       raise HostUnreachableError.new(e, url), "Could not reach host #{e.class}: #{e.message}"
+     end
+
+     def normalize_url(uri)
+       u = @url_normalizer.call(uri)
+       if !u.is_a?(::LogStash::Util::SafeURI)
+         raise "URL Normalizer returned a '#{u.class}' rather than a SafeURI! This shouldn't happen!"
+       end
+       u
+     end
+
+     def update_urls(new_urls)
+       return if new_urls.nil?
+
+       # Normalize URLs
+       new_urls = new_urls.map(&method(:normalize_url))
+
+       # Used for logging nicely
+       state_changes = {:removed => [], :added => []}
+       @state_mutex.synchronize do
+         # Add new connections
+         new_urls.each do |url|
+           # URI objects don't have real hash equality! So, since this isn't perf sensitive we do a linear scan
+           unless @url_info.keys.include?(url)
+             state_changes[:added] << url
+             add_url(url)
+           end
+         end
+
+         # Delete connections not in the new list
+         @url_info.each do |url, _|
+           unless new_urls.include?(url)
+             state_changes[:removed] << url
+             remove_url(url)
+           end
+         end
+       end
+
+       if state_changes[:removed].size > 0 || state_changes[:added].size > 0
+         if logger.info?
+           logger.info("Elasticsearch pool URLs updated", :changes => state_changes)
+         end
+       end
+
+       # Run an inline healthcheck anytime URLs are updated
+       # This guarantees that during startup / post-startup
+       # sniffing we don't have idle periods waiting for the
+       # periodic sniffer to allow new hosts to come online
+       healthcheck!
+     end
+
+     def size
+       @state_mutex.synchronize { @url_info.size }
+     end
+
+     def es_versions
+       @state_mutex.synchronize { @url_info.size }
+     end
+
+     def add_url(url)
+       @url_info[url] ||= empty_url_meta
+     end
+
+     def remove_url(url)
+       @url_info.delete(url)
+     end
+
+     def empty_url_meta
+       {
+         :in_use => 0,
+         :state => :unknown
+       }
+     end
+
+     def with_connection
+       url, url_meta = get_connection
+
+       # Custom error class used here so that users may retry attempts if they receive this error
+       # should they choose to
+       raise NoConnectionAvailableError, "No Available connections" unless url
+       yield url, url_meta
+     rescue HostUnreachableError => e
+       # Mark the connection as dead here since this is likely not transient
+       mark_dead(url, e)
+       raise e
+     rescue BadResponseCodeError => e
+       # These aren't discarded from the pool because these are often very transient
+       # errors
+       raise e
+     rescue => e
+       logger.warn("UNEXPECTED POOL ERROR", :e => e)
+       raise e
+     ensure
+       return_connection(url)
+     end
+
+     def mark_dead(url, error)
+       @state_mutex.synchronize do
+         meta = @url_info[url]
+         # In case a sniff happened removing the metadata just before there's nothing to mark
+         # This is an extreme edge case, but it can happen!
+         return unless meta
+         logger.warn("Marking url as dead. Last error: [#{error.class}] #{error.message}",
+                     :url => url, :error_message => error.message, :error_class => error.class.name)
+         meta[:state] = :dead
+         meta[:last_error] = error
+         meta[:last_errored_at] = Time.now
+       end
+     end
+
+     def url_meta(url)
+       @state_mutex.synchronize do
+         @url_info[url]
+       end
+     end
+
+     def get_connection
+       @state_mutex.synchronize do
+         # The goal here is to pick a random connection from the least-in-use connections
+         # We want some randomness so that we don't hit the same node over and over, but
+         # we also want more 'fair' behavior in the event of high concurrency
+         eligible_set = nil
+         lowest_value_seen = nil
+         @url_info.each do |url, meta|
+           meta_in_use = meta[:in_use]
+           next if meta[:state] == :dead
+
+           if lowest_value_seen.nil? || meta_in_use < lowest_value_seen
+             lowest_value_seen = meta_in_use
+             eligible_set = [[url, meta]]
+           elsif lowest_value_seen == meta_in_use
+             eligible_set << [url, meta]
+           end
+         end
+
+         return nil if eligible_set.nil?
+
+         pick, pick_meta = eligible_set.sample
+         pick_meta[:in_use] += 1
+
+         [pick, pick_meta]
+       end
+     end
+
+     def return_connection(url)
+       @state_mutex.synchronize do
+         if @url_info[url] # Guard against the condition where the connection has already been deleted
+           @url_info[url][:in_use] -= 1
+         end
+       end
+     end
+
+     def get_es_version(url)
+       request = perform_request_to_url(url, :get, ROOT_URI_PATH)
+       LogStash::Json.load(request.body)["version"]["number"]
+     end
+
+     def set_new_major_version(version)
+       @maximum_seen_major_version = version
+       if @maximum_seen_major_version >= 6
+         @logger.warn("Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type", :es_version => @maximum_seen_major_version)
+       end
+     end
+   end
+ end; end; end; end;
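The pool's load balancing in get_connection reduces to a simple rule: among URLs that are not marked :dead, keep those with the lowest :in_use count, then pick one of the ties at random and bump its counter until return_connection releases it. A small self-contained sketch of that rule, with made-up node URLs and counters purely for illustration:

# Made-up pool state for illustration only.
url_info = {
  'http://node-1:9200' => { :state => :alive, :in_use => 2 },
  'http://node-2:9200' => { :state => :alive, :in_use => 0 },
  'http://node-3:9200' => { :state => :dead,  :in_use => 0 },
  'http://node-4:9200' => { :state => :alive, :in_use => 0 },
}

# 1. Drop dead URLs, 2. keep only the least-used ones, 3. pick among the ties at random.
candidates = url_info.reject { |_url, meta| meta[:state] == :dead }
lowest     = candidates.values.map { |meta| meta[:in_use] }.min
eligible   = candidates.select { |_url, meta| meta[:in_use] == lowest }

url, meta = eligible.to_a.sample    # node-2 or node-4; never the busier node-1
meta[:in_use] += 1                  # leased; return_connection decrements this later
puts url

Because dead URLs are skipped during selection, a node that fails its health check stops receiving traffic until the resurrectionist thread successfully re-checks it and sets its state back to :alive.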