fluent-plugin-test 0.0.17

# SPDX-License-Identifier: Apache-2.0
#
# The fluent-plugin-opensearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright fluent-plugin-opensearch Contributors. See
# GitHub history for details.
#
# Licensed to Uken Inc. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Uken Inc. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

require 'date'
require 'excon'
require 'opensearch'
require 'set'
require 'json'
require 'uri'
require 'base64'
begin
  require 'strptime'
rescue LoadError
end
require 'resolv'

require 'fluent/plugin/output'
require 'fluent/event'
require 'fluent/error'
require 'fluent/time'
require 'fluent/unique_id'
require 'fluent/log-ext'
require 'zlib'
require_relative 'opensearch_constants'
require_relative 'opensearch_error'
require_relative 'opensearch_error_handler'
require_relative 'opensearch_index_template'
require_relative 'opensearch_tls'
require_relative 'opensearch_fallback_selector'
begin
  require_relative 'oj_serializer'
rescue LoadError
end
require 'aws-sdk-core'
require 'faraday_middleware/aws_sigv4'
require 'faraday/excon'

module Fluent::Plugin
  class OpenSearchOutput < Output
    class RecoverableRequestFailure < StandardError; end
    class UnrecoverableRequestFailure < Fluent::UnrecoverableError; end
    class RetryStreamEmitFailure < StandardError; end

    # MissingIdFieldError is raised for records that do not
    # include the field for the unique record identifier.
    class MissingIdFieldError < StandardError; end

    # RetryStreamError provides a stream to be
    # put back in the pipeline for cases where a bulk request
    # failed (e.g. some records succeed while others fail).
    class RetryStreamError < StandardError
      attr_reader :retry_stream
      def initialize(retry_stream)
        @retry_stream = retry_stream
      end
    end

    RequestInfo = Struct.new(:host, :index, :target_index, :alias)

    attr_reader :template_names
    attr_reader :ssl_version_options
    attr_reader :compressable_connection

    helpers :event_emitter, :compat_parameters, :record_accessor, :timer

    Fluent::Plugin.register_output('opensearch', self)

    DEFAULT_BUFFER_TYPE = "memory"
    DEFAULT_OPENSEARCH_VERSION = 1
    DEFAULT_TYPE_NAME = "_doc".freeze
    DEFAULT_RELOAD_AFTER = -1
    DEFAULT_TARGET_BULK_BYTES = -1
    DEFAULT_POLICY_ID = "logstash-policy"

    config_param :host, :string, :default => 'localhost'
    config_param :port, :integer, :default => 9200
    config_param :user, :string, :default => nil
    config_param :password, :string, :default => nil, :secret => true
    config_param :path, :string, :default => nil
    config_param :scheme, :enum, :list => [:https, :http], :default => :http
    config_param :hosts, :string, :default => nil
    config_param :target_index_key, :string, :default => nil
    config_param :time_key_format, :string, :default => nil
    config_param :time_precision, :integer, :default => 9
    config_param :include_timestamp, :bool, :default => false
    config_param :logstash_format, :bool, :default => false
    config_param :logstash_prefix, :string, :default => "logstash"
    config_param :logstash_prefix_separator, :string, :default => '-'
    config_param :logstash_dateformat, :string, :default => "%Y.%m.%d"
    config_param :utc_index, :bool, :default => true
    config_param :suppress_type_name, :bool, :default => false
    config_param :index_name, :string, :default => "fluentd"
    config_param :id_key, :string, :default => nil
    config_param :write_operation, :string, :default => "index"
    config_param :parent_key, :string, :default => nil
    config_param :routing_key, :string, :default => nil
    config_param :request_timeout, :time, :default => 5
    config_param :reload_connections, :bool, :default => true
    config_param :reload_on_failure, :bool, :default => false
    config_param :retry_tag, :string, :default => nil
    config_param :resurrect_after, :time, :default => 60
    config_param :time_key, :string, :default => nil
    config_param :time_key_exclude_timestamp, :bool, :default => false
    config_param :ssl_verify, :bool, :default => true
    config_param :client_key, :string, :default => nil
    config_param :client_cert, :string, :default => nil
    config_param :client_key_pass, :string, :default => nil, :secret => true
    config_param :ca_file, :string, :default => nil
    config_param :remove_keys, :string, :default => nil
    config_param :remove_keys_on_update, :string, :default => ""
    config_param :remove_keys_on_update_key, :string, :default => nil
    config_param :flatten_hashes, :bool, :default => false
    config_param :flatten_hashes_separator, :string, :default => "_"
    config_param :template_name, :string, :default => nil
    config_param :template_file, :string, :default => nil
    config_param :template_overwrite, :bool, :default => false
    config_param :customize_template, :hash, :default => nil
    config_param :index_date_pattern, :string, :default => "now/d"
    config_param :index_separator, :string, :default => "-"
    config_param :application_name, :string, :default => "default"
    config_param :templates, :hash, :default => nil
    config_param :max_retry_putting_template, :integer, :default => 10
    config_param :fail_on_putting_template_retry_exceed, :bool, :default => true
    config_param :fail_on_detecting_os_version_retry_exceed, :bool, :default => true
    config_param :max_retry_get_os_version, :integer, :default => 15
    config_param :include_tag_key, :bool, :default => false
    config_param :tag_key, :string, :default => 'tag'
    config_param :time_parse_error_tag, :string, :default => 'opensearch_plugin.output.time.error'
    config_param :reconnect_on_error, :bool, :default => false
    config_param :pipeline, :string, :default => nil
    config_param :with_transporter_log, :bool, :default => false
    config_param :emit_error_for_missing_id, :bool, :default => false
    config_param :sniffer_class_name, :string, :default => nil
    config_param :selector_class_name, :string, :default => nil
    config_param :reload_after, :integer, :default => DEFAULT_RELOAD_AFTER
    config_param :include_index_in_url, :bool, :default => false
    config_param :http_backend, :enum, list: [:excon, :typhoeus], :default => :excon
    config_param :http_backend_excon_nonblock, :bool, :default => true
    config_param :validate_client_version, :bool, :default => false
    config_param :prefer_oj_serializer, :bool, :default => false
    config_param :unrecoverable_error_types, :array, :default => ["out_of_memory_error", "rejected_execution_exception"]
    config_param :unrecoverable_record_types, :array, :default => ["json_parse_exception"]
    config_param :emit_error_label_event, :bool, :default => true
    config_param :verify_os_version_at_startup, :bool, :default => true
    config_param :default_opensearch_version, :integer, :default => DEFAULT_OPENSEARCH_VERSION
    config_param :log_os_400_reason, :bool, :default => false
    config_param :custom_headers, :hash, :default => {}
    config_param :suppress_doc_wrap, :bool, :default => false
    config_param :ignore_exceptions, :array, :default => [], value_type: :string, :desc => "Ignorable exception list"
    config_param :exception_backup, :bool, :default => true, :desc => "Chunk backup flag when an ignored exception occurred"
    config_param :bulk_message_request_threshold, :size, :default => DEFAULT_TARGET_BULK_BYTES
    config_param :compression_level, :enum, list: [:no_compression, :best_speed, :best_compression, :default_compression], :default => :no_compression
    config_param :truncate_caches_interval, :time, :default => nil
    config_param :use_legacy_template, :bool, :default => true
    config_param :catch_transport_exception_on_retry, :bool, :default => true
    config_param :target_index_affinity, :bool, :default => false

    config_section :metadata, param_name: :metainfo, multi: false do
      config_param :include_chunk_id, :bool, :default => false
      config_param :chunk_id_key, :string, :default => "chunk_id".freeze
    end

    config_section :endpoint, multi: false do
      config_param :region, :string
      config_param :url do |c|
        c.chomp("/")
      end
      config_param :access_key_id, :string, :default => ""
      config_param :secret_access_key, :string, :default => "", secret: true
      config_param :assume_role_arn, :string, :default => nil
      # Set with the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value.
      config_param :ecs_container_credentials_relative_uri, :string, :default => nil
      config_param :assume_role_session_name, :string, :default => "fluentd"
      config_param :assume_role_web_identity_token_file, :string, :default => nil
      config_param :sts_credentials_region, :string, :default => nil
      config_param :refresh_credentials_interval, :time, :default => "5h"
      config_param :aws_service_name, :enum, list: [:es, :aoss], :default => :es
    end

    config_section :buffer do
      config_set_default :@type, DEFAULT_BUFFER_TYPE
      config_set_default :chunk_keys, ['tag']
      config_set_default :timekey_use_utc, true
    end
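
    # For orientation, a minimal Fluentd configuration exercising the
    # parameters above might look like the following (values are
    # illustrative, not defaults enforced by this plugin):
    #
    #   <match my.logs>
    #     @type opensearch
    #     host localhost
    #     port 9200
    #     scheme https
    #     logstash_format true
    #     logstash_prefix logstash
    #     <buffer tag>
    #       @type memory
    #     </buffer>
    #   </match>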

    include Fluent::OpenSearchIndexTemplate
    include Fluent::Plugin::OpenSearchConstants
    include Fluent::Plugin::OpenSearchTLS

    def initialize
      super
    end

    ######################################################################################################
    # This AWS-credentials code is heavily based on fluent-plugin-aws-elasticsearch-service:
    # https://github.com/atomita/fluent-plugin-aws-elasticsearch-service/blob/master/lib/fluent/plugin/out_aws-elasticsearch-service.rb#L73-L134
    ######################################################################################################
    def aws_credentials(conf)
      credentials = nil
      unless conf[:access_key_id].empty? || conf[:secret_access_key].empty?
        credentials = Aws::Credentials.new(conf[:access_key_id], conf[:secret_access_key])
      else
        if conf[:assume_role_arn].nil?
          aws_container_credentials_relative_uri = conf[:ecs_container_credentials_relative_uri] || ENV["AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"]
          if aws_container_credentials_relative_uri.nil?
            credentials = Aws::SharedCredentials.new({retries: 2}).credentials rescue nil
            credentials ||= Aws::InstanceProfileCredentials.new.credentials rescue nil
            credentials ||= Aws::ECSCredentials.new.credentials
          else
            credentials = Aws::ECSCredentials.new({
                            credential_path: aws_container_credentials_relative_uri
                          }).credentials
          end
        else
          if conf[:assume_role_web_identity_token_file].nil?
            credentials = Aws::AssumeRoleCredentials.new({
                            role_arn: conf[:assume_role_arn],
                            role_session_name: conf[:assume_role_session_name],
                            region: sts_creds_region(conf)
                          }).credentials
          else
            credentials = Aws::AssumeRoleWebIdentityCredentials.new({
                            role_arn: conf[:assume_role_arn],
                            web_identity_token_file: conf[:assume_role_web_identity_token_file],
                            region: sts_creds_region(conf)
                          }).credentials
          end
        end
      end
      raise "No valid AWS credentials found." unless credentials.set?

      credentials
    end

    def sts_creds_region(conf)
      conf[:sts_credentials_region] || conf[:region]
    end
    ###############################
    # End of AWS credentials part.
    ###############################

    def configure(conf)
      compat_parameters_convert(conf, :buffer)

      super

      if @endpoint
        # Override the default value of reload_connections to false here, because
        # AWS Elasticsearch Service doesn't return the addresses of its nodes and
        # the Elasticsearch client fails to reload connections properly. This ends
        # up with the "temporarily failed to flush the buffer" error repeating
        # forever. See this discussion for details:
        # https://discuss.elastic.co/t/elasitcsearch-ruby-raises-cannot-get-new-connection-from-pool-error/36252
        @reload_connections = false
      end

      if placeholder_substitution_needed_for_template?
        # nop.
      elsif not @buffer_config.chunk_keys.include? "tag" and
            not @buffer_config.chunk_keys.include? "_index"
        raise Fluent::ConfigError, "'tag' or '_index' in chunk_keys is required."
      end
      @time_parser = create_time_parser
      @backend_options = backend_options
      @ssl_version_options = set_tls_minmax_version_config(@ssl_version, @ssl_max_version, @ssl_min_version)

      if @remove_keys
        @remove_keys = @remove_keys.split(/\s*,\s*/)
      end

      if @target_index_key && @target_index_key.is_a?(String)
        @target_index_key = @target_index_key.split '.'
      end

      if @remove_keys_on_update && @remove_keys_on_update.is_a?(String)
        @remove_keys_on_update = @remove_keys_on_update.split ','
      end

      raise Fluent::ConfigError, "'max_retry_putting_template' must be greater than or equal to zero." if @max_retry_putting_template < 0
      raise Fluent::ConfigError, "'max_retry_get_os_version' must be greater than or equal to zero." if @max_retry_get_os_version < 0

      # Log when host placeholders and template features are used at the same time.
      valid_host_placeholder = placeholder?(:host_placeholder, @host)
      if valid_host_placeholder && (@template_name && @template_file || @templates)
        if @verify_os_version_at_startup
          raise Fluent::ConfigError, "Host placeholders, template installation, and verifying the OpenSearch version at startup are mutually exclusive features. Please set verify_os_version_at_startup to `false` when host placeholders and template installation are enabled."
        end
        log.info "Using host placeholders together with template installation may slow down your OpenSearch cluster a bit. (beta)"
      end

      @template_names = []
      if !dry_run?
        if @template_name && @template_file
          if @logstash_format || placeholder_substitution_needed_for_template?
            class << self
              alias_method :template_installation, :template_installation_actual
            end
          else
            template_installation_actual(@template_name, @customize_template, @application_name, @index_name)
          end
        end
        if @templates
          retry_operate(@max_retry_putting_template,
                        @fail_on_putting_template_retry_exceed,
                        @catch_transport_exception_on_retry) do
            templates_hash_install(@templates, @template_overwrite)
          end
        end
      end

      @truncate_mutex = Mutex.new
      if @truncate_caches_interval
        timer_execute(:out_opensearch_truncate_caches, @truncate_caches_interval) do
          log.info('Clean up the indices and template names cache')

          @truncate_mutex.synchronize {
            @template_names.clear
          }
        end
      end
      # If AWS credentials are set, forcibly refresh the credential information before it expires.
      @credential_mutex = Mutex.new
      if @endpoint
        @_aws_credentials = aws_credentials(@endpoint)

        if @endpoint.refresh_credentials_interval
          timer_execute(:out_opensearch_expire_credentials, @endpoint.refresh_credentials_interval) do
            log.debug('Recreate the AWS credentials')

            @credential_mutex.synchronize do
              @_os = nil
              begin
                @_aws_credentials = aws_credentials(@endpoint)
              rescue => e
                log.error("Failed to get new AWS credentials: #{e}")
              end
            end
          end
        end
      end

      @serializer_class = nil
      begin
        require 'oj'
        @dump_proc = Oj.method(:dump)
        if @prefer_oj_serializer
          @serializer_class = Fluent::Plugin::Serializer::Oj
          OpenSearch::API.settings[:serializer] = Fluent::Plugin::Serializer::Oj
        end
      rescue LoadError
        @dump_proc = Yajl.method(:dump)
      end

      raise Fluent::ConfigError, "`password` must be present if `user` is present" if @user && @password.nil?

      if @user && m = @user.match(/%{(?<user>.*)}/)
        @user = URI.encode_www_form_component(m["user"])
      end
      if @password && m = @password.match(/%{(?<password>.*)}/)
        @password = URI.encode_www_form_component(m["password"])
      end

      @transport_logger = nil
      if @with_transporter_log
        @transport_logger = log
        log_level = conf['@log_level'] || conf['log_level']
        log.warn "Consider specifying the log level with @log_level." unless log_level
      end
      # Specify @sniffer_class before calling #client, because
      # #detect_os_major_version uses #client.
      @sniffer_class = nil
      begin
        @sniffer_class = Object.const_get(@sniffer_class_name) if @sniffer_class_name
      rescue Exception => ex
        raise Fluent::ConfigError, "Could not load sniffer class #{@sniffer_class_name}: #{ex}"
      end

      @selector_class = nil
      begin
        @selector_class = Object.const_get(@selector_class_name) if @selector_class_name
      rescue Exception => ex
        raise Fluent::ConfigError, "Could not load selector class #{@selector_class_name}: #{ex}"
      end

      @last_seen_major_version = if major_version = handle_last_seen_os_major_version
                                   major_version
                                 else
                                   @default_opensearch_version
                                 end

      if @validate_client_version && !dry_run?
        if @last_seen_major_version != client_library_version.to_i
          raise Fluent::ConfigError, <<-EOC
            Detected OpenSearch #{@last_seen_major_version} but you are using OpenSearch client #{client_library_version}.
            Please consider using the #{@last_seen_major_version}.x series OpenSearch client.
          EOC
        end
      end

      if @last_seen_major_version >= 1
        case @ssl_version
        when :SSLv23, :TLSv1, :TLSv1_1
          if @scheme == :https
            log.warn "Detected OpenSearch 1.x or above with an insecure TLS version enabled:
You might have to specify `ssl_version TLSv1_2` in the configuration."
          end
        end
      end

      if @ssl_version && @scheme == :https
        if !@http_backend_excon_nonblock
          log.warn "The TLS handshake can get stuck with blocking connections.
Consider setting `http_backend_excon_nonblock` to true."
        end
      end

      # Handle nested key specifiers that are missing the "$." prefix.
      @id_key = convert_compat_id_key(@id_key) if @id_key
      @parent_key = convert_compat_id_key(@parent_key) if @parent_key
      @routing_key = convert_compat_id_key(@routing_key) if @routing_key

      @routing_key_name = configure_routing_key_name
      @meta_config_map = create_meta_config_map
      @current_config = nil
      @compressable_connection = false

      @ignore_exception_classes = @ignore_exceptions.map do |exception|
        unless Object.const_defined?(exception)
          log.warn "Cannot find class #{exception}. Will ignore it."

          nil
        else
          Object.const_get(exception)
        end
      end.compact

      if @bulk_message_request_threshold < 0
        class << self
          alias_method :split_request?, :split_request_size_uncheck?
        end
      else
        class << self
          alias_method :split_request?, :split_request_size_check?
        end
      end
    end

    def dry_run?
      if Fluent::Engine.respond_to?(:dry_run_mode)
        Fluent::Engine.dry_run_mode
      elsif Fluent::Engine.respond_to?(:supervisor_mode)
        Fluent::Engine.supervisor_mode
      end
    end

    def placeholder?(name, param)
      placeholder_validities = []
      placeholder_validators(name, param).each do |v|
        begin
          v.validate!
          placeholder_validities << true
        rescue Fluent::ConfigError => e
          log.debug("'#{name} #{param}' was tested against the built-in placeholders but no valid placeholder was found. error: #{e}")
          placeholder_validities << false
        end
      end
      placeholder_validities.include?(true)
    end

    def emit_error_label_event?
      !!@emit_error_label_event
    end

    def compression
      !(@compression_level == :no_compression)
    end

    def compression_strategy
      case @compression_level
      when :default_compression
        Zlib::DEFAULT_COMPRESSION
      when :best_compression
        Zlib::BEST_COMPRESSION
      when :best_speed
        Zlib::BEST_SPEED
      else
        Zlib::NO_COMPRESSION
      end
    end

    def backend_options
      case @http_backend
      when :excon
        { client_key: @client_key, client_cert: @client_cert, client_key_pass: @client_key_pass, nonblock: @http_backend_excon_nonblock }
      when :typhoeus
        require 'faraday/typhoeus'
        { sslkey: @client_key, sslcert: @client_cert, keypasswd: @client_key_pass }
      end
    rescue LoadError => ex
      log.error_backtrace(ex.backtrace)
      raise Fluent::ConfigError, "You must install the #{@http_backend} gem. Exception: #{ex}"
    end

    def handle_last_seen_os_major_version
      if @verify_os_version_at_startup && !dry_run?
        retry_operate(@max_retry_get_os_version,
                      @fail_on_detecting_os_version_retry_exceed,
                      @catch_transport_exception_on_retry) do
          detect_os_major_version
        end
      else
        nil
      end
    end

    def detect_os_major_version
      @_os_info ||= client.info
      begin
        unless version = @_os_info.dig("version", "number")
          version = @default_opensearch_version
        end
      rescue NoMethodError => e
        log.warn "Could not dig the version information out of #{@_os_info}. Assuming OpenSearch #{@default_opensearch_version}", error: e
        version = @default_opensearch_version
      end
      version.to_i
    end

    def client_library_version
      OpenSearch::VERSION
    end

    def configure_routing_key_name
      'routing'.freeze
    end

    def convert_compat_id_key(key)
      if key.include?('.') && !key.start_with?('$[')
        key = "$.#{key}" unless key.start_with?('$.')
      end
      key
    end
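
    # Illustrative examples (not executed by the plugin) of the conversion
    # rules above:
    #
    #   convert_compat_id_key('request_id')      #=> "request_id"     (no dot, unchanged)
    #   convert_compat_id_key('user.name')       #=> "$.user.name"    (dot notation gains the "$." prefix)
    #   convert_compat_id_key('$.user.name')     #=> "$.user.name"    (already prefixed, unchanged)
    #   convert_compat_id_key('$["user.name"]')  #=> '$["user.name"]' (bracket notation is left as-is)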

    def create_meta_config_map
      result = []
      result << [record_accessor_create(@id_key), '_id'] if @id_key
      result << [record_accessor_create(@parent_key), '_parent'] if @parent_key
      result << [record_accessor_create(@routing_key), @routing_key_name] if @routing_key
      result
    end

    # Once fluentd v0.14 is released we might be able to use
    # Fluent::Parser::TimeParser, but it doesn't quite do what we want - it gives
    # [sec, nsec] whereas we want something we can call `strftime` on...
    def create_time_parser
      if @time_key_format
        begin
          # Strptime doesn't support all formats, but for those it does support,
          # it's blazingly fast.
          strptime = Strptime.new(@time_key_format)
          Proc.new { |value|
            value = convert_numeric_time_into_string(value, @time_key_format) if value.is_a?(Numeric)
            strptime.exec(value).to_datetime
          }
        rescue
          # Can happen if Strptime doesn't recognize the format, or
          # if strptime couldn't be required (because it's not installed -- it's
          # ruby 2 only).
          Proc.new { |value|
            value = convert_numeric_time_into_string(value, @time_key_format) if value.is_a?(Numeric)
            DateTime.strptime(value, @time_key_format)
          }
        end
      else
        Proc.new { |value|
          value = convert_numeric_time_into_string(value) if value.is_a?(Numeric)
          DateTime.parse(value)
        }
      end
    end

    def convert_numeric_time_into_string(numeric_time, time_key_format = "%Y-%m-%d %H:%M:%S.%N%z")
      numeric_time_parser = Fluent::NumericTimeParser.new(:float)
      Time.at(numeric_time_parser.parse(numeric_time).to_r).strftime(time_key_format)
    end
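
    # A minimal sketch of how numeric event times flow through the helpers above
    # (the timestamp is hypothetical; the rendered zone depends on the host):
    #
    #   parser = create_time_parser   # with time_key_format unset
    #   parser.call(1700000000.123)
    #   # the numeric value is first rendered via convert_numeric_time_into_string,
    #   # then parsed back into a DateTime with DateTime.parse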

    def parse_time(value, event_time, tag)
      @time_parser.call(value)
    rescue => e
      if emit_error_label_event?
        router.emit_error_event(@time_parse_error_tag, Fluent::Engine.now, {'tag' => tag, 'time' => event_time, 'format' => @time_key_format, 'value' => value}, e)
      end
      return Time.at(event_time).to_datetime
    end

    def client(host = nil, compress_connection = false)
      # Check here to see if we already have a client connection for the given host.
      connection_options = get_connection_options(host)

      @_os = nil unless is_existing_connection(connection_options[:hosts])
      @_os = nil unless @compressable_connection == compress_connection

      @_os ||= begin
        @compressable_connection = compress_connection
        @current_config = connection_options[:hosts].clone
        adapter_conf = if @endpoint
                         lambda do |f|
                           f.request(
                             :aws_sigv4,
                             service: @endpoint.aws_service_name.to_s,
                             region: @endpoint.region,
                             credentials: @_aws_credentials,
                           )

                           f.adapter @http_backend, @backend_options
                         end
                       else
                         lambda {|f| f.adapter @http_backend, @backend_options }
                       end

        local_reload_connections = @reload_connections
        if local_reload_connections && @reload_after > DEFAULT_RELOAD_AFTER
          local_reload_connections = @reload_after
        end

        gzip_headers = if compress_connection
                         {'Content-Encoding' => 'gzip'}
                       else
                         {}
                       end
        headers = {}.merge(@custom_headers)
                    .merge(gzip_headers)
        ssl_options = { verify: @ssl_verify, ca_file: @ca_file }.merge(@ssl_version_options)

        transport = OpenSearch::Transport::Transport::HTTP::Faraday.new(connection_options.merge(
                      options: {
                        reload_connections: local_reload_connections,
                        reload_on_failure: @reload_on_failure,
                        resurrect_after: @resurrect_after,
                        logger: @transport_logger,
                        transport_options: {
                          headers: headers,
                          request: { timeout: @request_timeout },
                          ssl: ssl_options,
                        },
                        http: {
                          user: @user,
                          password: @password,
                          scheme: @scheme
                        },
                        sniffer_class: @sniffer_class,
                        serializer_class: @serializer_class,
                        selector_class: @selector_class,
                        compression: compress_connection,
                      }), &adapter_conf)
        OpenSearch::Client.new transport: transport
      end
    end

    def get_escaped_userinfo(host_str)
      if m = host_str.match(/(?<scheme>.*)%{(?<user>.*)}:%{(?<password>.*)}(?<path>@.*)/)
        m["scheme"] +
          URI.encode_www_form_component(m["user"]) +
          ':' +
          URI.encode_www_form_component(m["password"]) +
          m["path"]
      else
        host_str
      end
    end
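
    # For example (illustrative credentials only), userinfo wrapped in %{...}
    # is percent-encoded while the rest of the URL is left untouched:
    #
    #   get_escaped_userinfo("https://%{j+hn}:%{p@ss}@logs.example.com/os")
    #   #=> "https://j%2Bhn:p%40ss@logs.example.com/os"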

    def get_connection_options(con_host=nil)
      hosts = if @endpoint # For AWS OpenSearch Service
                uri = URI(@endpoint.url)
                host = %w(user password path).inject(host: uri.host, port: uri.port, scheme: uri.scheme) do |hash, key|
                  hash[key.to_sym] = uri.public_send(key) unless uri.public_send(key).nil? || uri.public_send(key) == ''
                  hash
                end
                [host]
              elsif con_host || @hosts
                (con_host || @hosts).split(',').map do |host_str|
                  # Support the legacy hosts format: host:port,host:port,host:port...
                  if host_str.match(%r{^[^:]+(\:\d+)?$})
                    {
                      host: host_str.split(':')[0],
                      port: (host_str.split(':')[1] || @port).to_i,
                      scheme: @scheme.to_s
                    }
                  else
                    # The new hosts format expects URLs such as http://logs.foo.com,https://john:pass@logs2.foo.com/elastic
                    uri = URI(get_escaped_userinfo(host_str))
                    %w(user password path).inject(host: uri.host, port: uri.port, scheme: uri.scheme) do |hash, key|
                      hash[key.to_sym] = uri.public_send(key) unless uri.public_send(key).nil? || uri.public_send(key) == ''
                      hash
                    end
                  end
                end.compact
              else
                if Resolv::IPv6::Regex.match(@host)
                  [{host: "[#{@host}]", scheme: @scheme.to_s, port: @port}]
                else
                  [{host: @host, port: @port, scheme: @scheme.to_s}]
                end
              end.each do |host|
                host.merge!(user: @user, password: @password) if !host[:user] && @user
                host.merge!(path: @path) if !host[:path] && @path
              end

      {
        hosts: hosts
      }
    end
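
    # A sketch of how hosts strings are normalized (values illustrative,
    # assuming default port 9200 and scheme http):
    #
    #   get_connection_options("os1:9200,os2")
    #   #=> { hosts: [ {host: "os1", port: 9200, scheme: "http"},
    #   #              {host: "os2", port: 9200, scheme: "http"} ] }
    #
    #   get_connection_options("https://john:pass@logs.example.com/os")
    #   #=> { hosts: [ {host: "logs.example.com", port: 443, scheme: "https",
    #   #               user: "john", password: "pass", path: "/os"} ] }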

    def connection_options_description(con_host=nil)
      get_connection_options(con_host)[:hosts].map do |host_info|
        attributes = host_info.dup
        attributes[:password] = 'obfuscated' if attributes.has_key?(:password)
        attributes.inspect
      end.join(', ')
    end

    # append_record_to_messages adds a record to the bulk message
    # payload to be submitted to OpenSearch. Records that do
    # not include an '_id' field are skipped when 'write_operation'
    # is configured for 'create' or 'update'.
    #
    # Returns 'true' if the record was appended to the bulk message
    # and 'false' otherwise.
    def append_record_to_messages(op, meta, header, record, msgs)
      case op
      when UPDATE_OP, UPSERT_OP
        if meta.has_key?(ID_FIELD)
          header[UPDATE_OP] = meta
          msgs << @dump_proc.call(header) << BODY_DELIMITER
          msgs << @dump_proc.call(update_body(record, op)) << BODY_DELIMITER
          return true
        end
      when CREATE_OP
        if meta.has_key?(ID_FIELD)
          header[CREATE_OP] = meta
          msgs << @dump_proc.call(header) << BODY_DELIMITER
          msgs << @dump_proc.call(record) << BODY_DELIMITER
          return true
        end
      when INDEX_OP
        header[INDEX_OP] = meta
        msgs << @dump_proc.call(header) << BODY_DELIMITER
        msgs << @dump_proc.call(record) << BODY_DELIMITER
        return true
      end
      return false
    end
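
    # For an 'index' operation the appended payload follows the bulk API's
    # newline-delimited header/body pairs, e.g. (values illustrative):
    #
    #   {"index":{"_index":"logstash-2024.01.01"}}
    #   {"message":"hello","@timestamp":"2024-01-01T00:00:00.000000000+00:00"}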

    def update_body(record, op)
      update = remove_keys(record)
      if @suppress_doc_wrap
        return update
      end
      body = {"doc".freeze => update}
      if op == UPSERT_OP
        if update == record
          body["doc_as_upsert".freeze] = true
        else
          body[UPSERT_OP] = record
        end
      end
      body
    end
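
    # Sketch of the two upsert body shapes (hypothetical records):
    #
    #   # with no remove_keys_on_update, update == record:
    #   update_body({"name" => "a"}, UPSERT_OP)
    #   #=> {"doc" => {"name" => "a"}, "doc_as_upsert" => true}
    #
    #   # with remove_keys_on_update ["secret"], update != record, so the
    #   # full record is sent under the UPSERT_OP key instead:
    #   update_body({"name" => "a", "secret" => "x"}, UPSERT_OP)
    #   #=> {"doc" => {"name" => "a"}, "upsert" => {"name" => "a", "secret" => "x"}}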

    def remove_keys(record)
      keys = record[@remove_keys_on_update_key] || @remove_keys_on_update || []
      record.delete(@remove_keys_on_update_key)
      return record unless keys.any?
      record = record.dup
      keys.each { |key| record.delete(key) }
      record
    end

    def flatten_record(record, prefix=[])
      ret = {}
      if record.is_a? Hash
        record.each { |key, value|
          ret.merge! flatten_record(value, prefix + [key.to_s])
        }
      elsif record.is_a? Array
        # Don't mess with arrays, leave them unprocessed.
        ret.merge!({prefix.join(@flatten_hashes_separator) => record})
      else
        return {prefix.join(@flatten_hashes_separator) => record}
      end
      ret
    end
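
    # With the default flatten_hashes_separator "_", nested hashes collapse
    # into a single level while arrays are kept intact, e.g.:
    #
    #   flatten_record({"user" => {"name" => "a", "tags" => ["x", "y"]}})
    #   #=> {"user_name" => "a", "user_tags" => ["x", "y"]}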

    def expand_placeholders(chunk)
      logstash_prefix = extract_placeholders(@logstash_prefix, chunk)
      logstash_dateformat = extract_placeholders(@logstash_dateformat, chunk)
      index_name = extract_placeholders(@index_name, chunk)
      if @template_name
        template_name = extract_placeholders(@template_name, chunk)
      else
        template_name = nil
      end
      if @customize_template
        customize_template = @customize_template.each_with_object({}) { |(key, value), hash| hash[key] = extract_placeholders(value, chunk) }
      else
        customize_template = nil
      end
      if @application_name
        application_name = extract_placeholders(@application_name, chunk)
      else
        application_name = nil
      end
      if @pipeline
        pipeline = extract_placeholders(@pipeline, chunk)
      else
        pipeline = nil
      end
      return logstash_prefix, logstash_dateformat, index_name, template_name, customize_template, application_name, pipeline
    end

    def multi_workers_ready?
      true
    end

    def inject_chunk_id_to_record_if_needed(record, chunk_id)
      if @metainfo&.include_chunk_id
        record[@metainfo.chunk_id_key] = chunk_id
        record
      else
        record
      end
    end

    def write(chunk)
      bulk_message_count = Hash.new { |h,k| h[k] = 0 }
      bulk_message = Hash.new { |h,k| h[k] = '' }
      header = {}
      meta = {}

      tag = chunk.metadata.tag
      chunk_id = dump_unique_id_hex(chunk.unique_id)
      extracted_values = expand_placeholders(chunk)
      host = if @hosts
               extract_placeholders(@hosts, chunk)
             else
               extract_placeholders(@host, chunk)
             end

      affinity_target_indices = get_affinity_target_indices(chunk)
      chunk.msgpack_each do |time, record|
        next unless record.is_a? Hash

        record = inject_chunk_id_to_record_if_needed(record, chunk_id)

        begin
          meta, header, record = process_message(tag, meta, header, time, record, affinity_target_indices, extracted_values)
          info = if @include_index_in_url
                   RequestInfo.new(host, meta.delete("_index".freeze), meta["_index".freeze], meta.delete("_alias".freeze))
                 else
                   RequestInfo.new(host, nil, meta["_index".freeze], meta.delete("_alias".freeze))
                 end

          if split_request?(bulk_message, info)
            bulk_message.each do |info, msgs|
              send_bulk(msgs, tag, chunk, bulk_message_count[info], extracted_values, info) unless msgs.empty?
              msgs.clear
              # Clear bulk_message_count for this info.
              bulk_message_count[info] = 0
              next
            end
          end

          if append_record_to_messages(@write_operation, meta, header, record, bulk_message[info])
            bulk_message_count[info] += 1
          else
            if @emit_error_for_missing_id
              raise MissingIdFieldError, "Missing '_id' field. Write operation is #{@write_operation}"
            else
              log.on_debug { log.debug("Dropping record because it's missing an '_id' field and write_operation is #{@write_operation}: #{record}") }
            end
          end
        rescue => e
          if emit_error_label_event?
            router.emit_error_event(tag, time, record, e)
          end
        end
      end

      bulk_message.each do |info, msgs|
        send_bulk(msgs, tag, chunk, bulk_message_count[info], extracted_values, info) unless msgs.empty?
        msgs.clear
      end
    end

    def target_index_affinity_enabled?()
      @target_index_affinity && @logstash_format && @id_key && (@write_operation == UPDATE_OP || @write_operation == UPSERT_OP)
    end

    def get_affinity_target_indices(chunk)
      indices = Hash.new
      if target_index_affinity_enabled?()
        id_key_accessor = record_accessor_create(@id_key)
        ids = Set.new
        chunk.msgpack_each do |time, record|
          next unless record.is_a? Hash
          ids << id_key_accessor.call(record)
        end
        log.debug("Finding affinity target_indices by querying OpenSearch (write_operation #{@write_operation}) for ids: #{ids.to_a}")
        options = {
          :index => "#{logstash_prefix}#{@logstash_prefix_separator}*",
        }
        query = {
          'query' => { 'ids' => { 'values' => ids.to_a } },
          '_source' => false,
          'sort' => [
            {"_index" => {"order" => "desc"}}
          ]
        }
        result = client.search(options.merge(:body => Yajl.dump(query)))
        # There should be just one hit per _id, but in case there are still multiple,
        # only the oldest index ends up in the map.
        result['hits']['hits'].each do |hit|
          indices[hit["_id"]] = hit["_index"]
          log.debug("target_index for id: #{hit["_id"]} from es: #{hit["_index"]}")
        end
      end
      indices
    end

    def split_request?(bulk_message, info)
      # For safety.
    end

    def split_request_size_check?(bulk_message, info)
      bulk_message[info].size > @bulk_message_request_threshold
    end

    def split_request_size_uncheck?(bulk_message, info)
      false
    end

    def process_message(tag, meta, header, time, record, affinity_target_indices, extracted_values)
      logstash_prefix, logstash_dateformat, index_name, _template_name, _customize_template, application_name, pipeline = extracted_values

      if @flatten_hashes
        record = flatten_record(record)
      end

      dt = nil
      if @logstash_format || @include_timestamp
        if record.has_key?(TIMESTAMP_FIELD)
          rts = record[TIMESTAMP_FIELD]
          dt = parse_time(rts, time, tag)
        elsif record.has_key?(@time_key)
          rts = record[@time_key]
          dt = parse_time(rts, time, tag)
          record[TIMESTAMP_FIELD] = dt.iso8601(@time_precision) unless @time_key_exclude_timestamp
        else
          dt = Time.at(time).to_datetime
          record[TIMESTAMP_FIELD] = dt.iso8601(@time_precision)
        end
      end

      target_index_parent, target_index_child_key = @target_index_key ? get_parent_of(record, @target_index_key) : nil
      if target_index_parent && target_index_parent[target_index_child_key]
        target_index_alias = target_index = target_index_parent.delete(target_index_child_key)
      elsif @logstash_format
        dt = dt.new_offset(0) if @utc_index
        target_index = "#{logstash_prefix}#{@logstash_prefix_separator}#{dt.strftime(logstash_dateformat)}"
        target_index_alias = "#{logstash_prefix}#{@logstash_prefix_separator}#{application_name}#{@logstash_prefix_separator}#{dt.strftime(logstash_dateformat)}"
      else
        target_index_alias = target_index = index_name
      end

      # Change target_index to lower-case since OpenSearch doesn't
      # allow upper-case characters in index names.
      target_index = target_index.downcase
      target_index_alias = target_index_alias.downcase
      if @include_tag_key
        record[@tag_key] = tag
      end

      # If the affinity target indices map has a value for this particular id, use it as target_index.
      if !affinity_target_indices.empty?
        id_accessor = record_accessor_create(@id_key)
        id_value = id_accessor.call(record)
        if affinity_target_indices.key?(id_value)
          target_index = affinity_target_indices[id_value]
        end
      end

      if @suppress_type_name || @last_seen_major_version >= 2
        target_type = nil
      else
        # OpenSearch only supports "_doc".
        target_type = DEFAULT_TYPE_NAME
      end

      meta.clear
      meta["_index".freeze] = target_index
      meta["_type".freeze] = target_type unless target_type.nil?
      meta["_alias".freeze] = target_index_alias

      if @pipeline
        meta["pipeline".freeze] = pipeline
      end

      @meta_config_map.each do |record_accessor, meta_key|
        if raw_value = record_accessor.call(record)
          meta[meta_key] = raw_value
        end
      end

      if @remove_keys
        @remove_keys.each { |key| record.delete(key) }
      end

      return [meta, header, record]
    end

    # Returns [parent, child_key] of the child described by the path array in the record's tree.
    # Returns [nil, child_key] if the path doesn't exist in the record.
    def get_parent_of(record, path)
      parent_object = path[0..-2].reduce(record) { |a, e| a.is_a?(Hash) ? a[e] : nil }
      [parent_object, path[-1]]
    end
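
    # For example, with a hypothetical record and target_index_key path ["nested", "index"]:
    #
    #   get_parent_of({"nested" => {"index" => "app-logs"}}, ["nested", "index"])
    #   #=> [{"index" => "app-logs"}, "index"]
    #
    #   get_parent_of({"other" => 1}, ["nested", "index"])
    #   #=> [nil, "index"]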

    # gzip-compress data; compression_strategy supplies the Zlib compression level.
    def gzip(string)
      wio = StringIO.new("w")
      w_gz = Zlib::GzipWriter.new(wio, compression_strategy)
      w_gz.write(string)
      w_gz.close
      wio.string
    end
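
    # Illustrative round trip (Zlib is already required above):
    #
    #   compressed = gzip('{"message":"hello"}')
    #   Zlib::GzipReader.new(StringIO.new(compressed)).read
    #   #=> '{"message":"hello"}'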

    def placeholder_substitution_needed_for_template?
      need_substitution = placeholder?(:host, @host.to_s) ||
                          placeholder?(:index_name, @index_name.to_s) ||
                          placeholder?(:template_name, @template_name.to_s) ||
                          @customize_template&.values&.any? { |value| placeholder?(:customize_template, value.to_s) } ||
                          placeholder?(:logstash_prefix, @logstash_prefix.to_s) ||
                          placeholder?(:logstash_dateformat, @logstash_dateformat.to_s) ||
                          placeholder?(:application_name, @application_name.to_s)
      log.debug("Need substitution: #{need_substitution}")
      need_substitution
    end

    def template_installation(template_name, customize_template, application_name, target_index, host)
      # For safety.
    end

    def template_installation_actual(template_name, customize_template, application_name, target_index, host=nil)
      if template_name && @template_file
        if !@logstash_format && @template_names.include?(template_name)
          log.debug("Template #{template_name} already exists (cached)")
        else
          retry_operate(@max_retry_putting_template,
                        @fail_on_putting_template_retry_exceed,
                        @catch_transport_exception_on_retry) do
            if customize_template
              template_custom_install(template_name, @template_file, @template_overwrite, customize_template, host, target_index, @index_separator)
            else
              template_install(template_name, @template_file, @template_overwrite, host, target_index, @index_separator)
            end
          end
          @template_names << template_name
        end
      end
    end

    # send_bulk sends a given bulk request along with the original tag,
    # chunk, and bulk_message_count.
    def send_bulk(data, tag, chunk, bulk_message_count, extracted_values, info)
      _logstash_prefix, _logstash_dateformat, index_name, template_name, customize_template, application_name, _pipeline = extracted_values
      template_installation(template_name, customize_template, application_name, index_name, info.host)

      begin
        log.on_trace { log.trace "bulk request: #{data}" }

        prepared_data = if compression
                          gzip(data)
                        else
                          data
                        end

        response = client(info.host, compression).bulk body: prepared_data, index: info.index
        log.on_trace { log.trace "bulk response: #{response}" }

        if response['errors']
          error = Fluent::Plugin::OpenSearchErrorHandler.new(self)
          error.handle_error(response, tag, chunk, bulk_message_count, extracted_values)
        end
      rescue RetryStreamError => e
        log.trace "router.emit_stream for retry stream doing..."
        emit_tag = @retry_tag ? @retry_tag : tag
        # Check the capacity of the buffer space.
        if retry_stream_retryable?
          router.emit_stream(emit_tag, e.retry_stream)
        else
          raise RetryStreamEmitFailure, "buffer is full."
        end
        log.trace "router.emit_stream for retry stream done."
      rescue => e
        ignore = @ignore_exception_classes.any? { |clazz| e.class <= clazz }

        log.warn "Exception ignored in tag #{tag}: #{e.class.name} #{e.message}" if ignore

        @_os = nil if @reconnect_on_error
        @_os_info = nil if @reconnect_on_error

        raise UnrecoverableRequestFailure if ignore && @exception_backup

        # FIXME: identify unrecoverable errors and raise UnrecoverableRequestFailure instead
        raise RecoverableRequestFailure, "could not push logs to OpenSearch cluster (#{connection_options_description(info.host)}): #{e.message}" unless ignore
      end
    end

    def retry_stream_retryable?
      @buffer.storable?
    end

    def is_existing_connection(host)
      # Check if the provided host matches the current connection.
      return false if @_os.nil?
      return false if @current_config.nil?
      return false if host.length != @current_config.length

      for i in 0...host.length
        if !host[i][:host].eql?(@current_config[i][:host]) || host[i][:port] != @current_config[i][:port]
          return false
        end
      end

      return true
    end
  end
end