fluent-plugin-elasticsearch 5.0.0 → 5.0.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5f2f8268d9a8a5acf6d941a915044bd3781a5722cea8bc7ac205d6b7fd6fe580
- data.tar.gz: 7d9373fb963040efac0ea42f11bf1841f2e0b2a39b5566f5e2794507e77e5f5c
+ metadata.gz: 0e7a5cd238a268dd3f141ce4ad1dedfacbacc98fdd22f73839bb0e1cdbccc586
+ data.tar.gz: 8077a55fae311fd0bd9cd84fc2b2eb34fad8f7a2a890b2707d37c7ac1c753975
  SHA512:
- metadata.gz: 0e432748181717cedfa55239d2da7b0141c3280da7240f6f9643db411f1f3168f00f88c6933d65a5d1b944620dab0d87ce088f4dba76fdf17cec336ca55e83bf
- data.tar.gz: f44b2a14c5a13e1a59bd7d6926af13a9a9ba7b990594bca45de20071756d9ca88f7ee53d6ec00e09f0ad2ed054510a7803ac11c7b695275d37dbacb1e5d51457
+ metadata.gz: 7bede6c6213db75e2636c6bf31e65b3256ae709ef600b9f3fca8973943344e07421daf4ed04fc0f3dd06f6b1f662c93c27d9240a323f8139aee3bf2a99ed0f9b
+ data.tar.gz: caed0a434434c903451cdd4b92ae4427ca39984a8405154308d70a2b68351c6a05b7e505c08cd776230cef9dbbda46f4256cd7de138f3549a1860b5b6cee8e3c
@@ -8,7 +8,7 @@ jobs:
  strategy:
  fail-fast: false
  matrix:
- ruby: [ '2.4', '2.5', '2.6', '2.7' ]
+ ruby: [ '2.6', '2.7', '3.0' ]
  os:
  - ubuntu-latest
  name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
@@ -8,7 +8,7 @@ jobs:
  strategy:
  fail-fast: false
  matrix:
- ruby: [ '2.4', '2.5', '2.6', '2.7' ]
+ ruby: [ '2.6', '2.7', '3.0' ]
  os:
  - macOS-latest
  name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
@@ -8,7 +8,7 @@ jobs:
  strategy:
  fail-fast: false
  matrix:
- ruby: [ '2.4', '2.5', '2.6', '2.7' ]
+ ruby: [ '2.6', '2.7', '3.0' ]
  os:
  - windows-latest
  name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
data/History.md CHANGED
@@ -2,8 +2,29 @@

  ### [Unreleased]

+ ### 5.0.5
+ - Drop json_parse_exception messages for bulk failures (#900)
+ - GitHub Actions: Drop Ruby 2.5 due to EOL (#894)
+
+ ### 5.0.4
+ - test: out_elasticsearch: Remove a needless headers from affinity stub (#888)
+ - Target Index Affinity (#883)
+
+ ### 5.0.3
+ - Fix use_legacy_template documentation (#880)
+ - Add FAQ for dynamic index/template (#878)
+ - Handle IPv6 address string on host and hosts parameters (#877)
+
+ ### 5.0.2
+ - GitHub Actions: Tweak Ruby versions on test (#875)
+ - test: datastreams: Set nonexistent datastream as default (#874)
+ - Fix overwriting of index template and index lifecycle policy on existing data streams (#872)
+
+ ### 5.0.1
+ - Use elasticsearch/api instead of elasticsearch/xpack (#870)
+
  ### 5.0.0
- - Support #retry_operate on data stream (#863)
+ - Support #retry_operate on data stream (#863)
  - Support placeholder in @data\_stream\_name for @type elasticsearch\_data\_stream (#862)
  - Extract troubleshooting section (#861)
  - Fix unmatched `<source>` close tag (#860)
@@ -10,6 +10,7 @@
  + [Random 400 - Rejected by Elasticsearch is occured, why?](#random-400---rejected-by-elasticsearch-is-occured-why)
  + [Fluentd seems to hang if it unable to connect Elasticsearch, why?](#fluentd-seems-to-hang-if-it-unable-to-connect-elasticsearch-why)
  + [Enable Index Lifecycle Management](#enable-index-lifecycle-management)
+ + [Configuring for dynamic index or template](#configuring-for-dynamic-index-or-template)
  + [How to specify index codec](#how-to-specify-index-codec)
  + [Cannot push logs to Elasticsearch with connect_write timeout reached, why?](#cannot-push-logs-to-elasticsearch-with-connect_write-timeout-reached-why)

@@ -524,6 +525,96 @@ template_name your-fluentd-template
  template_file /path/to/fluentd-template.json
  ```

+ #### Configuring for dynamic index or template
+
+ Some users want to set up ILM for a dynamic index or template.
+ `index_patterns` and `template.settings.index.lifecycle.name` in the Elasticsearch template will be overwritten by the Elasticsearch plugin:
+
+ ```json
+ {
+   "index_patterns": ["mock"],
+   "template": {
+     "settings": {
+       "index": {
+         "lifecycle": {
+           "name": "mock",
+           "rollover_alias": "mock"
+         },
+         "number_of_shards": "<<shard>>",
+         "number_of_replicas": "<<replica>>"
+       }
+     }
+   }
+ }
+ ```
+
+ This template will be handled with:
+
+ ```aconf
+ <source>
+   @type http
+   port 5004
+   bind 0.0.0.0
+   body_size_limit 32m
+   keepalive_timeout 10s
+   <parse>
+     @type json
+   </parse>
+ </source>
+
+ <match kubernetes.var.log.containers.**etl-webserver**.log>
+   @type elasticsearch
+   @id out_es_etl_webserver
+   @log_level info
+   include_tag_key true
+   host $HOST
+   port $PORT
+   path "#{ENV['FLUENT_ELASTICSEARCH_PATH']}"
+   request_timeout "#{ENV['FLUENT_ELASTICSEARCH_REQUEST_TIMEOUT'] || '30s'}"
+   scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}"
+   ssl_verify "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERIFY'] || 'true'}"
+   ssl_version "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERSION'] || 'TLSv1'}"
+   reload_connections "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_CONNECTIONS'] || 'false'}"
+   reconnect_on_error "#{ENV['FLUENT_ELASTICSEARCH_RECONNECT_ON_ERROR'] || 'true'}"
+   reload_on_failure "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_ON_FAILURE'] || 'true'}"
+   log_es_400_reason "#{ENV['FLUENT_ELASTICSEARCH_LOG_ES_400_REASON'] || 'false'}"
+   logstash_prefix "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_PREFIX'] || 'etl-webserver'}"
+   logstash_format "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_FORMAT'] || 'false'}"
+   index_name "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_INDEX_NAME'] || 'etl-webserver'}"
+   type_name "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_TYPE_NAME'] || 'fluentd'}"
+   time_key "#{ENV['FLUENT_ELASTICSEARCH_TIME_KEY'] || '@timestamp'}"
+   include_timestamp "#{ENV['FLUENT_ELASTICSEARCH_INCLUDE_TIMESTAMP'] || 'true'}"
+
+   # ILM Settings - WITH ROLLOVER support
+   # https://github.com/uken/fluent-plugin-elasticsearch#enable-index-lifecycle-management
+   application_name "etl-webserver"
+   index_date_pattern ""
+   # Policy configurations
+   enable_ilm true
+   ilm_policy_id etl-webserver
+   ilm_policy_overwrite true
+   ilm_policy {"policy": {"phases": {"hot": {"min_age": "0ms","actions": {"rollover": {"max_age": "5m","max_size": "3gb"},"set_priority": {"priority": 100}}},"delete": {"min_age": "30d","actions": {"delete": {"delete_searchable_snapshot": true}}}}}}
+   use_legacy_template false
+   template_name etl-webserver
+   template_file /configs/index-template.json
+   template_overwrite true
+   customize_template {"<<shard>>": "3","<<replica>>": "0"}
+
+   <buffer>
+     flush_thread_count "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_FLUSH_THREAD_COUNT'] || '8'}"
+     flush_interval "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_FLUSH_INTERVAL'] || '5s'}"
+     chunk_limit_size "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_CHUNK_LIMIT_SIZE'] || '8MB'}"
+     total_limit_size "#{ENV['FLUENT_ELASTICSEARCH_TOTAL_LIMIT_SIZE'] || '450MB'}"
+     queue_limit_length "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_QUEUE_LIMIT_LENGTH'] || '32'}"
+     retry_max_interval "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_RETRY_MAX_INTERVAL'] || '60s'}"
+     retry_forever false
+   </buffer>
+ </match>
+ ```
+
+ For more details, please refer to the discussion:
+ https://github.com/uken/fluent-plugin-elasticsearch/issues/867
+
  ### How to specify index codec

  Elasticsearch can handle compression methods for stored data such as LZ4 and best_compression.
data/README.md CHANGED
@@ -38,6 +38,7 @@ Current maintainers: @cosmo0920
  + [suppress_type_name](#suppress_type_name)
  + [target_index_key](#target_index_key)
  + [target_type_key](#target_type_key)
+ + [target_index_affinity](#target_index_affinity)
  + [template_name](#template_name)
  + [template_file](#template_file)
  + [template_overwrite](#template_overwrite)
@@ -171,6 +172,24 @@ You can specify Elasticsearch host by this parameter.

  **Note:** Since v3.3.2, `host` parameter supports builtin placeholders. If you want to send events dynamically into different hosts at runtime with `elasticsearch_dynamic` output plugin, please consider to switch to use plain `elasticsearch` output plugin. In more detail for builtin placeholders, please refer to [Placeholders](#placeholders) section.

+ To use an IPv6 address for the `host` parameter, you can use either of the following styles:
+
+ #### string style
+
+ With the string style, you must quote the IPv6 address to prevent it from being interpreted as JSON:
+
+ ```
+ host "[2404:7a80:d440:3000:192a:a292:bd7f:ca10]"
+ ```
+
+ #### raw style
+
+ You can also specify the raw IPv6 address; it will be handled as `[specified IPv6 address]`:
+
+ ```
+ host 2404:7a80:d440:3000:192a:a292:bd7f:ca10
+ ```
+
  ### port

  ```
@@ -237,6 +256,16 @@ hosts host1:port1,host2:port2,host3 # port3 is 9200

  **Note:** Up until v2.8.5, it was allowed to embed the username/password in the URL. However, this syntax is deprecated as of v2.8.6 because it was found to cause serious connection problems (See #394). Please migrate your settings to use the `user` and `password` field (described below) instead.

+ #### IPv6 addresses
+
+ When you want to specify IPv6 addresses, you must also specify the scheme:
+
+ ```
+ hosts http://[2404:7a80:d440:3000:de:7311:6329:2e6c]:port1,http://[2404:7a80:d440:3000:de:7311:6329:1e6c]:port2,http://[2404:7a80:d440:3000:de:6311:6329:2e6c]:port3
+ ```
+
+ If you don't specify the scheme along with the hosts, the Elasticsearch plugin complains about an Invalid URI for them.
+
  ### user, password, path, scheme, ssl_verify

  ```
@@ -426,6 +455,75 @@ and this record will be written to the specified index (`logstash-2014.12.19`) r

  Similar to `target_index_key` config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to `type_name` (default "fluentd").

+ ### target_index_affinity
+
+ Enable the plugin to dynamically select a logstash time-based target index in update/upsert operations, based on already indexed data rather than the current time of indexing.
+
+ ```
+ target_index_affinity true # defaults to false
+ ```
+
+ By default the plugin writes data to a logstash-format index based on the current time. For example, with a daily index, data arriving after midnight is written to a newly created index. This is normally fine when data comes from a single source and is not updated after indexing.
+
+ But consider a use case where data is also updated after indexing, `id_key` is used to identify each document uniquely for updating, and the logstash format is still wanted for easy data management and retention. Updates are done right after indexing to complete the data (not all of it is available from a single source) and no further updates happen later. In this case the problem occurs at index rotation time, when writes with the same `id_key` value may go to two different indices.
+
+ This setting makes the plugin search existing data using Elasticsearch's [ids query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html) with the `id_key` value (against the `logstash_prefix` and `logstash_prefix_separator` index pattern, e.g. `logstash-*`). The index of the found data is used for the update/upsert. When no data is found, the record is written to the current logstash index as usual.
+
+ This setting requires the following other settings:
+ ```
+ logstash_format true
+ id_key myId # Some field on your data to identify the data uniquely
+ write_operation upsert # upsert or update
+ ```
+
+ Suppose you have the following situation, with two different `<match>` sections consuming data from two different Kafka topics independently but close in time to each other (order not known).
+
+ ```
+ <match data1>
+   @type elasticsearch
+   ...
+   id_key myId
+   write_operation upsert
+   logstash_format true
+   logstash_dateformat %Y.%m.%d
+   logstash_prefix myindexprefix
+   target_index_affinity true
+   ...
+
+ <match data2>
+   @type elasticsearch
+   ...
+   id_key myId
+   write_operation upsert
+   logstash_format true
+   logstash_dateformat %Y.%m.%d
+   logstash_prefix myindexprefix
+   target_index_affinity true
+   ...
+ ```
+
+ If your first (data1) input is:
+ ```
+ {
+   "myId": "myuniqueId1",
+   "datafield1": "some value"
+ }
+ ```
+
+ and your second (data2) input is:
+ ```
+ {
+   "myId": "myuniqueId1",
+   "datafield99": "some important data from other source tightly related to id myuniqueId1 and wanted to be in same document."
+ }
+ ```
+
+ Today's date is 10.05.2021, so data is written to index `myindexprefix-2021.05.10` when both data1 and data2 are consumed during that day.
+ But suppose we are close to index rotation: data1 is consumed and indexed at `2021-05-10T23:59:55.59707672Z` and data2
+ is consumed a bit later at `2021-05-11T00:00:58.222079Z`, i.e. the logstash index has been rotated and data2 would normally have been written
+ to index `myindexprefix-2021.05.11`. With `target_index_affinity` set to true, data2 is instead written to index `myindexprefix-2021.05.10`,
+ into the same document as data1, and a duplicate document is avoided.
+
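For reference, the lookup described above is implemented in `get_affinity_target_indices` (shown later in this diff) as a search against `<logstash_prefix><logstash_prefix_separator>*` whose request body looks roughly like this; the id value below is illustrative:

```json
{
  "query": { "ids": { "values": ["myuniqueId1"] } },
  "_source": false,
  "sort": [
    { "_index": { "order": "desc" } }
  ]
}
```

Hits are sorted by `_index` in descending order so that, if the same id is found in several indices, the oldest index is the one kept for the update/upsert.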
  ### template_name

  The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless [template_overwrite](#template_overwrite) is set, in which case the template will be updated.
@@ -1325,9 +1423,9 @@ Default value is `nil`.

  Use legacy template or not.

- Elasticsearch 7.8 or later supports the brand new composable templates.
+ For Elasticsearch 7.8 or later, users can specify this parameter as `false` if their [template_file](#template_file) contains a composable index template.

- For Elasticsearch 7.7 or older, users should specify this parameter as `false`.
+ For Elasticsearch 7.7 or older, users should specify this parameter as `true`.

  Composable template documentation is [Put Index Template API | Elasticsearch Reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html) and legacy template documentation is [Index Templates | Elasticsearch Reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates-v1.html).

@@ -3,7 +3,7 @@ $:.push File.expand_path('../lib', __FILE__)

  Gem::Specification.new do |s|
  s.name = 'fluent-plugin-elasticsearch'
- s.version = '5.0.0'
+ s.version = '5.0.5'
  s.authors = ['diogo', 'pitr', 'Hiroshi Hatake']
  s.email = ['pitr.vern@gmail.com', 'me@diogoterror.com', 'cosmo0920.wp@gmail.com']
  s.description = %q{Elasticsearch output plugin for Fluent event collector}
@@ -28,6 +28,7 @@ Gem::Specification.new do |s|


  s.add_development_dependency 'rake', '>= 0'
+ s.add_development_dependency 'webrick', '~> 1.7.0'
  s.add_development_dependency 'webmock', '~> 3'
  s.add_development_dependency 'test-unit', '~> 3.3.0'
  s.add_development_dependency 'minitest', '~> 5.8'
@@ -23,6 +23,10 @@ class Fluent::Plugin::ElasticsearchErrorHandler
  unrecoverable_error_types.include?(type)
  end

+ def unrecoverable_record_error?(type)
+ ['json_parse_exception'].include?(type)
+ end
+
  def log_es_400_reason(&block)
  if @plugin.log_es_400_reason
  block.call
@@ -43,15 +47,17 @@ class Fluent::Plugin::ElasticsearchErrorHandler
  stats = Hash.new(0)
  meta = {}
  header = {}
+ affinity_target_indices = @plugin.get_affinity_target_indices(chunk)
  chunk.msgpack_each do |time, rawrecord|
  bulk_message = ''
  next unless rawrecord.is_a? Hash
  begin
  # we need a deep copy for process_message to alter
  processrecord = Marshal.load(Marshal.dump(rawrecord))
- meta, header, record = @plugin.process_message(tag, meta, header, time, processrecord, extracted_values)
+ meta, header, record = @plugin.process_message(tag, meta, header, time, processrecord, affinity_target_indices, extracted_values)
  next unless @plugin.append_record_to_messages(@plugin.write_operation, meta, header, record, bulk_message)
  rescue => e
+ @plugin.log.debug("Exception in error handler during deep copy: #{e}")
  stats[:bad_chunk_record] += 1
  next
  end
@@ -105,10 +111,15 @@ class Fluent::Plugin::ElasticsearchErrorHandler
  elsif item[write_operation].has_key?('error') && item[write_operation]['error'].has_key?('type')
  type = item[write_operation]['error']['type']
  stats[type] += 1
- retry_stream.add(time, rawrecord)
  if unrecoverable_error?(type)
  raise ElasticsearchRequestAbortError, "Rejected Elasticsearch due to #{type}"
  end
+ if unrecoverable_record_error?(type)
+ @plugin.router.emit_error_event(tag, time, rawrecord, ElasticsearchError.new("#{status} - #{type}: #{reason}"))
+ next
+ else
+ retry_stream.add(time, rawrecord) unless unrecoverable_record_error?(type)
+ end
  else
  # When we don't have a type field, something changed in the API
  # expected return values (ES 2.x)
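The new `unrecoverable_record_error?` branch above reacts to bulk response items like the following (adapted from the test fixture later in this diff); such records are emitted as error events and dropped from the retry stream instead of being retried forever:

```json
{
  "create": {
    "_index": "foo",
    "_type": "bar",
    "_id": "9",
    "status": 500,
    "error": {
      "type": "json_parse_exception",
      "reason": "Invalid UTF-8 start byte 0x92\n at [Source: org.elasticsearch.transport.netty4.ByteBufStreamInput@204fe9c9; line: 1, column: 81]"
    }
  }
}
```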
@@ -2,6 +2,7 @@
2
2
  require 'date'
3
3
  require 'excon'
4
4
  require 'elasticsearch'
5
+ require 'set'
5
6
  begin
6
7
  require 'elasticsearch/xpack'
7
8
  rescue LoadError
@@ -13,6 +14,7 @@ begin
13
14
  require 'strptime'
14
15
  rescue LoadError
15
16
  end
17
+ require 'resolv'
16
18
 
17
19
  require 'fluent/plugin/output'
18
20
  require 'fluent/event'
@@ -174,6 +176,7 @@ EOC
174
176
  config_param :truncate_caches_interval, :time, :default => nil
175
177
  config_param :use_legacy_template, :bool, :default => true
176
178
  config_param :catch_transport_exception_on_retry, :bool, :default => true
179
+ config_param :target_index_affinity, :bool, :default => false
177
180
 
178
181
  config_section :metadata, param_name: :metainfo, multi: false do
179
182
  config_param :include_chunk_id, :bool, :default => false
@@ -668,7 +671,11 @@ EOC
668
671
  end
669
672
  end.compact
670
673
  else
671
- [{host: @host, port: @port, scheme: @scheme.to_s}]
674
+ if Resolv::IPv6::Regex.match(@host)
675
+ [{host: "[#{@host}]", scheme: @scheme.to_s, port: @port}]
676
+ else
677
+ [{host: @host, port: @port, scheme: @scheme.to_s}]
678
+ end
672
679
  end.each do |host|
673
680
  host.merge!(user: @user, password: @password) if !host[:user] && @user
674
681
  host.merge!(path: @path) if !host[:path] && @path
@@ -829,13 +836,14 @@ EOC
829
836
  extract_placeholders(@host, chunk)
830
837
  end
831
838
 
839
+ affinity_target_indices = get_affinity_target_indices(chunk)
832
840
  chunk.msgpack_each do |time, record|
833
841
  next unless record.is_a? Hash
834
842
 
835
843
  record = inject_chunk_id_to_record_if_needed(record, chunk_id)
836
844
 
837
845
  begin
838
- meta, header, record = process_message(tag, meta, header, time, record, extracted_values)
846
+ meta, header, record = process_message(tag, meta, header, time, record, affinity_target_indices, extracted_values)
839
847
  info = if @include_index_in_url
840
848
  RequestInfo.new(host, meta.delete("_index".freeze), meta["_index".freeze], meta.delete("_alias".freeze))
841
849
  else
@@ -872,6 +880,42 @@ EOC
872
880
  end
873
881
  end
874
882
 
883
+ def target_index_affinity_enabled?()
884
+ @target_index_affinity && @logstash_format && @id_key && (@write_operation == UPDATE_OP || @write_operation == UPSERT_OP)
885
+ end
886
+
887
+ def get_affinity_target_indices(chunk)
888
+ indices = Hash.new
889
+ if target_index_affinity_enabled?()
890
+ id_key_accessor = record_accessor_create(@id_key)
891
+ ids = Set.new
892
+ chunk.msgpack_each do |time, record|
893
+ next unless record.is_a? Hash
894
+ begin
895
+ ids << id_key_accessor.call(record)
896
+ end
897
+ end
898
+ log.debug("Find affinity target_indices by quering on ES (write_operation #{@write_operation}) for ids: #{ids.to_a}")
899
+ options = {
900
+ :index => "#{logstash_prefix}#{@logstash_prefix_separator}*",
901
+ }
902
+ query = {
903
+ 'query' => { 'ids' => { 'values' => ids.to_a } },
904
+ '_source' => false,
905
+ 'sort' => [
906
+ {"_index" => {"order" => "desc"}}
907
+ ]
908
+ }
909
+ result = client.search(options.merge(:body => Yajl.dump(query)))
910
+ # There should be just one hit per _id, but in case there still is multiple, just the oldest index is stored to map
911
+ result['hits']['hits'].each do |hit|
912
+ indices[hit["_id"]] = hit["_index"]
913
+ log.debug("target_index for id: #{hit["_id"]} from es: #{hit["_index"]}")
914
+ end
915
+ end
916
+ indices
917
+ end
918
+
875
919
  def split_request?(bulk_message, info)
876
920
  # For safety.
877
921
  end
@@ -884,7 +928,7 @@ EOC
884
928
  false
885
929
  end
886
930
 
887
- def process_message(tag, meta, header, time, record, extracted_values)
931
+ def process_message(tag, meta, header, time, record, affinity_target_indices, extracted_values)
888
932
  logstash_prefix, logstash_dateformat, index_name, type_name, _template_name, _customize_template, _deflector_alias, application_name, pipeline, _ilm_policy_id = extracted_values
889
933
 
890
934
  if @flatten_hashes
@@ -925,6 +969,15 @@ EOC
925
969
  record[@tag_key] = tag
926
970
  end
927
971
 
972
+ # If affinity target indices map has value for this particular id, use it as target_index
973
+ if !affinity_target_indices.empty?
974
+ id_accessor = record_accessor_create(@id_key)
975
+ id_value = id_accessor.call(record)
976
+ if affinity_target_indices.key?(id_value)
977
+ target_index = affinity_target_indices[id_value]
978
+ end
979
+ end
980
+
928
981
  target_type_parent, target_type_child_key = @target_type_key ? get_parent_of(record, @target_type_key) : nil
929
982
  if target_type_parent && target_type_parent[target_type_child_key]
930
983
  target_type = target_type_parent.delete(target_type_child_key)
@@ -18,9 +18,10 @@ module Fluent::Plugin
18
18
  super
19
19
 
20
20
  begin
21
+ require 'elasticsearch/api'
21
22
  require 'elasticsearch/xpack'
22
23
  rescue LoadError
23
- raise Fluent::ConfigError, "'elasticsearch/xpack'' is required for <@elasticsearch_data_stream>."
24
+ raise Fluent::ConfigError, "'elasticsearch/api', 'elasticsearch/xpack' are required for <@elasticsearch_data_stream>."
24
25
  end
25
26
 
26
27
  # ref. https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html
@@ -66,6 +67,7 @@ module Fluent::Plugin
66
67
  end
67
68
 
68
69
  def create_ilm_policy(name)
70
+ return if data_stream_exist?(name)
69
71
  params = {
70
72
  policy_id: "#{name}_policy",
71
73
  body: File.read(File.join(File.dirname(__FILE__), "default-ilm-policy.json"))
@@ -78,6 +80,7 @@ module Fluent::Plugin
78
80
  end
79
81
 
80
82
  def create_index_template(name)
83
+ return if data_stream_exist?(name)
81
84
  body = {
82
85
  "index_patterns" => ["#{name}*"],
83
86
  "data_stream" => {},
@@ -27,13 +27,18 @@ class TestElasticsearchErrorHandler < Test::Unit::TestCase
27
27
  @error_events << {:tag => tag, :time=>time, :record=>record, :error=>e}
28
28
  end
29
29
 
30
- def process_message(tag, meta, header, time, record, extracted_values)
30
+ def process_message(tag, meta, header, time, record, affinity_target_indices, extracted_values)
31
31
  return [meta, header, record]
32
32
  end
33
33
 
34
+ def get_affinity_target_indices(chunk)
35
+ indices = Hash.new
36
+ indices
37
+ end
38
+
34
39
  def append_record_to_messages(op, meta, header, record, msgs)
35
40
  if record.has_key?('raise') && record['raise']
36
- raise Exception('process_message')
41
+ raise 'process_message'
37
42
  end
38
43
  return true
39
44
  end
@@ -302,7 +307,7 @@ class TestElasticsearchErrorHandler < Test::Unit::TestCase
302
307
  def test_retry_error
303
308
  records = []
304
309
  error_records = Hash.new(false)
305
- error_records.merge!({0=>true, 4=>true, 9=>true})
310
+ error_records.merge!({0=>true, 4=>true})
306
311
  10.times do |i|
307
312
  records << {time: 12345, record: {"message"=>"record #{i}","_id"=>i,"raise"=>error_records[i]}}
308
313
  end
@@ -386,6 +391,18 @@ class TestElasticsearchErrorHandler < Test::Unit::TestCase
386
391
  "reason":"unrecognized error"
387
392
  }
388
393
  }
394
+ },
395
+ {
396
+ "create" : {
397
+ "_index" : "foo",
398
+ "_type" : "bar",
399
+ "_id" : "9",
400
+ "status" : 500,
401
+ "error" : {
402
+ "type" : "json_parse_exception",
403
+ "reason":"Invalid UTF-8 start byte 0x92\\n at [Source: org.elasticsearch.transport.netty4.ByteBufStreamInput@204fe9c9; line: 1, column: 81]"
404
+ }
405
+ }
389
406
  }
390
407
  ]
391
408
  }))
@@ -400,12 +417,12 @@ class TestElasticsearchErrorHandler < Test::Unit::TestCase
400
417
  next unless e.respond_to?(:retry_stream)
401
418
  e.retry_stream.each {|time, record| records << record}
402
419
  end
403
- assert_equal 2, records.length
404
- assert_equal 2, records[0]['_id']
405
- assert_equal 8, records[1]['_id']
420
+ assert_equal 2, records.length, "Exp. retry_stream to contain records"
421
+ assert_equal 2, records[0]['_id'], "Exp record with given ID to in retry_stream"
422
+ assert_equal 8, records[1]['_id'], "Exp record with given ID to in retry_stream"
406
423
  error_ids = @plugin.error_events.collect {|h| h[:record]['_id']}
407
- assert_equal 3, error_ids.length
408
- assert_equal [5, 6, 7], error_ids
424
+ assert_equal 4, error_ids.length, "Exp. a certain number of records to be dropped from retry_stream"
425
+ assert_equal [5, 6, 7, 9], error_ids, "Exp. specific records to be dropped from retry_stream"
409
426
  @plugin.error_events.collect {|h| h[:error]}.each do |e|
410
427
  assert_true e.respond_to?(:backtrace)
411
428
  end
@@ -10,7 +10,7 @@ class ElasticsearchOutputTest < Test::Unit::TestCase
10
10
  include FlexMock::TestCase
11
11
  include Fluent::Test::Helpers
12
12
 
13
- attr_accessor :index_cmds, :index_command_counts
13
+ attr_accessor :index_cmds, :index_command_counts, :index_cmds_all_requests
14
14
 
15
15
  def setup
16
16
  Fluent::Test.setup
@@ -70,6 +70,14 @@ class ElasticsearchOutputTest < Test::Unit::TestCase
70
70
  end
71
71
  end
72
72
 
73
+ def stub_elastic_all_requests(url="http://localhost:9200/_bulk")
74
+ @index_cmds_all_requests = Array.new
75
+ stub_request(:post, url).with do |req|
76
+ @index_cmds = req.body.split("\n").map {|r| JSON.parse(r) }
77
+ @index_cmds_all_requests << @index_cmds
78
+ end
79
+ end
80
+
73
81
  def stub_elastic_unavailable(url="http://localhost:9200/_bulk")
74
82
  stub_request(:post, url).to_return(:status => [503, "Service Unavailable"])
75
83
  end
@@ -3612,6 +3620,74 @@ class ElasticsearchOutputTest < Test::Unit::TestCase
3612
3620
  assert_equal '/default_path', host2[:path]
3613
3621
  end
3614
3622
 
3623
+ class IPv6AdressStringHostsTest < self
3624
+ def test_legacy_hosts_list
3625
+ config = %{
3626
+ hosts "[2404:7a80:d440:3000:192a:a292:bd7f:ca19]:50,host2:100,host3"
3627
+ scheme https
3628
+ path /es/
3629
+ port 123
3630
+ }
3631
+ instance = driver(config).instance
3632
+
3633
+ assert_raise(URI::InvalidURIError) do
3634
+ instance.get_connection_options[:hosts].length
3635
+ end
3636
+ end
3637
+
3638
+ def test_hosts_list
3639
+ config = %{
3640
+ hosts https://john:password@[2404:7a80:d440:3000:192a:a292:bd7f:ca19]:443/elastic/,http://host2
3641
+ path /default_path
3642
+ user default_user
3643
+ password default_password
3644
+ }
3645
+ instance = driver(config).instance
3646
+
3647
+ assert_equal 2, instance.get_connection_options[:hosts].length
3648
+ host1, host2 = instance.get_connection_options[:hosts]
3649
+
3650
+ assert_equal '[2404:7a80:d440:3000:192a:a292:bd7f:ca19]', host1[:host]
3651
+ assert_equal 443, host1[:port]
3652
+ assert_equal 'https', host1[:scheme]
3653
+ assert_equal 'john', host1[:user]
3654
+ assert_equal 'password', host1[:password]
3655
+ assert_equal '/elastic/', host1[:path]
3656
+
3657
+ assert_equal 'host2', host2[:host]
3658
+ assert_equal 'http', host2[:scheme]
3659
+ assert_equal 'default_user', host2[:user]
3660
+ assert_equal 'default_password', host2[:password]
3661
+ assert_equal '/default_path', host2[:path]
3662
+ end
3663
+
3664
+ def test_hosts_list_with_escape_placeholders
3665
+ config = %{
3666
+ hosts https://%{j+hn}:%{passw@rd}@[2404:7a80:d440:3000:192a:a292:bd7f:ca19]:443/elastic/,http://host2
3667
+ path /default_path
3668
+ user default_user
3669
+ password default_password
3670
+ }
3671
+ instance = driver(config).instance
3672
+
3673
+ assert_equal 2, instance.get_connection_options[:hosts].length
3674
+ host1, host2 = instance.get_connection_options[:hosts]
3675
+
3676
+ assert_equal '[2404:7a80:d440:3000:192a:a292:bd7f:ca19]', host1[:host]
3677
+ assert_equal 443, host1[:port]
3678
+ assert_equal 'https', host1[:scheme]
3679
+ assert_equal 'j%2Bhn', host1[:user]
3680
+ assert_equal 'passw%40rd', host1[:password]
3681
+ assert_equal '/elastic/', host1[:path]
3682
+
3683
+ assert_equal 'host2', host2[:host]
3684
+ assert_equal 'http', host2[:scheme]
3685
+ assert_equal 'default_user', host2[:user]
3686
+ assert_equal 'default_password', host2[:password]
3687
+ assert_equal '/default_path', host2[:path]
3688
+ end
3689
+ end
3690
+
3615
3691
  def test_single_host_params_and_defaults
3616
3692
  config = %{
3617
3693
  host logs.google.com
@@ -3665,6 +3741,46 @@ class ElasticsearchOutputTest < Test::Unit::TestCase
3665
3741
  assert(ports.none? { |p| p == 9200 })
3666
3742
  end
3667
3743
 
3744
+ class IPv6AdressStringHostTest < self
3745
+ def test_single_host_params_and_defaults
3746
+ config = %{
3747
+ host 2404:7a80:d440:3000:192a:a292:bd7f:ca19
3748
+ user john
3749
+ password doe
3750
+ }
3751
+ instance = driver(config).instance
3752
+
3753
+ assert_equal 1, instance.get_connection_options[:hosts].length
3754
+ host1 = instance.get_connection_options[:hosts][0]
3755
+
3756
+ assert_equal '[2404:7a80:d440:3000:192a:a292:bd7f:ca19]', host1[:host]
3757
+ assert_equal 9200, host1[:port]
3758
+ assert_equal 'http', host1[:scheme]
3759
+ assert_equal 'john', host1[:user]
3760
+ assert_equal 'doe', host1[:password]
3761
+ assert_equal nil, host1[:path]
3762
+ end
3763
+
3764
+ def test_single_host_params_and_defaults_with_escape_placeholders
3765
+ config = %{
3766
+ host 2404:7a80:d440:3000:192a:a292:bd7f:ca19
3767
+ user %{j+hn}
3768
+ password %{d@e}
3769
+ }
3770
+ instance = driver(config).instance
3771
+
3772
+ assert_equal 1, instance.get_connection_options[:hosts].length
3773
+ host1 = instance.get_connection_options[:hosts][0]
3774
+
3775
+ assert_equal '[2404:7a80:d440:3000:192a:a292:bd7f:ca19]', host1[:host]
3776
+ assert_equal 9200, host1[:port]
3777
+ assert_equal 'http', host1[:scheme]
3778
+ assert_equal 'j%2Bhn', host1[:user]
3779
+ assert_equal 'd%40e', host1[:password]
3780
+ assert_equal nil, host1[:path]
3781
+ end
3782
+ end
3783
+
3668
3784
  def test_password_is_required_if_specify_user
3669
3785
  config = %{
3670
3786
  user john
@@ -3986,6 +4102,185 @@ class ElasticsearchOutputTest < Test::Unit::TestCase
3986
4102
  assert_equal(pipeline, index_cmds.first['index']['pipeline'])
3987
4103
  end
3988
4104
 
4105
+ def stub_elastic_affinity_target_index_search_with_body(url="http://localhost:9200/logstash-*/_search", ids, return_body_str)
4106
+ # Note: ids used in query is unique list of ids
4107
+ stub_request(:post, url)
4108
+ .with(
4109
+ body: "{\"query\":{\"ids\":{\"values\":#{ids.uniq.to_json}}},\"_source\":false,\"sort\":[{\"_index\":{\"order\":\"desc\"}}]}",
4110
+ )
4111
+ .to_return(lambda do |req|
4112
+ { :status => 200,
4113
+ :headers => { 'Content-Type' => 'json' },
4114
+ :body => return_body_str
4115
+ }
4116
+ end)
4117
+ end
4118
+
4119
+ def stub_elastic_affinity_target_index_search(url="http://localhost:9200/logstash-*/_search", ids, indices)
4120
+ # Example ids and indices arrays.
4121
+ # [ "3408a2c8eecd4fbfb82e45012b54fa82", "2816fc6ef4524b3f8f7e869002005433"]
4122
+ # [ "logstash-2021.04.28", "logstash-2021.04.29"]
4123
+ body = %({
4124
+ "took" : 31,
4125
+ "timed_out" : false,
4126
+ "_shards" : {
4127
+ "total" : 52,
4128
+ "successful" : 52,
4129
+ "skipped" : 48,
4130
+ "failed" : 0
4131
+ },
4132
+ "hits" : {
4133
+ "total" : {
4134
+ "value" : 356,
4135
+ "relation" : "eq"
4136
+ },
4137
+ "max_score" : null,
4138
+ "hits" : [
4139
+ {
4140
+ "_index" : "#{indices[0]}",
4141
+ "_type" : "_doc",
4142
+ "_id" : "#{ids[0]}",
4143
+ "_score" : null,
4144
+ "sort" : [
4145
+ "#{indices[0]}"
4146
+ ]
4147
+ },
4148
+ {
4149
+ "_index" : "#{indices[1]}",
4150
+ "_type" : "_doc",
4151
+ "_id" : "#{ids[1]}",
4152
+ "_score" : null,
4153
+ "sort" : [
4154
+ "#{indices[1]}"
4155
+ ]
4156
+ }
4157
+ ]
4158
+ }
4159
+ })
4160
+ stub_elastic_affinity_target_index_search_with_body(ids, body)
4161
+ end
4162
+
4163
+ def stub_elastic_affinity_target_index_search_return_empty(url="http://localhost:9200/logstash-*/_search", ids)
4164
+ empty_body = %({
4165
+ "took" : 5,
4166
+ "timed_out" : false,
4167
+ "_shards" : {
4168
+ "total" : 54,
4169
+ "successful" : 54,
4170
+ "skipped" : 53,
4171
+ "failed" : 0
4172
+ },
4173
+ "hits" : {
4174
+ "total" : {
4175
+ "value" : 0,
4176
+ "relation" : "eq"
4177
+ },
4178
+ "max_score" : null,
4179
+ "hits" : [ ]
4180
+ }
4181
+ })
4182
+ stub_elastic_affinity_target_index_search_with_body(ids, empty_body)
4183
+ end
4184
+
4185
+ def test_writes_to_affinity_target_index
4186
+ driver.configure("target_index_affinity true
4187
+ logstash_format true
4188
+ id_key my_id
4189
+ write_operation update")
4190
+
4191
+ my_id_value = "3408a2c8eecd4fbfb82e45012b54fa82"
4192
+ ids = [my_id_value]
4193
+ indices = ["logstash-2021.04.28"]
4194
+ stub_elastic
4195
+ stub_elastic_affinity_target_index_search(ids, indices)
4196
+ driver.run(default_tag: 'test') do
4197
+ driver.feed(sample_record('my_id' => my_id_value))
4198
+ end
4199
+ assert_equal('logstash-2021.04.28', index_cmds.first['update']['_index'])
4200
+ end
4201
+
4202
+ def test_writes_to_affinity_target_index_write_operation_upsert
4203
+ driver.configure("target_index_affinity true
4204
+ logstash_format true
4205
+ id_key my_id
4206
+ write_operation upsert")
4207
+
4208
+ my_id_value = "3408a2c8eecd4fbfb82e45012b54fa82"
4209
+ ids = [my_id_value]
4210
+ indices = ["logstash-2021.04.28"]
4211
+ stub_elastic
4212
+ stub_elastic_affinity_target_index_search(ids, indices)
4213
+ driver.run(default_tag: 'test') do
4214
+ driver.feed(sample_record('my_id' => my_id_value))
4215
+ end
4216
+ assert_equal('logstash-2021.04.28', index_cmds.first['update']['_index'])
4217
+ end
4218
+
4219
+ def test_writes_to_affinity_target_index_index_not_exists_yet
4220
+ driver.configure("target_index_affinity true
4221
+ logstash_format true
4222
+ id_key my_id
4223
+ write_operation update")
4224
+
4225
+ my_id_value = "3408a2c8eecd4fbfb82e45012b54fa82"
4226
+ ids = [my_id_value]
4227
+ stub_elastic
4228
+ stub_elastic_affinity_target_index_search_return_empty(ids)
4229
+ time = Time.parse Date.today.iso8601
4230
+ driver.run(default_tag: 'test') do
4231
+ driver.feed(time.to_i, sample_record('my_id' => my_id_value))
4232
+ end
4233
+ assert_equal("logstash-#{time.utc.strftime("%Y.%m.%d")}", index_cmds.first['update']['_index'])
4234
+ end
4235
+
4236
+ def test_writes_to_affinity_target_index_multiple_indices
4237
+ driver.configure("target_index_affinity true
4238
+ logstash_format true
4239
+ id_key my_id
4240
+ write_operation update")
4241
+
4242
+ my_id_value = "2816fc6ef4524b3f8f7e869002005433"
4243
+ my_id_value2 = "3408a2c8eecd4fbfb82e45012b54fa82"
4244
+ ids = [my_id_value, my_id_value2]
4245
+ indices = ["logstash-2021.04.29", "logstash-2021.04.28"]
4246
+ stub_elastic_all_requests
4247
+ stub_elastic_affinity_target_index_search(ids, indices)
4248
+ driver.run(default_tag: 'test') do
4249
+ driver.feed(sample_record('my_id' => my_id_value))
4250
+ driver.feed(sample_record('my_id' => my_id_value2))
4251
+ end
4252
+ assert_equal(2, index_cmds_all_requests.count)
4253
+ assert_equal('logstash-2021.04.29', index_cmds_all_requests[0].first['update']['_index'])
4254
+ assert_equal(my_id_value, index_cmds_all_requests[0].first['update']['_id'])
4255
+ assert_equal('logstash-2021.04.28', index_cmds_all_requests[1].first['update']['_index'])
4256
+ assert_equal(my_id_value2, index_cmds_all_requests[1].first['update']['_id'])
4257
+ end
4258
+
4259
+ def test_writes_to_affinity_target_index_same_id_dublicated_write_to_oldest_index
4260
+ driver.configure("target_index_affinity true
4261
+ logstash_format true
4262
+ id_key my_id
4263
+ write_operation update")
4264
+
4265
+ my_id_value = "2816fc6ef4524b3f8f7e869002005433"
4266
+ # It may happen than same id has inserted to two index while data inserted during rollover period
4267
+ ids = [my_id_value, my_id_value]
4268
+ # Simulate the used sorting here, as search sorts indices in DESC order to pick only oldest index per single _id
4269
+ indices = ["logstash-2021.04.29", "logstash-2021.04.28"]
4270
+
4271
+ stub_elastic_all_requests
4272
+ stub_elastic_affinity_target_index_search(ids, indices)
4273
+ driver.run(default_tag: 'test') do
4274
+ driver.feed(sample_record('my_id' => my_id_value))
4275
+ driver.feed(sample_record('my_id' => my_id_value))
4276
+ end
4277
+ assert_equal('logstash-2021.04.28', index_cmds.first['update']['_index'])
4278
+
4279
+ assert_equal(1, index_cmds_all_requests.count)
4280
+ assert_equal('logstash-2021.04.28', index_cmds_all_requests[0].first['update']['_index'])
4281
+ assert_equal(my_id_value, index_cmds_all_requests[0].first['update']['_id'])
4282
+ end
4283
+
3989
4284
  class PipelinePlaceholdersTest < self
3990
4285
  def test_writes_to_default_index_with_pipeline_tag_placeholder
3991
4286
  pipeline = "fluentd-${tag}"
@@ -79,7 +79,7 @@ class ElasticsearchOutputDataStreamTest < Test::Unit::TestCase
79
79
  end
80
80
 
81
81
  def stub_nonexistent_data_stream?(name="foo")
82
- stub_request(:get, "http://localhost:9200/_data_stream/#{name}").to_return(:status => [200, Elasticsearch::Transport::Transport::Errors::NotFound])
82
+ stub_request(:get, "http://localhost:9200/_data_stream/#{name}").to_return(:status => [404, Elasticsearch::Transport::Transport::Errors::NotFound])
83
83
  end
84
84
 
85
85
  def stub_bulk_feed(name="foo")
@@ -94,7 +94,7 @@ class ElasticsearchOutputDataStreamTest < Test::Unit::TestCase
94
94
  def stub_default(name="foo")
95
95
  stub_ilm_policy(name)
96
96
  stub_index_template(name)
97
- stub_existent_data_stream?(name)
97
+ stub_nonexistent_data_stream?(name)
98
98
  stub_data_stream(name)
99
99
  end
100
100
 
@@ -207,12 +207,12 @@ class ElasticsearchOutputDataStreamTest < Test::Unit::TestCase
207
207
  assert_equal "foo", driver(conf).instance.data_stream_name
208
208
  end
209
209
 
210
- def test_nonexistent_data_stream
210
+ def test_existent_data_stream
211
211
  omit REQUIRED_ELASTIC_MESSAGE unless data_stream_supported?
212
212
 
213
213
  stub_ilm_policy
214
214
  stub_index_template
215
- stub_nonexistent_data_stream?
215
+ stub_existent_data_stream?
216
216
  stub_data_stream
217
217
  conf = config_element(
218
218
  'ROOT', '', {
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: fluent-plugin-elasticsearch
3
3
  version: !ruby/object:Gem::Version
4
- version: 5.0.0
4
+ version: 5.0.5
5
5
  platform: ruby
6
6
  authors:
7
7
  - diogo
@@ -10,7 +10,7 @@ authors:
10
10
  autorequire:
11
11
  bindir: bin
12
12
  cert_chain: []
13
- date: 2021-02-02 00:00:00.000000000 Z
13
+ date: 2021-06-29 00:00:00.000000000 Z
14
14
  dependencies:
15
15
  - !ruby/object:Gem::Dependency
16
16
  name: fluentd
@@ -68,6 +68,20 @@ dependencies:
68
68
  - - ">="
69
69
  - !ruby/object:Gem::Version
70
70
  version: '0'
71
+ - !ruby/object:Gem::Dependency
72
+ name: webrick
73
+ requirement: !ruby/object:Gem::Requirement
74
+ requirements:
75
+ - - "~>"
76
+ - !ruby/object:Gem::Version
77
+ version: 1.7.0
78
+ type: :development
79
+ prerelease: false
80
+ version_requirements: !ruby/object:Gem::Requirement
81
+ requirements:
82
+ - - "~>"
83
+ - !ruby/object:Gem::Version
84
+ version: 1.7.0
71
85
  - !ruby/object:Gem::Dependency
72
86
  name: webmock
73
87
  requirement: !ruby/object:Gem::Requirement
@@ -209,7 +223,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
209
223
  - !ruby/object:Gem::Version
210
224
  version: '0'
211
225
  requirements: []
212
- rubygems_version: 3.1.2
226
+ rubygems_version: 3.2.3
213
227
  signing_key:
214
228
  specification_version: 4
215
229
  summary: Elasticsearch output plugin for Fluent event collector