fluentd 1.14.3-x86-mingw32 → 1.14.4-x86-mingw32

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3ba01ca31fbaac62a1c0ea213d4f4dccd7c1a2a21be730a62f9b82007e8a67e3
- data.tar.gz: 5b4dbb5aae91e85043e295f037fac15f877a084ee5c6289d09a0e8b4b7cf0c9c
+ metadata.gz: '0735519c797e326cb6fcd64a4e27a0e0872af520ae52d24802460c51869417dc'
+ data.tar.gz: a6524d76380af765663e85378c6e1bda25a83f01bfd5ee034152f0f672d44071
  SHA512:
- metadata.gz: ecf03095be6cc94747984eb828d607a9ddb70a43e88fa32b483ba38450c78ad13d309914d3cf8f5146a6d0d97e0fcc803178090217c861ad09c44e033f5a19c7
- data.tar.gz: 902ef5dcd2289d72da71495877ec1b8337477f177b81b7c8002a2fa7beec11a2571957539044e16f2bc7c5a6a6be32dbd72e6867d2056a03ff41e216d5a2af22
+ metadata.gz: af913a0847c7bd11dc534cf935ae1ae73f7b447836a8bca8abd8cd4a151dfc7cdf4e72fefe577c265dbccc1829f2c0dc6fa31d23a046b869b1f69ac972570435
+ data.tar.gz: b0360be02ae35ba1b1a5212fbb5815a7b2fb1bf620b6cf5459f5acf4768dd19c1f7825b17ae9bc00983eaea13bcd7c3036ae7a6f802e4684dc7963f7387d9ffe
data/CHANGELOG.md CHANGED
@@ -1,3 +1,22 @@
+ # v1.14.4
+
+ ## Release v1.14.4 - 2022/01/06
+
+ ### Enhancement
+
+ * `in_tail`: Add option to skip long lines (`max_line_size`)
+ https://github.com/fluent/fluentd/pull/3565
+
+ ### Bug fix
+
+ * Incorrect BufferChunkOverflowError when each event size is < `chunk_limit_size`
+ https://github.com/fluent/fluentd/pull/3560
+ * On macOS with Ruby 2.7/3.0, `out_file` fails to write events if `append` is true.
+ https://github.com/fluent/fluentd/pull/3579
+ * test: Fix unstable test cases
+ https://github.com/fluent/fluentd/pull/3574
+ https://github.com/fluent/fluentd/pull/3577
+
  # v1.14.3

  ## Release v1.14.3 - 2021/11/26
data/lib/fluent/env.rb CHANGED
@@ -33,4 +33,8 @@ module Fluent
  def self.linux?
  /linux/ === RUBY_PLATFORM
  end
+
+ def self.macos?
+ /darwin/ =~ RUBY_PLATFORM
+ end
  end
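The new helper mirrors Fluent.linux?; note that `=~` returns a match position (an Integer) on darwin platforms and nil elsewhere, so it reads as a truthy/falsy predicate. A minimal usage sketch (illustrative only, the caller name is a placeholder, not part of the diff):

  # Hypothetical caller; Fluent.macos? is truthy on darwin, nil elsewhere.
  if Fluent.macos?
    apply_macos_append_workaround   # placeholder for platform-specific handling
  end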
data/lib/fluent/plugin/buffer.rb CHANGED
@@ -767,24 +767,37 @@ module Fluent
  raise ShouldRetry unless chunk.writable?
  staged_chunk_used = true if chunk.staged?

- original_bytesize = chunk.bytesize
+ original_bytesize = committed_bytesize = chunk.bytesize
  begin
  while writing_splits_index < splits.size
  split = splits[writing_splits_index]
- formatted_split = format ? format.call(split) : split.first
- if split.size == 1 && original_bytesize == 0
- if format == nil && @compress != :text
- # The actual size of chunk is not determined until after chunk.append.
- # so, keep already processed 'split' content here.
- # (allow performance regression a bit)
+ formatted_split = format ? format.call(split) : nil
+
+ if split.size == 1 # Check BufferChunkOverflowError
+ determined_bytesize = nil
+ if @compress != :text
+ determined_bytesize = nil
+ elsif formatted_split
+ determined_bytesize = formatted_split.bytesize
+ elsif split.first.respond_to?(:bytesize)
+ determined_bytesize = split.first.bytesize
+ end
+
+ if determined_bytesize && determined_bytesize > @chunk_limit_size
+ # It is a obvious case that BufferChunkOverflowError should be raised here.
+ # But if it raises here, already processed 'split' or
+ # the proceeding 'split' will be lost completely.
+ # So it is a last resort to delay raising such a exception
+ errors << "a #{determined_bytesize} bytes record (nth: #{writing_splits_index}) is larger than buffer chunk limit size (#{@chunk_limit_size})"
+ writing_splits_index += 1
+ next
+ end
+
+ if determined_bytesize.nil? || chunk.bytesize + determined_bytesize > @chunk_limit_size
+ # The split will (might) cause size over so keep already processed
+ # 'split' content here (allow performance regression a bit).
  chunk.commit
- else
- big_record_size = formatted_split.bytesize
- if chunk.bytesize + big_record_size > @chunk_limit_size
- errors << "a #{big_record_size} bytes record (nth: #{writing_splits_index}) is larger than buffer chunk limit size (#{@chunk_limit_size})"
- writing_splits_index += 1
- next
- end
+ committed_bytesize = chunk.bytesize
  end
  end

@@ -793,19 +806,26 @@ module Fluent
  else
  chunk.append(split, compress: @compress)
  end
+ adding_bytes = chunk.bytesize - committed_bytesize

  if chunk_size_over?(chunk) # split size is larger than difference between size_full? and size_over?
- adding_bytes = chunk.instance_eval { @adding_bytes } || "N/A" # 3rd party might not have 'adding_bytes'
  chunk.rollback
+ committed_bytesize = chunk.bytesize

- if split.size == 1 && original_bytesize == 0
- # It is obviously case that BufferChunkOverflowError should be raised here,
- # but if it raises here, already processed 'split' or
- # the proceeding 'split' will be lost completely.
- # so it is a last resort to delay raising such a exception
- errors << "concatenated/appended a #{adding_bytes} bytes record (nth: #{writing_splits_index}) is larger than buffer chunk limit size (#{@chunk_limit_size})"
- writing_splits_index += 1
- next
+ if split.size == 1 # Check BufferChunkOverflowError again
+ if adding_bytes > @chunk_limit_size
+ errors << "concatenated/appended a #{adding_bytes} bytes record (nth: #{writing_splits_index}) is larger than buffer chunk limit size (#{@chunk_limit_size})"
+ writing_splits_index += 1
+ next
+ else
+ # As already processed content is kept after rollback, then unstaged chunk should be queued.
+ # After that, re-process current split again.
+ # New chunk should be allocated, to do it, modify @stage and so on.
+ synchronize { @stage.delete(modified_metadata) }
+ staged_chunk_used = false
+ chunk.unstaged!
+ break
+ end
  end

  if chunk_size_full?(chunk) || split.size == 1
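In short, the rewritten loop computes each split's size up front when it can (formatted splits, or raw splits responding to #bytesize), defers BufferChunkOverflowError for oversized single records by collecting them into `errors`, and tracks `committed_bytesize` across commit/rollback so neighbouring records survive. A standalone sketch of that skip rule, with hypothetical data and variable names (not the fluentd API):

  # Records larger than the chunk limit are reported and skipped, not raised immediately.
  chunk_limit_size = 1_280_000
  errors = []
  records = ["a" * 1_000_000, "x" * 2_000_000, "b" * 1_000_000]
  writable = records.each_with_index.reject { |record, nth|
    too_big = record.bytesize > chunk_limit_size
    errors << "a #{record.bytesize} bytes record (nth: #{nth}) is larger than buffer chunk limit size (#{chunk_limit_size})" if too_big
    too_big
  }.map(&:first)
  # writable keeps the "a" and "b" records; errors reports only the oversized one,
  # to be raised later as a single aggregated BufferChunkOverflowError,
  # matching the behaviour verified in test_buffer.rb below.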
data/lib/fluent/plugin/in_tail.rb CHANGED
@@ -113,6 +113,8 @@ module Fluent::Plugin
  config_param :path_timezone, :string, default: nil
  desc 'Follow inodes instead of following file names. Guarantees more stable delivery and allows to use * in path pattern with rotating files'
  config_param :follow_inodes, :bool, default: false
+ desc 'Maximum length of line. The longer line is just skipped.'
+ config_param :max_line_size, :size, default: nil

  config_section :parse, required: false, multi: true, init: true, param_name: :parser_configs do
  config_argument :usage, :string, default: 'in_tail_parser'
@@ -594,6 +596,14 @@ module Fluent::Plugin

  # @return true if no error or unrecoverable error happens in emit action. false if got BufferOverflowError
  def receive_lines(lines, tail_watcher)
+ lines = lines.reject do |line|
+ skip_line = @max_line_size ? line.bytesize > @max_line_size : false
+ if skip_line
+ log.warn "received line length is longer than #{@max_line_size}"
+ log.debug "skipped line: #{line.chomp}"
+ end
+ skip_line
+ end
  es = @receive_handler.call(lines, tail_watcher)
  unless es.empty?
  tag = if @tag_prefix || @tag_suffix
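Putting the two in_tail hunks together: a hedged configuration sketch for the new option (path and tag are placeholders). Per the :size type, "1k" parses as 1024 bytes, and lines whose bytesize exceeds the limit are dropped with a warn log rather than truncated:

  <source>
    @type tail
    path /var/log/app/access.log   # placeholder path
    tag app.access                 # placeholder tag
    read_from_head true
    max_line_size 1k               # lines longer than 1 KiB are skipped, not truncated
    <parse>
      @type none
    </parse>
  </source>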
data/lib/fluent/plugin/out_file.rb CHANGED
@@ -181,6 +181,13 @@ module Fluent::Plugin
  @dir_perm = system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION
  @file_perm = system_config.file_permission || Fluent::DEFAULT_FILE_PERMISSION
  @need_lock = system_config.workers > 1
+
+ # https://github.com/fluent/fluentd/issues/3569
+ @need_ruby_on_macos_workaround = false
+ if @append && Fluent.macos?
+ condition = Gem::Dependency.new('', [">= 2.7.0", "< 3.1.0"])
+ @need_ruby_on_macos_workaround = true if condition.match?('', RUBY_VERSION)
+ end
  end

  def multi_workers_ready?
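The version gate above leans on RubyGems rather than hand-rolled comparisons: a Gem::Dependency built with an empty name can match a bare version string against several requirements at once. A quick illustrative sketch (the version numbers are examples):

  require 'rubygems'  # usually already loaded in a fluentd process

  condition = Gem::Dependency.new('', [">= 2.7.0", "< 3.1.0"])
  condition.match?('', '3.0.3')  # => true  : affected Ruby, workaround path taken
  condition.match?('', '3.1.0')  # => false : chunk.write_to(f) is used as before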
@@ -223,7 +230,12 @@ module Fluent::Plugin

  def write_without_compression(path, chunk)
  File.open(path, "ab", @file_perm) do |f|
- chunk.write_to(f)
+ if @need_ruby_on_macos_workaround
+ content = chunk.read()
+ f.puts content
+ else
+ chunk.write_to(f)
+ end
  end
  end

data/lib/fluent/version.rb CHANGED
@@ -16,6 +16,6 @@

  module Fluent

- VERSION = '1.14.3'
+ VERSION = '1.14.4'

  end
data/test/plugin/test_bare_output.rb CHANGED
@@ -83,7 +83,7 @@ class BareOutputTest < Test::Unit::TestCase

  @p.configure(config_element('ROOT', '', {'@log_level' => 'debug'}))

- assert{ @p.log.object_id != original_logger.object_id }
+ assert(@p.log.object_id != original_logger.object_id)
  assert_equal Fluent::Log::LEVEL_DEBUG, @p.log.level
  end

data/test/plugin/test_buffer.rb CHANGED
@@ -990,6 +990,51 @@ class BufferTest < Test::Unit::TestCase
  assert_equal [@dm0], @p.queue.map(&:metadata)
  assert_equal [5000], @p.queue.map(&:size)
  end
+
+ test "confirm that every message which is smaller than chunk threshold does not raise BufferChunkOverflowError" do
+ assert_equal [@dm0], @p.stage.keys
+ assert_equal [], @p.queue.map(&:metadata)
+ timestamp = event_time('2016-04-11 16:00:02 +0000')
+ es = Fluent::ArrayEventStream.new([[timestamp, {"message" => "a" * 1_000_000}],
+ [timestamp, {"message" => "b" * 1_000_000}],
+ [timestamp, {"message" => "c" * 1_000_000}]])
+
+ # https://github.com/fluent/fluentd/issues/1849
+ # Even though 1_000_000 < 1_280_000 (chunk_limit_size), it raised BufferChunkOverflowError before.
+ # It should not be raised and message a,b,c should be stored into 3 chunks.
+ assert_nothing_raised do
+ @p.write({@dm0 => es}, format: @format)
+ end
+ messages = []
+ # pick up first letter to check whether chunk is queued in expected order
+ 3.times do |index|
+ chunk = @p.queue[index]
+ es = Fluent::MessagePackEventStream.new(chunk.chunk)
+ es.ensure_unpacked!
+ records = es.instance_eval{ @unpacked_records }
+ records.each do |record|
+ messages << record["message"][0]
+ end
+ end
+ es = Fluent::MessagePackEventStream.new(@p.stage[@dm0].chunk)
+ es.ensure_unpacked!
+ staged_message = es.instance_eval{ @unpacked_records }.first["message"]
+ # message a and b are queued, message c is staged
+ assert_equal([
+ [@dm0],
+ "c" * 1_000_000,
+ [@dm0, @dm0, @dm0],
+ [5000, 1, 1],
+ [["x"] * 5000, "a", "b"].flatten
+ ],
+ [
+ @p.stage.keys,
+ staged_message,
+ @p.queue.map(&:metadata),
+ @p.queue.map(&:size),
+ messages
+ ])
+ end
  end

  sub_test_case 'custom format with configuration for test with lower chunk limit size' do
@@ -1078,6 +1123,38 @@ class BufferTest < Test::Unit::TestCase
  @p.write({@dm0 => es})
  end
  end
+
+ test 'confirm that every array message which is smaller than chunk threshold does not raise BufferChunkOverflowError' do
+ assert_equal [@dm0], @p.stage.keys
+ assert_equal [], @p.queue.map(&:metadata)
+
+ assert_equal 1_280_000, @p.chunk_limit_size
+
+ es = ["a" * 1_000_000, "b" * 1_000_000, "c" * 1_000_000]
+ assert_nothing_raised do
+ @p.write({@dm0 => es})
+ end
+ queue_messages = @p.queue.collect do |chunk|
+ # collect first character of each message
+ chunk.chunk[0]
+ end
+ assert_equal([
+ [@dm0],
+ 1,
+ "c",
+ [@dm0, @dm0, @dm0],
+ [5000, 1, 1],
+ ["x", "a", "b"]
+ ],
+ [
+ @p.stage.keys,
+ @p.stage[@dm0].size,
+ @p.stage[@dm0].chunk[0],
+ @p.queue.map(&:metadata),
+ @p.queue.map(&:size),
+ queue_messages
+ ])
+ end
  end

  sub_test_case 'with configuration for test with lower limits' do
data/test/plugin/test_filter.rb CHANGED
@@ -153,7 +153,7 @@ class FilterPluginTest < Test::Unit::TestCase

  @p.configure(config_element('ROOT', '', {'@log_level' => 'debug'}))

- assert{ @p.log.object_id != original_logger.object_id }
+ assert(@p.log.object_id != original_logger.object_id)
  assert_equal Fluent::Log::LEVEL_DEBUG, @p.log.level
  end

data/test/plugin/test_in_tail.rb CHANGED
@@ -1707,6 +1707,41 @@ class TailInputTest < Test::Unit::TestCase
  mock(plugin.router).emit_stream('pre.foo.bar.log.post', anything).once
  plugin.receive_lines(['foo', 'bar'], DummyWatcher.new('foo.bar.log'))
  end
+
+ data(
+ small: ["128", 128],
+ KiB: ["1k", 1024]
+ )
+ test 'max_line_size' do |(label, size)|
+ config = config_element("", "", {
+ "tag" => "max_line_size",
+ "path" => "#{TMP_DIR}/with_long_lines.txt",
+ "format" => "none",
+ "read_from_head" => true,
+ "max_line_size" => label,
+ "log_level" => "debug"
+ })
+ File.open("#{TMP_DIR}/with_long_lines.txt", "w+") do |f|
+ f.puts "foo"
+ f.puts "x" * size # 'x' * size + \n > @max_line_size
+ f.puts "bar"
+ end
+ d = create_driver(config, false)
+ timestamp = Time.parse("Mon Nov 29 11:22:33 UTC 2021")
+ Timecop.freeze(timestamp)
+ d.run(expect_records: 2)
+ assert_equal([
+ [{"message" => "foo"},{"message" => "bar"}],
+ [
+ "2021-11-29 11:22:33 +0000 [warn]: received line length is longer than #{size}\n",
+ "2021-11-29 11:22:33 +0000 [debug]: skipped line: #{'x' * size}\n"
+ ]
+ ],
+ [
+ d.events.collect { |event| event.last },
+ d.logs[-2..]
+ ])
+ end
  end

  # Ensure that no fatal exception is raised when a file is missing and that
data/test/plugin/test_input.rb CHANGED
@@ -73,7 +73,7 @@ class InputTest < Test::Unit::TestCase

  @p.configure(config_element('ROOT', '', {'@log_level' => 'debug'}))

- assert{ @p.log.object_id != original_logger.object_id }
+ assert(@p.log.object_id != original_logger.object_id)
  assert_equal Fluent::Log::LEVEL_DEBUG, @p.log.level
  end

data/test/plugin/test_out_exec.rb CHANGED
@@ -243,7 +243,7 @@ class ExecOutputTest < Test::Unit::TestCase
  sub_test_case 'when executed process dies unexpectedly' do
  setup do
  @gen_config = ->(num){ <<EOC
- command ruby -e "ARGV.first.to_i == 0 ? open(ARGV[1]){|f| STDOUT.write f.read} : (sleep 1 ; exit ARGV.first.to_i)" #{num} >#{TMP_DIR}/fail_out
+ command ruby -e "ARGV.first.to_i == 0 ? open(ARGV[1]){|f| STDOUT.write(f.read); STDOUT.flush} : (sleep 1 ; exit ARGV.first.to_i)" #{num} >#{TMP_DIR}/fail_out
  <inject>
  tag_key tag
  time_key time
@@ -265,7 +265,7 @@ EOC
  expect_path = "#{TMP_DIR}/fail_out"

  d.end_if{ File.exist?(expect_path) }
- d.run(default_tag: 'test', flush: true, wait_flush_completion: false, shutdown: false) do
+ d.run(default_tag: 'test', flush: true, wait_flush_completion: true, shutdown: false) do
  d.feed(time, records[0])
  d.feed(time, records[1])
  end
@@ -281,7 +281,8 @@ EOC
  assert{ d.instance.buffer.queue.empty? }
  assert{ d.instance.dequeued_chunks.empty? }

- d.instance_shutdown
+ ensure
+ d.instance_shutdown if d && d.instance
  end

  test 'flushed chunk will be taken back after child process unexpectedly exits' do
@@ -304,7 +305,8 @@ EOC

  assert{ File.exist?(expect_path) && File.size(expect_path) == 0 }

- d.instance_shutdown
+ ensure
+ d.instance_shutdown if d && d.instance
  end
  end
  end
data/test/plugin/test_out_file.rb CHANGED
@@ -394,6 +394,11 @@ class FileOutputTest < Test::Unit::TestCase
  assert_equal expect, result
  end

+ def check_result(path, expect)
+ result = File.read(path, mode: "rb")
+ assert_equal expect, result
+ end
+
  sub_test_case 'write' do
  test 'basic case' do
  d = create_driver
@@ -535,20 +540,27 @@ class FileOutputTest < Test::Unit::TestCase
  assert_equal 3, Dir.glob("#{TMP_DIR}/out_file_test.*").size
  end

- test 'append' do
+ data(
+ "with compression" => true,
+ "without compression" => false,
+ )
+ test 'append' do |compression|
  time = event_time("2011-01-02 13:14:15 UTC")
  formatted_lines = %[2011-01-02T13:14:15Z\ttest\t{"a":1}#{@default_newline}] + %[2011-01-02T13:14:15Z\ttest\t{"a":2}#{@default_newline}]

  write_once = ->(){
- d = create_driver %[
+ config = %[
  path #{TMP_DIR}/out_file_test
- compress gz
  utc
  append true
  <buffer>
  timekey_use_utc true
  </buffer>
  ]
+ if compression
+ config << " compress gz"
+ end
+ d = create_driver(config)
  d.run(default_tag: 'test'){
  d.feed(time, {"a"=>1})
  d.feed(time, {"a"=>2})
@@ -556,17 +568,21 @@ class FileOutputTest < Test::Unit::TestCase
  d.instance.last_written_path
  }

- path = write_once.call
- assert_equal "#{TMP_DIR}/out_file_test.20110102.log.gz", path
- check_gzipped_result(path, formatted_lines)
-
- path = write_once.call
- assert_equal "#{TMP_DIR}/out_file_test.20110102.log.gz", path
- check_gzipped_result(path, formatted_lines * 2)
+ log_file_name = "out_file_test.20110102.log"
+ if compression
+ log_file_name << ".gz"
+ end

- path = write_once.call
- assert_equal "#{TMP_DIR}/out_file_test.20110102.log.gz", path
- check_gzipped_result(path, formatted_lines * 3)
+ 1.upto(3) do |i|
+ path = write_once.call
+ assert_equal "#{TMP_DIR}/#{log_file_name}", path
+ expect = formatted_lines * i
+ if compression
+ check_gzipped_result(path, expect)
+ else
+ check_result(path, expect)
+ end
+ end
  end

  test 'append when JST' do
data/test/plugin/test_output_as_buffered_secondary.rb CHANGED
@@ -775,7 +775,7 @@ class BufferedOutputSecondaryTest < Test::Unit::TestCase
  assert_equal [ 'test.tag.1', event_time('2016-04-13 18:33:13').to_i, {"name" => "moris", "age" => 36, "message" => "data2"} ], written[1]
  assert_equal [ 'test.tag.1', event_time('2016-04-13 18:33:32').to_i, {"name" => "moris", "age" => 36, "message" => "data3"} ], written[2]

- assert{ @i.log.out.logs.any?{|l| l.include?("[warn]: retry succeeded by secondary.") } }
+ assert(@i.log.out.logs.any?{|l| l.include?("[warn]: retry succeeded by secondary.") })
  end

  test 'exponential backoff interval will be initialized when switched to secondary' do
data/test/plugin_helper/test_timer.rb CHANGED
@@ -90,8 +90,8 @@ class TimerTest < Test::Unit::TestCase
  assert{ counter1 >= 4 && counter1 <= 5 }
  assert{ counter2 == 2 }
  msg = "Unexpected error raised. Stopping the timer. title=:t2"
- assert{ d1.log.out.logs.any?{|line| line.include?("[error]:") && line.include?(msg) && line.include?("abort!!!!!!") } }
- assert{ d1.log.out.logs.any?{|line| line.include?("[error]:") && line.include?("Timer detached. title=:t2") } }
+ assert(d1.log.out.logs.any?{|line| line.include?("[error]:") && line.include?(msg) && line.include?("abort!!!!!!") })
+ assert(d1.log.out.logs.any?{|line| line.include?("[error]:") && line.include?("Timer detached. title=:t2") })

  d1.shutdown; d1.close; d1.terminate
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: fluentd
  version: !ruby/object:Gem::Version
- version: 1.14.3
+ version: 1.14.4
  platform: x86-mingw32
  authors:
  - Sadayuki Furuhashi
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-11-26 00:00:00.000000000 Z
+ date: 2022-01-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -1035,7 +1035,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.1.6
+ rubygems_version: 3.1.2
  signing_key:
  specification_version: 4
  summary: Fluentd event collector