fluentd 1.16.1 → 1.16.2

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +37 -0
  3. data/fluentd.gemspec +1 -1
  4. data/lib/fluent/command/ctl.rb +2 -2
  5. data/lib/fluent/command/plugin_config_formatter.rb +1 -1
  6. data/lib/fluent/config/dsl.rb +1 -1
  7. data/lib/fluent/config/v1_parser.rb +2 -2
  8. data/lib/fluent/counter/server.rb +1 -1
  9. data/lib/fluent/counter/validator.rb +3 -3
  10. data/lib/fluent/engine.rb +1 -1
  11. data/lib/fluent/event.rb +6 -2
  12. data/lib/fluent/log.rb +9 -0
  13. data/lib/fluent/match.rb +1 -1
  14. data/lib/fluent/msgpack_factory.rb +6 -1
  15. data/lib/fluent/plugin/base.rb +1 -1
  16. data/lib/fluent/plugin/filter_record_transformer.rb +1 -1
  17. data/lib/fluent/plugin/in_forward.rb +1 -1
  18. data/lib/fluent/plugin/in_http.rb +8 -8
  19. data/lib/fluent/plugin/in_sample.rb +1 -1
  20. data/lib/fluent/plugin/in_tail/position_file.rb +32 -18
  21. data/lib/fluent/plugin/in_tail.rb +58 -24
  22. data/lib/fluent/plugin/out_exec_filter.rb +2 -2
  23. data/lib/fluent/plugin/output.rb +1 -1
  24. data/lib/fluent/plugin/parser_json.rb +1 -1
  25. data/lib/fluent/plugin_helper/event_loop.rb +2 -2
  26. data/lib/fluent/plugin_helper/record_accessor.rb +1 -1
  27. data/lib/fluent/plugin_helper/thread.rb +3 -3
  28. data/lib/fluent/plugin_id.rb +1 -1
  29. data/lib/fluent/supervisor.rb +1 -1
  30. data/lib/fluent/version.rb +1 -1
  31. data/test/plugin/in_tail/test_position_file.rb +31 -1
  32. data/test/plugin/test_base.rb +1 -1
  33. data/test/plugin/test_buffer_chunk.rb +11 -0
  34. data/test/plugin/test_in_forward.rb +9 -9
  35. data/test/plugin/test_in_tail.rb +379 -0
  36. data/test/plugin/test_in_unix.rb +2 -2
  37. data/test/plugin/test_multi_output.rb +1 -1
  38. data/test/plugin/test_out_exec_filter.rb +2 -2
  39. data/test/plugin/test_out_file.rb +2 -2
  40. data/test/plugin/test_output.rb +12 -12
  41. data/test/plugin/test_output_as_buffered.rb +44 -44
  42. data/test/plugin/test_output_as_buffered_retries.rb +1 -1
  43. data/test/plugin/test_output_as_buffered_secondary.rb +2 -2
  44. data/test/plugin_helper/test_child_process.rb +2 -2
  45. data/test/plugin_helper/test_server.rb +1 -1
  46. data/test/test_log.rb +38 -1
  47. data/test/test_msgpack_factory.rb +32 -0
  48. data/test/test_supervisor.rb +13 -0
  49. metadata +4 -4
@@ -101,16 +101,16 @@ module Fluent
       end
 
       def thread_exist?(title)
-        @_threads.values.select{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }.size > 0
+        @_threads.values.count{|thread| title == thread[:_fluentd_plugin_helper_thread_title] } > 0
       end
 
       def thread_started?(title)
-        t = @_threads.values.select{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }.first
+        t = @_threads.values.find{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }
         t && t[:_fluentd_plugin_helper_thread_started]
       end
 
       def thread_running?(title)
-        t = @_threads.values.select{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }.first
+        t = @_threads.values.find{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }
         t && t[:_fluentd_plugin_helper_thread_running]
       end
 
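The refactor above swaps `select {...}.size`/`select {...}.first` for `count {...}`/`find {...}`. A standalone Ruby sketch (illustrative data only, not fluentd code) of the difference:

```ruby
# Illustrative only: `count` and `find` avoid building an intermediate array,
# and `find` stops at the first match.
threads = [
  { title: "flush_thread_0", running: true },
  { title: "event_loop",     running: false },
]

# Before: filter into a new array, then take its size or first element.
threads.select { |t| t[:title] == "event_loop" }.size > 0   # => true
threads.select { |t| t[:title] == "event_loop" }.first      # => { title: "event_loop", running: false }

# After: same results without the intermediate array.
threads.count { |t| t[:title] == "event_loop" } > 0         # => true
threads.find  { |t| t[:title] == "event_loop" }             # => { title: "event_loop", running: false }
```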
@@ -49,7 +49,7 @@ module Fluent
     # Thread::Backtrace::Location#path returns base filename or absolute path.
     # #absolute_path returns absolute_path always.
     # https://bugs.ruby-lang.org/issues/12159
-    if location.absolute_path =~ /\/test_[^\/]+\.rb$/ # location.path =~ /test_.+\.rb$/
+    if /\/test_[^\/]+\.rb$/.match?(location.absolute_path) # location.path =~ /test_.+\.rb$/
       return true
     end
   end
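The change above prefers `Regexp#match?` over `=~` when only a boolean is needed. A minimal sketch of the behavioural difference (the path is hypothetical):

```ruby
# Illustrative only: `match?` returns a boolean, allocates no MatchData,
# and does not set the `$~` / `$1` match globals (available since Ruby 2.4).
path = "/src/fluentd/test/plugin/test_in_tail.rb"

path =~ /\/test_[^\/]+\.rb$/         # => byte offset of the match, or nil; sets $~
/\/test_[^\/]+\.rb$/.match?(path)    # => true or false; cheaper when only a yes/no answer is needed
```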
@@ -697,7 +697,7 @@ module Fluent
     actual_log_path = @log_path
 
     # We need to prepare a unique path for each worker since Windows locks files.
-    if Fluent.windows? && rotate
+    if Fluent.windows? && rotate && @log_path && @log_path != "-"
       actual_log_path = Fluent::Log.per_process_path(@log_path, process_type, worker_id)
     end
 
@@ -16,6 +16,6 @@
 
 module Fluent
 
-  VERSION = '1.16.1'
+  VERSION = '1.16.2'
 
 end
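This is the only user-visible version change; the rest of the diff is bug fixes and test coverage. If you consume fluentd as a gem and want to pick up this patch release explicitly, a minimal Gemfile pin could look like the following (example only, adjust to your setup):

```ruby
# Gemfile (example only)
source "https://rubygems.org"

gem "fluentd", "1.16.2"
```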
@@ -26,6 +26,10 @@ class IntailPositionFileTest < Test::Unit::TestCase
     "valid_path" => Fluent::Plugin::TailInput::TargetInfo.new("valid_path", 1),
     "inode23bit" => Fluent::Plugin::TailInput::TargetInfo.new("inode23bit", 0),
   }
+  TEST_CONTENT_INODES = {
+    1 => Fluent::Plugin::TailInput::TargetInfo.new("valid_path", 1),
+    0 => Fluent::Plugin::TailInput::TargetInfo.new("inode23bit", 0),
+  }
 
   def write_data(f, content)
     f.write(content)
@@ -221,7 +225,7 @@ class IntailPositionFileTest < Test::Unit::TestCase
   end
 
   sub_test_case '#unwatch' do
-    test 'deletes entry by path' do
+    test 'unwatch entry by path' do
       write_data(@file, TEST_CONTENT)
       pf = Fluent::Plugin::TailInput::PositionFile.load(@file, false, {}, logger: $log)
       inode1 = File.stat(@file).ino
@@ -239,6 +243,32 @@ class IntailPositionFileTest < Test::Unit::TestCase
 
       assert_not_equal p1, p2
     end
+
+    test 'unwatch entries by inode' do
+      write_data(@file, TEST_CONTENT)
+      pf = Fluent::Plugin::TailInput::PositionFile.load(@file, true, TEST_CONTENT_INODES, logger: $log)
+
+      existing_targets = TEST_CONTENT_INODES.select do |inode, target_info|
+        inode == 1
+      end
+      pe_to_unwatch = pf[TEST_CONTENT_INODES[0]]
+
+      pf.unwatch_removed_targets(existing_targets)
+
+      assert_equal(
+        {
+          map_keys: [TEST_CONTENT_INODES[1].ino],
+          unwatched_pe_pos: Fluent::Plugin::TailInput::PositionFile::UNWATCHED_POSITION,
+        },
+        {
+          map_keys: pf.instance_variable_get(:@map).keys,
+          unwatched_pe_pos: pe_to_unwatch.read_pos,
+        }
+      )
+
+      unwatched_pe_retaken = pf[TEST_CONTENT_INODES[0]]
+      assert_not_equal pe_to_unwatch, unwatched_pe_retaken
+    end
   end
 
   sub_test_case 'FilePositionEntry' do
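These tests exercise the pos-file bookkeeping used when `follow_inodes` is enabled: entries are looked up by inode rather than path, and `unwatch_removed_targets` marks entries whose inode no longer exists as unwatched and drops them from the in-memory map. The sketch below is a toy model of that behaviour, not fluentd's implementation; names and the sentinel value are only analogous to the real `PositionFile` internals:

```ruby
# Toy model (illustrative only) of an inode-keyed position map.
UNWATCHED = 0xffffffffffffffff  # assumed sentinel, analogous to PositionFile::UNWATCHED_POSITION

class ToyPositionMap
  def initialize
    @map = {}  # inode => last read position
  end

  attr_reader :map

  def watch(inode, pos)
    @map[inode] = pos
  end

  # Keep only inodes that still exist; mark removed ones unwatched, then forget them.
  def unwatch_removed_targets(existing_inodes)
    (@map.keys - existing_inodes).each do |inode|
      @map[inode] = UNWATCHED  # a real entry would also be rewritten in the pos file
      @map.delete(inode)
    end
  end
end

pm = ToyPositionMap.new
pm.watch(1, 0x16)
pm.watch(0, 0x10)
pm.unwatch_removed_targets([1])
pm.map.keys  # => [1]
```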
@@ -108,7 +108,7 @@ class BaseTest < Test::Unit::TestCase
     @p.extend m
     assert_equal [], logger.logs
 
-    ret = @p.string_safe_encoding("abc\xff.\x01f"){|s| s.split(/\./) }
+    ret = @p.string_safe_encoding("abc\xff.\x01f"){|s| s.split(".") }
     assert_equal ['abc?', "\u0001f"], ret
     assert_equal 1, logger.logs.size
     assert{ logger.logs.first.include?("invalid byte sequence is replaced in ") }
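The only change here is passing a plain string separator to `split` instead of an equivalent single-character regexp; the result is identical and the string form skips the regexp engine. For example:

```ruby
# Illustrative only: both forms split on a literal dot.
"abc?.\u0001f".split(/\./)  # => ["abc?", "\u0001f"]
"abc?.\u0001f".split(".")   # => ["abc?", "\u0001f"]
```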
@@ -57,6 +57,17 @@ class BufferChunkTest < Test::Unit::TestCase
     assert chunk.respond_to?(:msgpack_each)
   end
 
+  test 'unpacker arg is not implemented for ChunkMessagePackEventStreamer' do
+    meta = Object.new
+    chunk = Fluent::Plugin::Buffer::Chunk.new(meta)
+    chunk.extend Fluent::ChunkMessagePackEventStreamer
+
+    unpacker = Fluent::MessagePackFactory.thread_local_msgpack_unpacker
+
+    assert_raise(NotImplementedError){ chunk.each(unpacker: unpacker) }
+    assert_raise(NotImplementedError){ chunk.msgpack_each(unpacker: unpacker) }
+  end
+
   test 'some methods raise ArgumentError with an option of `compressed: :gzip` and without extending Compressble`' do
     meta = Object.new
     chunk = Fluent::Plugin::Buffer::Chunk.new(meta)
@@ -367,7 +367,7 @@ class ForwardInputTest < Test::Unit::TestCase
     end
 
     logs = d.instance.log.out.logs
-    assert{ logs.select{|line| line =~ /skip invalid event/ }.size == 2 }
+    assert{ logs.count{|line| line =~ /skip invalid event/ } == 2 }
 
     d.instance_shutdown
   end
@@ -593,10 +593,10 @@ class ForwardInputTest < Test::Unit::TestCase
 
     # check log
     logs = d.instance.log.logs
-    assert_equal 1, logs.select{|line|
+    assert_equal 1, logs.count{|line|
      line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_warn_limit':/ &&
        line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=16777216 size=16777501/
-    }.size, "large chunk warning is not logged"
+    }, "large chunk warning is not logged"
 
     d.instance_shutdown
   end
@@ -619,10 +619,10 @@ class ForwardInputTest < Test::Unit::TestCase
 
     # check log
     logs = d.instance.log.logs
-    assert_equal 1, logs.select{ |line|
+    assert_equal 1, logs.count{ |line|
      line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_warn_limit':/ &&
        line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=16777216 size=16777501/
-    }.size, "large chunk warning is not logged"
+    }, "large chunk warning is not logged"
 
     d.instance_shutdown
   end
@@ -653,10 +653,10 @@ class ForwardInputTest < Test::Unit::TestCase
 
     # check log
     logs = d.instance.log.logs
-    assert_equal 1, logs.select{|line|
+    assert_equal 1, logs.count{|line|
      line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_limit', dropped:/ &&
        line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=33554432 size=33554989/
-    }.size, "large chunk warning is not logged"
+    }, "large chunk warning is not logged"
 
     d.instance_shutdown
   end
@@ -676,9 +676,9 @@ class ForwardInputTest < Test::Unit::TestCase
 
     # check log
     logs = d.instance.log.logs
-    assert_equal 1, logs.select{|line|
+    assert_equal 1, logs.count{|line|
      line =~ / \[warn\]: incoming chunk is broken: host="#{LOCALHOST_HOSTNAME}" msg=#{data.inspect}/
-    }.size, "should not accept broken chunk"
+    }, "should not accept broken chunk"
 
     d.instance_shutdown
   end
@@ -2638,4 +2638,383 @@ class TailInputTest < Test::Unit::TestCase
       end
     end
   end
+
+  sub_test_case "Update watchers for rotation with follow_inodes" do
+    def test_updateTW_before_refreshTW_and_detach_before_refreshTW
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to detach the old watcher quickly.
+          "rotate_wait" => "1s",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 4, timeout: 10) do
+        # Rotate (If the timing is bad, `TailWatcher::on_notify` might be called between mv and new-file-creation)
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # `watch_timer` calls `TailWatcher::on_notify`, and then `update_watcher` updates the TailWatcher:
+        #   TailWatcher(path: "tail.txt", inode: inode_0) => TailWatcher(path: "tail.txt", inode: inode_1)
+        # The old TailWathcer is detached here since `rotate_wait` is just `1s`.
+        sleep 3
+
+        # This reproduces the following situation:
+        #   Rotation => update_watcher => refresh_watchers
+        # This adds a new TailWatcher: TailWatcher(path: "tail.txt1", inode: inode_0)
+        d.instance.refresh_watchers
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = tail_watchers[0].ino
+      inode_1 = tail_watchers[1].ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt1"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_0],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
+
+    def test_updateTW_before_refreshTW_and_detach_after_refreshTW
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to detach the old watcher after refresh_watchers.
+          "rotate_wait" => "4s",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 4, timeout: 10) do
+        # Rotate (If the timing is bad, `TailWatcher::on_notify` might be called between mv and new-file-creation)
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # `watch_timer` calls `TailWatcher::on_notify`, and then `update_watcher` updates the TailWatcher:
+        #   TailWatcher(path: "tail.txt", inode: inode_0) => TailWatcher(path: "tail.txt", inode: inode_1)
+        sleep 2
+
+        # This reproduces the following situation:
+        #   Rotation => update_watcher => refresh_watchers
+        # This adds a new TailWatcher: TailWatcher(path: "tail.txt1", inode: inode_0)
+        d.instance.refresh_watchers
+
+        # The old TailWathcer is detached here since `rotate_wait` is `4s`.
+        sleep 3
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = tail_watchers[0].ino
+      inode_1 = tail_watchers[1].ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt1"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_0],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            # The recorded path is old, but it is no problem. The path is not used when using follow_inodes.
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
+
+    # The scenario where in_tail wrongly detaches TailWatcher.
+    # This is reported in https://github.com/fluent/fluentd/issues/4190.
+    def test_updateTW_after_refreshTW
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to detach the old watcher quickly.
+          "rotate_wait" => "1s",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 4, timeout: 10) do
+        # Rotate (If the timing is bad, `TailWatcher::on_notify` might be called between mv and new-file-creation)
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # This reproduces the following situation:
+        #   Rotation => refresh_watchers => update_watcher
+        # This add a new TailWatcher: TailWatcher(path: "tail.txt", inode: inode_1)
+        # This overwrites `@tails["tail.txt"]`
+        d.instance.refresh_watchers
+
+        # `watch_timer` calls `TailWatcher::on_notify`, and then `update_watcher` trys to update the TailWatcher:
+        #   TailWatcher(path: "tail.txt", inode: inode_0) => TailWatcher(path: "tail.txt", inode: inode_1)
+        # However, it is already added in `refresh_watcher`, so `update_watcher` doesn't create the new TailWatcher.
+        # The old TailWathcer is detached here since `rotate_wait` is just `1s`.
+        sleep 3
+
+        # This adds a new TailWatcher: TailWatcher(path: "tail.txt1", inode: inode_0)
+        d.instance.refresh_watchers
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = tail_watchers[0].ino
+      inode_1 = tail_watchers[1].ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt1"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_0],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            # The recorded path is old, but it is no problem. The path is not used when using follow_inodes.
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
+
+    def test_path_resurrection
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # https://github.com/fluent/fluentd/pull/4237#issuecomment-1633358632
+          # Because of this problem, log duplication can occur during `rotate_wait`.
+          # Need to set `rotate_wait 0` for a workaround.
+          "rotate_wait" => "0s",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 5, timeout: 10) do
+        # Rotate
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        # TailWatcher(path: "tail.txt", inode: inode_0) detects `tail.txt` disappeared.
+        # Call `update_watcher` to stop and discard self.
+        # If not discarding, then it will be a orphan and cause leak and log duplication.
+        #
+        # This reproduces the case where the notify to TailWatcher comes before the new file for the path
+        # is created during rotation.
+        # (stat_watcher notifies faster than a new file is created)
+        # Overall, this is a rotation operation, but from the TailWatcher, it appears as if the file
+        # was resurrected once it disappeared.
+        sleep 2 # On Windows and macOS, StatWatcher doesn't work, so need enough interval for TimeTrigger.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # Add new TailWatchers
+        #   tail.txt: TailWatcher(path: "tail.txt", inode: inode_1)
+        #   tail.txt: TailWatcher(path: "tail.txt1", inode: inode_0)
+        # NOTE: If not discarding the first TailWatcher on notify, this makes it a orphan because
+        #   this overwrites the `@tails[tail.txt]` by adding TailWatcher(path: "tail.txt", inode: inode_1)
+        d.instance.refresh_watchers
+
+        # This does nothing.
+        # NOTE: If not discarding the first TailWatcher on notify, this add
+        #   tail.txt1: TailWatcher(path: "tail.txt1", inode: inode_0)
+        #   because the previous refresh_watcher overwrites `@tails[tail.txt]` and the inode_0 is lost.
+        #   This would cause log duplication.
+        d.instance.refresh_watchers
+
+        # Append to the old file
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt1", "ab") {|f| f.puts "file1 log3"}
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = Fluent::FileWrapper.stat("#{@tmp_dir}/tail.txt1").ino
+      inode_1 = Fluent::FileWrapper.stat("#{@tmp_dir}/tail.txt").ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file1 log3", "file2 log1", "file2 log2"],
+          tail_watcher_set: Set[
+            {
+              path: "#{@tmp_dir}/tail.txt",
+              inode: inode_0,
+              io_handler_opened_status: false,
+            },
+            {
+              path: "#{@tmp_dir}/tail.txt",
+              inode: inode_1,
+              io_handler_opened_status: false,
+            },
+            {
+              path: "#{@tmp_dir}/tail.txt1",
+              inode: inode_0,
+              io_handler_opened_status: false,
+            },
+          ],
+          position_entries: [
+            ["#{@tmp_dir}/tail.txt", "0000000000000021", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_set: Set.new(tail_watchers.collect { |tw|
+            {
+              path: tw.path,
+              inode: tw.ino,
+              io_handler_opened_status: tw.instance_variable_get(:@io_handler)&.opened? || false,
+            }
+          }),
+          position_entries: position_entries,
+        },
+      )
+    end
+  end
 end
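The assertions above read the pos file back directly. Its format is one entry per line: the watched path, the last read position, and the inode, separated by tabs, with position and inode written as 16-digit hexadecimal. A standalone sketch of the same parsing the tests perform (the path below is hypothetical):

```ruby
# Illustrative only: parse an in_tail pos file into [path, position, inode] tuples.
entries = File.readlines("/var/log/td-agent/tail.pos", chomp: true).map do |line|
  path, pos_hex, inode_hex = line.split("\t")
  [path, pos_hex.to_i(16), inode_hex.to_i(16)]
end

entries.each do |path, pos, inode|
  puts "#{path}: read up to byte #{pos} (inode #{inode})"
end
```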
@@ -174,8 +174,8 @@ class UnixInputTest < Test::Unit::TestCase
     assert_equal 0, @d.events.size
 
     logs = @d.instance.log.logs
-    assert_equal 1, logs.select { |line|
+    assert_equal 1, logs.count { |line|
      line =~ / \[warn\]: incoming data is broken: msg=#{data.inspect}/
-    }.size, "should not accept broken chunk"
+    }, "should not accept broken chunk"
   end
 end unless Fluent.windows?
@@ -150,7 +150,7 @@ class MultiOutputTest < Test::Unit::TestCase
     log_size_for_metrics_plugin_helper = 4
     expected_warn_log_size = log_size_for_multi_output_itself + log_size_for_metrics_plugin_helper
     logs = @i.log.out.logs
-    assert{ logs.select{|log| log.include?('[warn]') && log.include?("'type' is deprecated parameter name. use '@type' instead.") }.size == expected_warn_log_size }
+    assert{ logs.count{|log| log.include?('[warn]') && log.include?("'type' is deprecated parameter name. use '@type' instead.") } == expected_warn_log_size }
   end
 
   test '#emit_events calls #process always' do
@@ -597,8 +597,8 @@ class ExecFilterOutputTest < Test::Unit::TestCase
     # the number of pids should be same with number of child processes
     assert_equal 2, pid_list.size
     logs = d.instance.log.out.logs
-    assert_equal 2, logs.select { |l| l.include?('child process exits with error code') }.size
-    assert_equal 2, logs.select { |l| l.include?('respawning child process') }.size
+    assert_equal 2, logs.count { |l| l.include?('child process exits with error code') }
+    assert_equal 2, logs.count { |l| l.include?('respawning child process') }
 
   ensure
     d.run(start: false, shutdown: true)
@@ -264,8 +264,8 @@ class FileOutputTest < Test::Unit::TestCase
     assert !(Dir.exist?("#{TMP_DIR}/my.data/a"))
     assert !(Dir.exist?("#{TMP_DIR}/your.data/a"))
     buffer_files = Dir.entries("#{TMP_DIR}/buf_full").reject{|e| e =~ /^\.+$/ }
-    assert_equal 2, buffer_files.select{|n| n.end_with?('.meta') }.size
-    assert_equal 2, buffer_files.select{|n| !n.end_with?('.meta') }.size
+    assert_equal 2, buffer_files.count{|n| n.end_with?('.meta') }
+    assert_equal 2, buffer_files.count{|n| !n.end_with?('.meta') }
 
     m1 = d.instance.metadata('my.data', t1, {"type" => "a"})
     m2 = d.instance.metadata('your.data', t3, {"type" => "a"})