fluentd 1.15.3-x86-mingw32 → 1.16.2-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/ISSUE_TEMPLATE/bug_report.yaml +1 -0
- data/.github/ISSUE_TEMPLATE/feature_request.yaml +1 -0
- data/.github/workflows/linux-test.yaml +2 -2
- data/.github/workflows/macos-test.yaml +2 -2
- data/.github/workflows/stale-actions.yml +24 -0
- data/.github/workflows/windows-test.yaml +2 -2
- data/CHANGELOG.md +151 -0
- data/CONTRIBUTING.md +1 -1
- data/MAINTAINERS.md +5 -3
- data/README.md +0 -1
- data/SECURITY.md +5 -9
- data/fluentd.gemspec +3 -3
- data/lib/fluent/command/ctl.rb +2 -2
- data/lib/fluent/command/fluentd.rb +55 -53
- data/lib/fluent/command/plugin_config_formatter.rb +1 -1
- data/lib/fluent/config/dsl.rb +1 -1
- data/lib/fluent/config/v1_parser.rb +2 -2
- data/lib/fluent/counter/server.rb +1 -1
- data/lib/fluent/counter/validator.rb +3 -3
- data/lib/fluent/daemon.rb +2 -4
- data/lib/fluent/engine.rb +1 -1
- data/lib/fluent/event.rb +8 -4
- data/lib/fluent/log/console_adapter.rb +66 -0
- data/lib/fluent/log.rb +44 -5
- data/lib/fluent/match.rb +1 -1
- data/lib/fluent/msgpack_factory.rb +6 -1
- data/lib/fluent/plugin/base.rb +6 -8
- data/lib/fluent/plugin/buf_file.rb +32 -3
- data/lib/fluent/plugin/buf_file_single.rb +32 -3
- data/lib/fluent/plugin/buffer/file_chunk.rb +1 -1
- data/lib/fluent/plugin/buffer.rb +21 -0
- data/lib/fluent/plugin/filter_record_transformer.rb +1 -1
- data/lib/fluent/plugin/in_forward.rb +1 -1
- data/lib/fluent/plugin/in_http.rb +8 -8
- data/lib/fluent/plugin/in_sample.rb +1 -1
- data/lib/fluent/plugin/in_tail/position_file.rb +32 -18
- data/lib/fluent/plugin/in_tail.rb +58 -24
- data/lib/fluent/plugin/in_tcp.rb +47 -2
- data/lib/fluent/plugin/out_exec_filter.rb +2 -2
- data/lib/fluent/plugin/out_forward/ack_handler.rb +19 -4
- data/lib/fluent/plugin/out_forward.rb +2 -2
- data/lib/fluent/plugin/out_secondary_file.rb +39 -22
- data/lib/fluent/plugin/output.rb +50 -13
- data/lib/fluent/plugin/parser_json.rb +1 -1
- data/lib/fluent/plugin_helper/event_loop.rb +2 -2
- data/lib/fluent/plugin_helper/http_server/server.rb +2 -1
- data/lib/fluent/plugin_helper/record_accessor.rb +1 -1
- data/lib/fluent/plugin_helper/server.rb +8 -0
- data/lib/fluent/plugin_helper/thread.rb +3 -3
- data/lib/fluent/plugin_id.rb +1 -1
- data/lib/fluent/supervisor.rb +157 -251
- data/lib/fluent/test/driver/base.rb +11 -5
- data/lib/fluent/test/driver/filter.rb +4 -0
- data/lib/fluent/test/startup_shutdown.rb +6 -8
- data/lib/fluent/version.rb +1 -1
- data/templates/new_gem/test/helper.rb.erb +0 -1
- data/test/command/test_ctl.rb +1 -1
- data/test/command/test_fluentd.rb +137 -6
- data/test/command/test_plugin_config_formatter.rb +0 -1
- data/test/compat/test_parser.rb +5 -5
- data/test/config/test_system_config.rb +0 -8
- data/test/log/test_console_adapter.rb +110 -0
- data/test/plugin/in_tail/test_position_file.rb +31 -1
- data/test/plugin/out_forward/test_ack_handler.rb +39 -0
- data/test/plugin/test_base.rb +99 -1
- data/test/plugin/test_buf_file.rb +62 -23
- data/test/plugin/test_buf_file_single.rb +65 -0
- data/test/plugin/test_buffer_chunk.rb +11 -0
- data/test/plugin/test_in_forward.rb +9 -9
- data/test/plugin/test_in_http.rb +2 -3
- data/test/plugin/test_in_monitor_agent.rb +2 -3
- data/test/plugin/test_in_tail.rb +379 -0
- data/test/plugin/test_in_tcp.rb +87 -2
- data/test/plugin/test_in_udp.rb +28 -0
- data/test/plugin/test_in_unix.rb +2 -2
- data/test/plugin/test_multi_output.rb +1 -1
- data/test/plugin/test_out_exec_filter.rb +2 -2
- data/test/plugin/test_out_file.rb +2 -2
- data/test/plugin/test_out_forward.rb +14 -18
- data/test/plugin/test_out_http.rb +1 -0
- data/test/plugin/test_output.rb +281 -12
- data/test/plugin/test_output_as_buffered.rb +44 -44
- data/test/plugin/test_output_as_buffered_compress.rb +32 -18
- data/test/plugin/test_output_as_buffered_retries.rb +1 -1
- data/test/plugin/test_output_as_buffered_secondary.rb +2 -2
- data/test/plugin/test_parser_regexp.rb +1 -6
- data/test/plugin_helper/test_child_process.rb +2 -2
- data/test/plugin_helper/test_http_server_helper.rb +1 -1
- data/test/plugin_helper/test_server.rb +60 -6
- data/test/test_config.rb +0 -21
- data/test/test_formatter.rb +23 -20
- data/test/test_log.rb +108 -36
- data/test/test_msgpack_factory.rb +32 -0
- data/test/test_supervisor.rb +287 -279
- metadata +15 -21
- data/.drone.yml +0 -35
- data/.gitlab-ci.yml +0 -103
- data/test/test_logger_initializer.rb +0 -46
data/test/plugin/test_in_tail.rb
CHANGED
@@ -2638,4 +2638,383 @@ class TailInputTest < Test::Unit::TestCase
       end
     end
   end
+
+  sub_test_case "Update watchers for rotation with follow_inodes" do
+    def test_updateTW_before_refreshTW_and_detach_before_refreshTW
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to detach the old watcher quickly.
+          "rotate_wait" => "1s",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 4, timeout: 10) do
+        # Rotate (If the timing is bad, `TailWatcher::on_notify` might be called between mv and new-file-creation)
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # `watch_timer` calls `TailWatcher::on_notify`, and then `update_watcher` updates the TailWatcher:
+        # TailWatcher(path: "tail.txt", inode: inode_0) => TailWatcher(path: "tail.txt", inode: inode_1)
+        # The old TailWathcer is detached here since `rotate_wait` is just `1s`.
+        sleep 3
+
+        # This reproduces the following situation:
+        # Rotation => update_watcher => refresh_watchers
+        # This adds a new TailWatcher: TailWatcher(path: "tail.txt1", inode: inode_0)
+        d.instance.refresh_watchers
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = tail_watchers[0].ino
+      inode_1 = tail_watchers[1].ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt1"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_0],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
+
+    def test_updateTW_before_refreshTW_and_detach_after_refreshTW
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to detach the old watcher after refresh_watchers.
+          "rotate_wait" => "4s",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 4, timeout: 10) do
+        # Rotate (If the timing is bad, `TailWatcher::on_notify` might be called between mv and new-file-creation)
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # `watch_timer` calls `TailWatcher::on_notify`, and then `update_watcher` updates the TailWatcher:
+        # TailWatcher(path: "tail.txt", inode: inode_0) => TailWatcher(path: "tail.txt", inode: inode_1)
+        sleep 2
+
+        # This reproduces the following situation:
+        # Rotation => update_watcher => refresh_watchers
+        # This adds a new TailWatcher: TailWatcher(path: "tail.txt1", inode: inode_0)
+        d.instance.refresh_watchers
+
+        # The old TailWathcer is detached here since `rotate_wait` is `4s`.
+        sleep 3
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = tail_watchers[0].ino
+      inode_1 = tail_watchers[1].ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt1"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_0],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            # The recorded path is old, but it is no problem. The path is not used when using follow_inodes.
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
+
+    # The scenario where in_tail wrongly detaches TailWatcher.
+    # This is reported in https://github.com/fluent/fluentd/issues/4190.
+    def test_updateTW_after_refreshTW
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to detach the old watcher quickly.
+          "rotate_wait" => "1s",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 4, timeout: 10) do
+        # Rotate (If the timing is bad, `TailWatcher::on_notify` might be called between mv and new-file-creation)
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # This reproduces the following situation:
+        # Rotation => refresh_watchers => update_watcher
+        # This add a new TailWatcher: TailWatcher(path: "tail.txt", inode: inode_1)
+        # This overwrites `@tails["tail.txt"]`
+        d.instance.refresh_watchers
+
+        # `watch_timer` calls `TailWatcher::on_notify`, and then `update_watcher` trys to update the TailWatcher:
+        # TailWatcher(path: "tail.txt", inode: inode_0) => TailWatcher(path: "tail.txt", inode: inode_1)
+        # However, it is already added in `refresh_watcher`, so `update_watcher` doesn't create the new TailWatcher.
+        # The old TailWathcer is detached here since `rotate_wait` is just `1s`.
+        sleep 3
+
+        # This adds a new TailWatcher: TailWatcher(path: "tail.txt1", inode: inode_0)
+        d.instance.refresh_watchers
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = tail_watchers[0].ino
+      inode_1 = tail_watchers[1].ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt1"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_0],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            # The recorded path is old, but it is no problem. The path is not used when using follow_inodes.
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
+
+    def test_path_resurrection
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          # In order to reproduce the same condition stably, ensure that `refresh_watchers` is not
+          # called by a timer.
+          "refresh_interval" => "1h",
+          # https://github.com/fluent/fluentd/pull/4237#issuecomment-1633358632
+          # Because of this problem, log duplication can occur during `rotate_wait`.
+          # Need to set `rotate_wait 0` for a workaround.
+          "rotate_wait" => "0s",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 5, timeout: 10) do
+        # Rotate
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file1 log2"}
+        FileUtils.move("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail.txt" + "1")
+        # TailWatcher(path: "tail.txt", inode: inode_0) detects `tail.txt` disappeared.
+        # Call `update_watcher` to stop and discard self.
+        # If not discarding, then it will be a orphan and cause leak and log duplication.
+        #
+        # This reproduces the case where the notify to TailWatcher comes before the new file for the path
+        # is created during rotation.
+        # (stat_watcher notifies faster than a new file is created)
+        # Overall, this is a rotation operation, but from the TailWatcher, it appears as if the file
+        # was resurrected once it disappeared.
+        sleep 2 # On Windows and macOS, StatWatcher doesn't work, so need enough interval for TimeTrigger.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "file2 log1"}
+
+        # Add new TailWatchers
+        # tail.txt: TailWatcher(path: "tail.txt", inode: inode_1)
+        # tail.txt: TailWatcher(path: "tail.txt1", inode: inode_0)
+        # NOTE: If not discarding the first TailWatcher on notify, this makes it a orphan because
+        # this overwrites the `@tails[tail.txt]` by adding TailWatcher(path: "tail.txt", inode: inode_1)
+        d.instance.refresh_watchers
+
+        # This does nothing.
+        # NOTE: If not discarding the first TailWatcher on notify, this add
+        # tail.txt1: TailWatcher(path: "tail.txt1", inode: inode_0)
+        # because the previous refresh_watcher overwrites `@tails[tail.txt]` and the inode_0 is lost.
+        # This would cause log duplication.
+        d.instance.refresh_watchers
+
+        # Append to the old file
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt1", "ab") {|f| f.puts "file1 log3"}
+
+        # Append to the new current log file.
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "ab") {|f| f.puts "file2 log2"}
+      end
+
+      inode_0 = Fluent::FileWrapper.stat("#{@tmp_dir}/tail.txt1").ino
+      inode_1 = Fluent::FileWrapper.stat("#{@tmp_dir}/tail.txt").ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file1 log3", "file2 log1", "file2 log2"],
+          tail_watcher_set: Set[
+            {
+              path: "#{@tmp_dir}/tail.txt",
+              inode: inode_0,
+              io_handler_opened_status: false,
+            },
+            {
+              path: "#{@tmp_dir}/tail.txt",
+              inode: inode_1,
+              io_handler_opened_status: false,
+            },
+            {
+              path: "#{@tmp_dir}/tail.txt1",
+              inode: inode_0,
+              io_handler_opened_status: false,
+            },
+          ],
+          position_entries: [
+            ["#{@tmp_dir}/tail.txt", "0000000000000021", inode_0],
+            ["#{@tmp_dir}/tail.txt", "0000000000000016", inode_1],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_set: Set.new(tail_watchers.collect { |tw|
+            {
+              path: tw.path,
+              inode: tw.ino,
+              io_handler_opened_status: tw.instance_variable_get(:@io_handler)&.opened? || false,
+            }
+          }),
+          position_entries: position_entries,
+        },
+      )
+    end
+  end
 end
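The tests added above pin down how in_tail hands watchers over during rotation when `follow_inodes` is enabled (the regression they guard against is https://github.com/fluent/fluentd/issues/4190). For orientation only, the options they exercise map onto an in_tail source along the lines of the sketch below; the tag, path, and pos_file values are placeholders and are not taken from the diff.

    <source>
      @type tail
      # placeholder tag and paths, for illustration only
      tag app.logs
      path /var/log/app/app.log*
      pos_file /var/lib/fluentd/app.pos
      read_from_head true
      # track files by inode so rotated files keep their read position and are not re-read
      follow_inodes true
      # how long a rotated file keeps being watched before its watcher is detached
      rotate_wait 5s
      # how often the watch list is rebuilt from the path glob
      refresh_interval 60s
      <parse>
        @type none
      </parse>
    </source>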
data/test/plugin/test_in_tcp.rb
CHANGED
@@ -156,6 +156,19 @@ class TcpInputTest < Test::Unit::TestCase
     assert_equal hostname, event[2]['host']
   end
 
+  test "send_keepalive_packet_can_be_enabled" do
+    d = create_driver(base_config + %!
+      format none
+      send_keepalive_packet true
+    !)
+    assert_true d.instance.send_keepalive_packet
+
+    d = create_driver(base_config + %!
+      format none
+    !)
+    assert_false d.instance.send_keepalive_packet
+  end
+
   test 'source_address_key' do
     d = create_driver(base_config + %!
       format none
@@ -205,13 +218,13 @@ class TcpInputTest < Test::Unit::TestCase
           </client>
         </security>
       !)
-      d.run(
+      d.run(expect_records: 1, timeout: 2) do
         create_tcp_socket('127.0.0.1', @port) do |sock|
           sock.send("hello\n", 0)
         end
       end
 
-      assert_equal 1, d.
+      assert_equal 1, d.logs.count { |l| l =~ /anonymous client/ }
       assert_equal 0, d.events.size
     end
   end
@@ -240,4 +253,76 @@ class TcpInputTest < Test::Unit::TestCase
       assert_equal 'hello', event[2]['msg']
     end
   end
+
+  sub_test_case "message_length_limit" do
+    data("batch_emit", { extract: "" }, keep: true)
+    data("single_emit", { extract: "<extract>\ntag_key tag\n</extract>\n" }, keep: true)
+    test "drop records exceeding limit" do |data|
+      message_length_limit = 10
+      d = create_driver(base_config + %!
+        message_length_limit #{message_length_limit}
+        <parse>
+          @type none
+        </parse>
+        #{data[:extract]}
+      !)
+      d.run(expect_records: 2, timeout: 10) do
+        create_tcp_socket('127.0.0.1', @port) do |sock|
+          sock.send("a" * message_length_limit + "\n", 0)
+          sock.send("b" * (message_length_limit + 1) + "\n", 0)
+          sock.send("c" * (message_length_limit - 1) + "\n", 0)
+        end
+      end
+
+      expected_records = [
+        "a" * message_length_limit,
+        "c" * (message_length_limit - 1)
+      ]
+      actual_records = d.events.collect do |event|
+        event[2]["message"]
+      end
+
+      assert_equal expected_records, actual_records
+    end
+
+    test "clear buffer and discard the subsequent data until the next delimiter" do |data|
+      message_length_limit = 12
+      d = create_driver(base_config + %!
+        message_length_limit #{message_length_limit}
+        delimiter ";"
+        <parse>
+          @type json
+        </parse>
+        #{data[:extract]}
+      !)
+      d.run(expect_records: 1, timeout: 10) do
+        create_tcp_socket('127.0.0.1', @port) do |sock|
+          sock.send('{"message":', 0)
+          sock.send('"hello', 0)
+          sleep 1 # To make the server read data and clear the buffer here.
+          sock.send('world!"};', 0) # This subsequent data must be discarded so that a parsing failure doesn't occur.
+          sock.send('{"k":"v"};', 0) # This will succeed to parse.
+        end
+      end
+
+      logs = d.logs.collect do |log|
+        log.gsub(/\A\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [-+]\d{4} /, "")
+      end
+      actual_records = d.events.collect do |event|
+        event[2]
+      end
+
+      assert_equal(
+        {
+          # Asserting that '[warn]: pattern not matched message="world!\"}"' warning does not occur.
+          logs: ['[info]: The buffer size exceeds \'message_length_limit\', cleared: limit=12 size=17 head="{\"message\":\"hello"' + "\n"],
+          records: [{"k" => "v"}],
+        },
+        {
+          logs: logs[1..],
+          records: actual_records,
+        }
+      )
+    end
+  end
 end
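The in_tcp changes above verify two behaviors: the `send_keepalive_packet` flag and the `message_length_limit`/`delimiter` handling that drops oversized frames and clears the read buffer. As a rough, illustrative configuration only (tag and port are placeholder values), the options combine like this; it mirrors the values used in the tests rather than prescribing production settings.

    <source>
      @type tcp
      # placeholder tag and port
      tag tcp.events
      port 5170
      bind 0.0.0.0
      # enable TCP keepalive on accepted client sockets
      send_keepalive_packet true
      # frames longer than this are dropped and the buffer is cleared, as the tests assert
      message_length_limit 10
      # the tests use ";" as the frame delimiter for the JSON case
      delimiter ";"
      <parse>
        @type none
      </parse>
    </source>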
data/test/plugin/test_in_udp.rb
CHANGED
@@ -265,4 +265,32 @@ class UdpInputTest < Test::Unit::TestCase
       end
     end
   end
+
+  test 'message_length_limit' do
+    message_length_limit = 32
+    d = create_driver(base_config + %!
+      format none
+      message_length_limit #{message_length_limit}
+    !)
+    d.run(expect_records: 3) do
+      create_udp_socket('127.0.0.1', @port) do |u|
+        3.times do |i|
+          u.send("#{i}" * 40 + "\n", 0)
+        end
+      end
+    end
+
+    if Fluent.windows?
+      expected_records = []
+    else
+      expected_records = 3.times.collect do |i|
+        "#{i}" * message_length_limit
+      end
+    end
+    actual_records = d.events.collect do |event|
+      event[2]["message"]
+    end
+
+    assert_equal expected_records, actual_records
+  end
 end
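in_udp gains the same `message_length_limit` guard; the test above expects records to be capped at the limit (and, on Windows, to be dropped). A comparable sketch, again with placeholder tag and port values:

    <source>
      @type udp
      # placeholder tag and port
      tag udp.events
      port 5160
      bind 0.0.0.0
      # cap on the bytes read from a single datagram, matching the value in the test
      message_length_limit 32
      <parse>
        @type none
      </parse>
    </source>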
data/test/plugin/test_in_unix.rb
CHANGED
@@ -174,8 +174,8 @@ class UnixInputTest < Test::Unit::TestCase
     assert_equal 0, @d.events.size
 
     logs = @d.instance.log.logs
-    assert_equal 1, logs.
+    assert_equal 1, logs.count { |line|
       line =~ / \[warn\]: incoming data is broken: msg=#{data.inspect}/
-    }
+    }, "should not accept broken chunk"
   end
 end unless Fluent.windows?
data/test/plugin/test_multi_output.rb
CHANGED
@@ -150,7 +150,7 @@ class MultiOutputTest < Test::Unit::TestCase
     log_size_for_metrics_plugin_helper = 4
     expected_warn_log_size = log_size_for_multi_output_itself + log_size_for_metrics_plugin_helper
     logs = @i.log.out.logs
-    assert{ logs.
+    assert{ logs.count{|log| log.include?('[warn]') && log.include?("'type' is deprecated parameter name. use '@type' instead.") } == expected_warn_log_size }
   end
 
   test '#emit_events calls #process always' do
data/test/plugin/test_out_exec_filter.rb
CHANGED
@@ -597,8 +597,8 @@ class ExecFilterOutputTest < Test::Unit::TestCase
     # the number of pids should be same with number of child processes
     assert_equal 2, pid_list.size
     logs = d.instance.log.out.logs
-    assert_equal 2, logs.
-    assert_equal 2, logs.
+    assert_equal 2, logs.count { |l| l.include?('child process exits with error code') }
+    assert_equal 2, logs.count { |l| l.include?('respawning child process') }
 
   ensure
     d.run(start: false, shutdown: true)
data/test/plugin/test_out_file.rb
CHANGED
@@ -264,8 +264,8 @@ class FileOutputTest < Test::Unit::TestCase
     assert !(Dir.exist?("#{TMP_DIR}/my.data/a"))
     assert !(Dir.exist?("#{TMP_DIR}/your.data/a"))
     buffer_files = Dir.entries("#{TMP_DIR}/buf_full").reject{|e| e =~ /^\.+$/ }
-    assert_equal 2, buffer_files.
-    assert_equal 2, buffer_files.
+    assert_equal 2, buffer_files.count{|n| n.end_with?('.meta') }
+    assert_equal 2, buffer_files.count{|n| !n.end_with?('.meta') }
 
     m1 = d.instance.metadata('my.data', t1, {"type" => "a"})
     m2 = d.instance.metadata('your.data', t3, {"type" => "a"})
data/test/plugin/test_out_forward.rb
CHANGED
@@ -1331,26 +1331,22 @@ EOL
     d = create_driver(output_conf)
     d.instance_start
 
-
-
-
-
-
-
-
-
-
-
-
-    d.
-
-
-        node.send_data('test', chunk) rescue nil
-      end
+    chunk = Fluent::Plugin::Buffer::MemoryChunk.new(Fluent::Plugin::Buffer::Metadata.new(nil, nil, nil))
+    mock.proxy(d.instance).socket_create_tcp(TARGET_HOST, @target_port,
+                                             linger_timeout: anything,
+                                             send_timeout: anything,
+                                             recv_timeout: anything,
+                                             connect_timeout: anything) { |sock|
+      mock(sock).close.once; sock
+    }.twice
+
+    target_input_driver.run(timeout: 15) do
+      d.run do
+        node = d.instance.nodes.first
+        2.times do
+          node.send_data('test', chunk) rescue nil
         end
       end
-    ensure
-      d.instance_shutdown
     end
   end
 end
data/test/plugin/test_out_http.rb
CHANGED
@@ -378,6 +378,7 @@ class HTTPOutputTest < Test::Unit::TestCase
         password hello?
       </auth>
     ])
+    d.instance.system_config_override(root_dir: TMP_DIR) # Backup files are generated in TMP_DIR.
     d.run(default_tag: 'test.http', shutdown: false) do
       test_events.each { |event|
         d.feed(event)