fluentd 1.16.3 → 1.16.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f8a64e2943ee3f6abc8c90018a85205b0bd54a8fc5f367da3d5e9ce53f03db4f
-  data.tar.gz: 0d06e0bec37b228eea16a92353a70b0198a9f6b3fef88627898f6ef16ae2322b
+  metadata.gz: c5692dc727fe7cfb5bc8067382585203321fe93e9cdd8d081c97d31239579a5f
+  data.tar.gz: 067f6e0ade694fc438d600c1d2c7b18f251c6b64e26b4ac4164a9d706ee4ab29
 SHA512:
-  metadata.gz: 90e357ccd0f4c02013739c538585d3a1b79f3f3d423efc4bd668496e7b318b87358e3bc0618393b85ce07885445734dadbdc4112a7cd082be4aa89d738d94066
-  data.tar.gz: bfa4f0817153e79538b24af9af0a840cd6ecd6a8ecc2a0a1536a5268b7d29aae7671371181050dac4ba670538d9eb40efbbd9634936217caf7b3ecec090b67e1
+  metadata.gz: d991817aa42a3a773f58150c160cf4a328020ed64c8f50a5be6f68064bbbcfbea741d54ed692c50a6dec94244c03a60337508ac7b3af9ce022068cb5a4fe2a48
+  data.tar.gz: 9e05ad8b3558a377cba7abe379032a2e6aa8070b85d84c6f3f7e6d9a4a7594015d79868a67a8fe36e16e48e38bb87a3cfc0c936c7936e6a038257b2d9588064c
@@ -1,27 +1,22 @@
-name: Testing on Ubuntu
+name: Test
 
 on:
   push:
-    branches: [master, v1.16]
+    branches: [v1.16]
   pull_request:
-    branches: [master, v1.16]
+    branches: [v1.16]
 
 jobs:
   test:
     runs-on: ${{ matrix.os }}
-    continue-on-error: ${{ matrix.experimental }}
+    continue-on-error: false
     strategy:
       fail-fast: false
       matrix:
-        ruby-version: ['3.2', '3.1', '3.0', '2.7']
-        os: [ubuntu-latest]
-        experimental: [false]
-        include:
-          - ruby-version: head
-            os: ubuntu-latest
-            experimental: true
+        os: ['ubuntu-latest', 'macos-latest', 'windows-latest']
+        ruby-version: ['3.3', '3.2', '3.1', '3.0', '2.7']
 
-    name: Unit testing with Ruby ${{ matrix.ruby-version }} on ${{ matrix.os }}
+    name: Ruby ${{ matrix.ruby-version }} on ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v3
       - name: Set up Ruby
@@ -29,8 +24,9 @@ jobs:
         with:
          ruby-version: ${{ matrix.ruby-version }}
       - name: Install addons
+        if: ${{ matrix.os == 'ubuntu-latest' }}
        run: sudo apt-get install libgmp3-dev libcap-ng-dev
      - name: Install dependencies
        run: bundle install
      - name: Run tests
-        run: bundle exec rake test
+        run: bundle exec rake test TESTOPTS=-v
data/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
 # v1.16
 
+## Release v1.16.4 - 2024/03/14
+
+### Bug Fix
+
+* Fix to avoid processing discarded chunks in write_step_by_step.
+  This prevents a pile of IOError exceptions when many `chunk
+  bytes limit exceeds` errors occur.
+  https://github.com/fluent/fluentd/pull/4342
+* in_tail: Fix tail watchers in the `rotate_wait` state not being managed.
+  https://github.com/fluent/fluentd/pull/4334
+
+### Misc
+
+* buffer: Avoid unnecessary log processing. This improves performance.
+  https://github.com/fluent/fluentd/pull/4331
+
 ## Release v1.16.3 - 2023/11/14
 
 ### Bug Fix
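
Note: the "avoid unnecessary log processing" entry corresponds to the `log.on_trace` guards applied in the buffer hunks below. The block passed to `log.on_trace` is evaluated only when trace logging is enabled, so the hex dump of the chunk id and the keyword-argument hash are skipped otherwise. A minimal sketch of the pattern with an illustrative logger class (not fluentd's actual logger):

```ruby
# Minimal illustration of lazy trace logging (hypothetical Logger-like class).
class LazyLogger
  def initialize(trace_enabled)
    @trace_enabled = trace_enabled
  end

  # Evaluate the block only when trace level is active, mirroring log.on_trace.
  def on_trace
    yield if @trace_enabled
  end

  def trace(message, **fields)
    puts "[trace] #{message} #{fields}"
  end
end

def expensive_hex_dump(chunk_id)
  chunk_id.unpack1("H*") # stands in for dump_unique_id_hex
end

log = LazyLogger.new(false)
chunk_id = "\x01\x02\x03\x04"

# Without the guard, expensive_hex_dump runs even though nothing is logged:
# log.trace "chunk purged", chunk_id: expensive_hex_dump(chunk_id)

# With the guard, the block (and the hex dump) is skipped entirely:
log.on_trace { log.trace "chunk purged", chunk_id: expensive_hex_dump(chunk_id) }
```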
@@ -580,7 +580,7 @@ module Fluent
       chunk = @dequeued.delete(chunk_id)
       return false unless chunk # already purged by other thread
       @queue.unshift(chunk)
-      log.trace "chunk taken back", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: chunk.metadata
+      log.on_trace { log.trace "chunk taken back", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: chunk.metadata }
       @queued_num[chunk.metadata] += 1 # BUG if nil
       @dequeued_num[chunk.metadata] -= 1
     end
@@ -610,7 +610,7 @@ module Fluent
         @queued_num.delete(metadata)
         @dequeued_num.delete(metadata)
       end
-      log.trace "chunk purged", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: metadata
+      log.on_trace { log.trace "chunk purged", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: metadata }
     end
 
     nil
@@ -728,7 +728,6 @@ module Fluent
 
     def write_step_by_step(metadata, data, format, splits_count, &block)
       splits = []
-      errors = []
       if splits_count > data.size
         splits_count = data.size
       end
@@ -749,16 +748,14 @@ module Fluent
       modified_chunks = []
       modified_metadata = metadata
       get_next_chunk = ->(){
-        c = if staged_chunk_used
-          # Staging new chunk here is bad idea:
-          # Recovering whole state including newly staged chunks is much harder than current implementation.
-          modified_metadata = modified_metadata.dup_next
-          generate_chunk(modified_metadata)
-        else
-          synchronize { @stage[modified_metadata] ||= generate_chunk(modified_metadata).staged! }
-        end
-        modified_chunks << c
-        c
+        if staged_chunk_used
+          # Staging new chunk here is bad idea:
+          # Recovering whole state including newly staged chunks is much harder than current implementation.
+          modified_metadata = modified_metadata.dup_next
+          generate_chunk(modified_metadata)
+        else
+          synchronize { @stage[modified_metadata] ||= generate_chunk(modified_metadata).staged! }
+        end
       }
 
       writing_splits_index = 0
@@ -766,6 +763,8 @@ module Fluent
 
       while writing_splits_index < splits.size
         chunk = get_next_chunk.call
+        errors = []
+        modified_chunks << {chunk: chunk, adding_bytesize: 0, errors: errors}
         chunk.synchronize do
           raise ShouldRetry unless chunk.writable?
           staged_chunk_used = true if chunk.staged?
@@ -851,15 +850,18 @@ module Fluent
             raise
           end
 
-          block.call(chunk, chunk.bytesize - original_bytesize, errors)
-          errors = []
+          modified_chunks.last[:adding_bytesize] = chunk.bytesize - original_bytesize
         end
       end
+      modified_chunks.each do |data|
+        block.call(data[:chunk], data[:adding_bytesize], data[:errors])
+      end
     rescue ShouldRetry
-      modified_chunks.each do |mc|
-        mc.rollback rescue nil
-        if mc.unstaged?
-          mc.purge rescue nil
+      modified_chunks.each do |data|
+        chunk = data[:chunk]
+        chunk.rollback rescue nil
+        if chunk.unstaged?
+          chunk.purge rescue nil
         end
       end
      enqueue_chunk(metadata) if enqueue_chunk_before_retry
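
Note: the restructured write_step_by_step above defers the caller's block until every split has been written. Each chunk it touches is recorded in modified_chunks together with its errors and added byte size, and block.call runs only after the loop completes, so a chunk that gets purged on ShouldRetry is never handed to the commit path. A minimal sketch of this "record everything, commit once at the end" shape, using hypothetical chunk objects rather than fluentd's Buffer API:

```ruby
# Hypothetical chunk with the three operations the pattern needs.
Chunk = Struct.new(:name, :bytes) do
  def append(data); self.bytes += data.bytesize; end
  def rollback;     puts "rollback #{name}";      end
  def commit;       puts "commit #{name} (#{bytes} bytes)"; end
end

def write_all(splits)
  modified = []                      # every chunk we touched, plus bookkeeping
  splits.each_with_index do |data, i|
    chunk = Chunk.new("chunk#{i}", 0)
    modified << { chunk: chunk, adding_bytesize: 0 }
    chunk.append(data)
    raise "retry" if data.include?("too big")   # simulated mid-way failure
    modified.last[:adding_bytesize] = chunk.bytes
  end
  # Commit callbacks run only after *all* writes succeeded.
  modified.each { |m| m[:chunk].commit }
rescue RuntimeError
  # On failure, every touched chunk is rolled back and nothing was committed.
  modified.each { |m| m[:chunk].rollback }
end

write_all(["aaa", "bbb"])          # commits chunk0 and chunk1
write_all(["aaa", "too big data"]) # rolls back both, commits nothing
```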
@@ -52,6 +52,7 @@ module Fluent::Plugin
     super
     @paths = []
     @tails = {}
+    @tails_rotate_wait = {}
     @pf_file = nil
     @pf = nil
     @ignore_list = []
@@ -267,6 +268,9 @@ module Fluent::Plugin
     @shutdown_start_time = Fluent::Clock.now
     # during shutdown phase, don't close io. It should be done in close after all threads are stopped. See close.
     stop_watchers(existence_path, immediate: true, remove_watcher: false)
+    @tails_rotate_wait.keys.each do |tw|
+      detach_watcher(tw, @tails_rotate_wait[tw][:ino], false)
+    end
     @pf_file.close if @pf_file
 
     super
@@ -275,6 +279,7 @@ module Fluent::Plugin
   def close
     super
     # close file handles after all threads stopped (in #close of thread plugin helper)
+    # It may be because we need to wait for IOHandler.ready_to_shutdown()
     close_watcher_handles
   end
 
@@ -516,6 +521,9 @@ module Fluent::Plugin
         tw.close
       end
     end
+    @tails_rotate_wait.keys.each do |tw|
+      tw.close
+    end
   end
 
   # refresh_watchers calls @tails.keys so we don't use stop_watcher -> start_watcher sequence for safety.
@@ -570,10 +578,6 @@
       detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
     end
 
-    # TailWatcher#close is called by another thread at shutdown phase.
-    # It causes 'can't modify string; temporarily locked' error in IOHandler
-    # so adding close_io argument to avoid this problem.
-    # At shutdown, IOHandler's io will be released automatically after detached the event loop
     def detach_watcher(tw, ino, close_io = true)
       if @follow_inodes && tw.ino != ino
         log.warn("detach_watcher could be detaching an unexpected tail_watcher with a different ino.",
@@ -604,7 +608,11 @@
       if @open_on_every_update
         # Detach now because it's already closed, waiting it doesn't make sense.
         detach_watcher(tw, ino)
-      elsif throttling_is_enabled?(tw)
+      end
+
+      return if @tails_rotate_wait[tw]
+
+      if throttling_is_enabled?(tw)
         # When the throttling feature is enabled, it might not reach EOF yet.
         # Should ensure to read all contents before closing it, with keeping throttling.
         start_time_to_wait = Fluent::Clock.now
@@ -612,14 +620,18 @@
           elapsed = Fluent::Clock.now - start_time_to_wait
           if tw.eof? && elapsed >= @rotate_wait
             timer.detach
+            @tails_rotate_wait.delete(tw)
             detach_watcher(tw, ino)
           end
         end
+        @tails_rotate_wait[tw] = { ino: ino, timer: timer }
       else
         # when the throttling feature isn't enabled, just wait @rotate_wait
-        timer_execute(:in_tail_close_watcher, @rotate_wait, repeat: false) do
+        timer = timer_execute(:in_tail_close_watcher, @rotate_wait, repeat: false) do
+          @tails_rotate_wait.delete(tw)
           detach_watcher(tw, ino)
         end
+        @tails_rotate_wait[tw] = { ino: ino, timer: timer }
      end
    end
 
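Note: the in_tail changes above register every watcher that is waiting out rotate_wait in @tails_rotate_wait, so shutdown can detach those watchers explicitly and the close path can skip a watcher that is already scheduled. A minimal sketch of that bookkeeping idea, with a hypothetical watcher/timer registry rather than fluentd's TailWatcher and timer helper:

```ruby
# Hypothetical registry of watchers waiting out rotate_wait before being detached.
class RotateWaitRegistry
  def initialize
    @waiting = {}   # watcher => { ino:, timer: }
  end

  # Called when a rotated file's watcher should be closed after rotate_wait.
  def schedule_detach(watcher, ino, timer)
    return if @waiting.key?(watcher)   # already scheduled; avoid double handling
    @waiting[watcher] = { ino: ino, timer: timer }
  end

  # Timer callback: forget the watcher, then detach it.
  def on_timer_fired(watcher, &detach)
    entry = @waiting.delete(watcher)
    detach.call(watcher, entry[:ino]) if entry
  end

  # Shutdown path: detach everything that is still waiting.
  def detach_all(&detach)
    @waiting.each { |watcher, entry| detach.call(watcher, entry[:ino]) }
    @waiting.clear
  end
end

registry = RotateWaitRegistry.new
registry.schedule_detach(:tw1, 12345, :timer1)
registry.detach_all { |tw, ino| puts "detach #{tw} (inode #{ino})" }
```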
@@ -16,6 +16,6 @@
 
 module Fluent
 
-  VERSION = '1.16.3'
+  VERSION = '1.16.4'
 
 end
@@ -941,7 +941,7 @@ CONF
     '-external-encoding' => '--external-encoding=utf-8',
     '-internal-encoding' => '--internal-encoding=utf-8',
   )
-  test "-E option is set to RUBYOPT" do |opt|
+  test "-E option is set to RUBYOPT" do |base_opt|
     conf = <<CONF
 <source>
   @type dummy
@@ -952,6 +952,7 @@ CONF
 </match>
 CONF
     conf_path = create_conf_file('rubyopt_test.conf', conf)
+    opt = base_opt.dup
     opt << " #{ENV['RUBYOPT']}" if ENV['RUBYOPT']
     assert_log_matches(
       create_cmdline(conf_path),
@@ -991,9 +992,14 @@ CONF
 </match>
 CONF
     conf_path = create_conf_file('rubyopt_invalid_test.conf', conf)
+    if Gem::Version.create(RUBY_VERSION) >= Gem::Version.create('3.3.0')
+      expected_phrase = 'ruby: invalid switch in RUBYOPT'
+    else
+      expected_phrase = 'Invalid option is passed to RUBYOPT'
+    end
     assert_log_matches(
       create_cmdline(conf_path),
-      'Invalid option is passed to RUBYOPT',
+      expected_phrase,
       env: { 'RUBYOPT' => 'a' },
     )
   end
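
Note: the RUBYOPT test above picks its expected error message by interpreter version because Ruby 3.3 changed the wording emitted for an invalid RUBYOPT switch. Gem::Version compares version segments numerically, which plain string comparison on RUBY_VERSION does not. A small, self-contained illustration of that gating pattern:

```ruby
require "rubygems" # provides Gem::Version (loaded by default on modern Ruby)

# Pick an expectation based on the running interpreter; string comparison would
# mis-order versions like "3.10.0" vs "3.3.0", Gem::Version compares segments.
expected_phrase =
  if Gem::Version.create(RUBY_VERSION) >= Gem::Version.create("3.3.0")
    "ruby: invalid switch in RUBYOPT"
  else
    "Invalid option is passed to RUBYOPT"
  end

puts "on Ruby #{RUBY_VERSION}, expecting: #{expected_phrase}"

# Segment-wise comparison, not lexicographic:
p Gem::Version.create("3.10.0") > Gem::Version.create("3.3.0")  # => true
p "3.10.0" > "3.3.0"                                            # => false
```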
@@ -850,6 +850,57 @@ class BufferTest < Test::Unit::TestCase
     test '#compress returns :text' do
       assert_equal :text, @p.compress
     end
+
+    # https://github.com/fluent/fluentd/issues/3089
+    test "closed chunk should not be committed" do
+      assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
+      assert_equal 0.95, @p.chunk_full_threshold
+
+      purge_count = 0
+
+      stub.proxy(@p).generate_chunk(anything) do |chunk|
+        stub.proxy(chunk).purge do |result|
+          purge_count += 1
+          result
+        end
+        stub.proxy(chunk).commit do |result|
+          assert_false(chunk.closed?)
+          result
+        end
+        stub.proxy(chunk).rollback do |result|
+          assert_false(chunk.closed?)
+          result
+        end
+        chunk
+      end
+
+      m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
+      small_row = "x" * 1024 * 400
+      big_row = "x" * 1024 * 1024 * 8 # exactly `chunk_limit_size`, so it doesn't cause BufferOverflowError.
+
+      # Write 42 events in 1 event stream; the last one triggers `ShouldRetry`.
+      @p.write({m => [small_row] * 40 + [big_row] + ["x"]})
+
+      # The above event stream will be split twice by `Buffer#write_step_by_step`:
+      #
+      # 1. `write_once`: 42 [events] * 1 [stream]
+      # 2. `write_step_by_step`: 4 [events] * 10 [streams] + 2 [events] * 1 [stream]
+      # 3. `write_step_by_step` (by `ShouldRetry`): 1 [event] * 42 [streams]
+      #
+      # The problematic data is built in the 2nd stage.
+      # In the 2nd stage, 5 streams are packed in a chunk
+      # ((1024 * 400) [bytes] * 4 [events] * 5 [streams] = 8192000 [bytes] < `chunk_limit_size` (8MB)).
+      # So 3 chunks are used to store all data.
+      # The 1st chunk is already staged by `write_once`.
+      # The 2nd & 3rd chunks are newly created as unstaged.
+      # The 3rd chunk is purged before `ShouldRetry`, which is no problem:
+      # https://github.com/fluent/fluentd/blob/7e9eba736ff40ad985341be800ddc46558be75f2/lib/fluent/plugin/buffer.rb#L850
+      # The 2nd chunk is purged in `rescue ShouldRetry`:
+      # https://github.com/fluent/fluentd/blob/7e9eba736ff40ad985341be800ddc46558be75f2/lib/fluent/plugin/buffer.rb#L862
+      # This causes the issue described in https://github.com/fluent/fluentd/issues/3089#issuecomment-1811839198
+
+      assert_equal 2, purge_count
+    end
   end
 
   sub_test_case 'standard format with configuration for test with lower chunk limit size' do
@@ -3016,6 +3016,92 @@ class TailInputTest < Test::Unit::TestCase
         },
       )
     end
+
+    def test_next_rotation_occurs_very_fast_while_old_TW_still_waiting_rotate_wait
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt*",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "follow_inodes" => "true",
+          "rotate_wait" => "3s",
+          "refresh_interval" => "1h",
+          # stat_watcher often calls `TailWatcher::on_notify` faster than creating a new log file,
+          # so disable it in order to reproduce the same condition stably.
+          "enable_stat_watcher" => "false",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        mock.proxy(tw).close.once # Note: Currently, there is no harm in duplicate calls.
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 6, timeout: 15) do
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file1 log2"}
+
+        sleep 1.5 # Need to be larger than 1s (the interval of watch_timer)
+
+        FileUtils.move("#{@tmp_dir}/tail.txt0", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "wb") {|f| f.puts "file2 log1"}
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file2 log2"}
+
+        sleep 1.5 # Need to be larger than 1s (the interval of watch_timer)
+
+        # Rotate again (Old TailWatcher waiting rotate_wait also calls update_watcher)
+        [1, 0].each do |i|
+          FileUtils.move("#{@tmp_dir}/tail.txt#{i}", "#{@tmp_dir}/tail.txt#{i + 1}")
+        end
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "wb") {|f| f.puts "file3 log1"}
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file3 log2"}
+
+        # Wait rotate_wait to confirm that TailWatcher.close is not called in duplicate.
+        # (Note: Currently, there is no harm in duplicate calls)
+        sleep 4
+      end
+
+      inode_0 = tail_watchers[0]&.ino
+      inode_1 = tail_watchers[1]&.ino
+      inode_2 = tail_watchers[2]&.ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2", "file3 log1", "file3 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt0", "#{@tmp_dir}/tail.txt0", "#{@tmp_dir}/tail.txt0"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_2],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            ["#{@tmp_dir}/tail.txt0", "0000000000000016", inode_0],
+            ["#{@tmp_dir}/tail.txt0", "0000000000000016", inode_1],
+            ["#{@tmp_dir}/tail.txt0", "0000000000000016", inode_2],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
   end
 
   sub_test_case "Update watchers for rotation without follow_inodes" do
@@ -3084,9 +3170,6 @@ class TailInputTest < Test::Unit::TestCase
        sleep 3
 
        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file3 log2"}
-
-        # Wait `rotate_wait` for file2 to make sure to close all IO handlers
-        sleep 3
      end
 
      inode_0 = tail_watchers[0]&.ino
@@ -3121,5 +3204,85 @@ class TailInputTest < Test::Unit::TestCase
         },
       )
     end
+
+    def test_next_rotation_occurs_very_fast_while_old_TW_still_waiting_rotate_wait
+      config = config_element(
+        "ROOT",
+        "",
+        {
+          "path" => "#{@tmp_dir}/tail.txt0",
+          "pos_file" => "#{@tmp_dir}/tail.pos",
+          "tag" => "t1",
+          "format" => "none",
+          "read_from_head" => "true",
+          "rotate_wait" => "3s",
+          "refresh_interval" => "1h",
+        }
+      )
+      d = create_driver(config, false)
+
+      tail_watchers = []
+      stub.proxy(d.instance).setup_watcher do |tw|
+        tail_watchers.append(tw)
+        mock.proxy(tw).close.once # Note: Currently, there is no harm in duplicate calls.
+        tw
+      end
+
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "wb") {|f| f.puts "file1 log1"}
+
+      d.run(expect_records: 6, timeout: 15) do
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file1 log2"}
+
+        sleep 1.5 # Need to be larger than 1s (the interval of watch_timer)
+
+        FileUtils.move("#{@tmp_dir}/tail.txt0", "#{@tmp_dir}/tail.txt" + "1")
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "wb") {|f| f.puts "file2 log1"}
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file2 log2"}
+
+        sleep 1.5 # Need to be larger than 1s (the interval of watch_timer)
+
+        # Rotate again (Old TailWatcher waiting rotate_wait also calls update_watcher)
+        [1, 0].each do |i|
+          FileUtils.move("#{@tmp_dir}/tail.txt#{i}", "#{@tmp_dir}/tail.txt#{i + 1}")
+        end
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "wb") {|f| f.puts "file3 log1"}
+        Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt0", "ab") {|f| f.puts "file3 log2"}
+
+        # Wait rotate_wait to confirm that TailWatcher.close is not called in duplicate.
+        # (Note: Currently, there is no harm in duplicate calls)
+        sleep 4
+      end
+
+      inode_0 = tail_watchers[0]&.ino
+      inode_1 = tail_watchers[1]&.ino
+      inode_2 = tail_watchers[2]&.ino
+      record_values = d.events.collect { |event| event[2]["message"] }.sort
+      position_entries = []
+      Fluent::FileWrapper.open("#{@tmp_dir}/tail.pos", "r") do |f|
+        f.readlines(chomp: true).each do |line|
+          values = line.split("\t")
+          position_entries.append([values[0], values[1], values[2].to_i(16)])
+        end
+      end
+
+      assert_equal(
+        {
+          record_values: ["file1 log1", "file1 log2", "file2 log1", "file2 log2", "file3 log1", "file3 log2"],
+          tail_watcher_paths: ["#{@tmp_dir}/tail.txt0", "#{@tmp_dir}/tail.txt0", "#{@tmp_dir}/tail.txt0"],
+          tail_watcher_inodes: [inode_0, inode_1, inode_2],
+          tail_watcher_io_handler_opened_statuses: [false, false, false],
+          position_entries: [
+            ["#{@tmp_dir}/tail.txt0", "0000000000000016", inode_2],
+          ],
+        },
+        {
+          record_values: record_values,
+          tail_watcher_paths: tail_watchers.collect { |tw| tw.path },
+          tail_watcher_inodes: tail_watchers.collect { |tw| tw.ino },
+          tail_watcher_io_handler_opened_statuses: tail_watchers.collect { |tw| tw.instance_variable_get(:@io_handler)&.opened? || false },
+          position_entries: position_entries
+        },
+      )
+    end
   end
 end
@@ -156,7 +156,14 @@ EOL
     normal_conf = config_element('match', '**', {}, [
       config_element('server', '', {'name' => 'test', 'host' => 'unexisting.yaaaaaaaaaaaaaay.host.example.com'})
     ])
-    assert_raise SocketError do
+
+    if Socket.const_defined?(:ResolutionError) # as of Ruby 3.3
+      error_class = Socket::ResolutionError
+    else
+      error_class = SocketError
+    end
+
+    assert_raise error_class do
       create_driver(normal_conf)
     end
 
@@ -165,7 +172,7 @@ EOL
     ])
     @d = d = create_driver(conf)
     expected_log = "failed to resolve node name when configured"
-    expected_detail = 'server="test" error_class=SocketError'
+    expected_detail = "server=\"test\" error_class=#{error_class.name}"
     logs = d.logs
     assert{ logs.any?{|log| log.include?(expected_log) && log.include?(expected_detail) } }
   end
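
Note: the test above switches its expected exception class because Ruby 3.3 introduced Socket::ResolutionError, a SocketError subclass raised for name-resolution failures, and probing with Socket.const_defined? keeps one test working across Ruby versions. A small self-contained illustration of the same feature-detection idea (the hostname is illustrative):

```ruby
require "socket"

# Ruby 3.3+ raises Socket::ResolutionError (a SocketError subclass) for
# name-resolution failures; older Rubies raise plain SocketError.
resolution_error_class =
  if Socket.const_defined?(:ResolutionError)
    Socket::ResolutionError
  else
    SocketError
  end

begin
  # A host under .invalid is guaranteed not to resolve (RFC 2606).
  Socket.getaddrinfo("unresolvable.host.invalid", 80)
rescue resolution_error_class => e
  puts "resolution failed as expected: #{e.class}: #{e.message}"
end
```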
@@ -1241,27 +1248,22 @@ EOL
     target_input_driver = create_target_input_driver(conf: target_config)
     output_conf = config
     d = create_driver(output_conf)
-    d.instance_start
 
-    begin
-      chunk = Fluent::Plugin::Buffer::MemoryChunk.new(Fluent::Plugin::Buffer::Metadata.new(nil, nil, nil))
-      mock.proxy(d.instance).socket_create_tcp(TARGET_HOST, @target_port,
-                                               linger_timeout: anything,
-                                               send_timeout: anything,
-                                               recv_timeout: anything,
-                                               connect_timeout: anything
-                                              ) { |sock| mock(sock).close.once; sock }.twice
+    chunk = Fluent::Plugin::Buffer::MemoryChunk.new(Fluent::Plugin::Buffer::Metadata.new(nil, nil, nil))
+    mock.proxy(d.instance).socket_create_tcp(TARGET_HOST, @target_port,
+                                             linger_timeout: anything,
+                                             send_timeout: anything,
+                                             recv_timeout: anything,
+                                             connect_timeout: anything
+                                            ) { |sock| mock(sock).close.once; sock }.twice
 
-    target_input_driver.run(timeout: 15) do
-      d.run(shutdown: false) do
-        node = d.instance.nodes.first
-        2.times do
-          node.send_data('test', chunk) rescue nil
-        end
+    target_input_driver.run(timeout: 15) do
+      d.run do
+        node = d.instance.nodes.first
+        2.times do
+          node.send_data('test', chunk) rescue nil
         end
       end
-    ensure
-      d.instance_shutdown
     end
   end
 
@@ -1275,7 +1277,6 @@ EOL
        port #{@target_port}
      </server>
    ])
-    d.instance_start
    assert_nothing_raised { d.run }
  end
 
@@ -1287,33 +1288,28 @@ EOL
       keepalive_timeout 2
     ]
     d = create_driver(output_conf)
-    d.instance_start
 
-    begin
-      chunk = Fluent::Plugin::Buffer::MemoryChunk.new(Fluent::Plugin::Buffer::Metadata.new(nil, nil, nil))
-      mock.proxy(d.instance).socket_create_tcp(TARGET_HOST, @target_port,
-                                               linger_timeout: anything,
-                                               send_timeout: anything,
-                                               recv_timeout: anything,
-                                               connect_timeout: anything
-                                              ) { |sock| mock(sock).close.once; sock }.once
+    chunk = Fluent::Plugin::Buffer::MemoryChunk.new(Fluent::Plugin::Buffer::Metadata.new(nil, nil, nil))
+    mock.proxy(d.instance).socket_create_tcp(TARGET_HOST, @target_port,
+                                             linger_timeout: anything,
+                                             send_timeout: anything,
+                                             recv_timeout: anything,
+                                             connect_timeout: anything
+                                            ) { |sock| mock(sock).close.once; sock }.once
 
-    target_input_driver.run(timeout: 15) do
-      d.run(shutdown: false) do
-        node = d.instance.nodes.first
-        2.times do
-          node.send_data('test', chunk) rescue nil
-        end
+    target_input_driver.run(timeout: 15) do
+      d.run do
+        node = d.instance.nodes.first
+        2.times do
+          node.send_data('test', chunk) rescue nil
        end
      end
-    ensure
-      d.instance_shutdown
    end
  end
 
  test 'create timer of purging obsolete sockets' do
    output_conf = config + %[keepalive true]
-    d = create_driver(output_conf)
+    @d = d = create_driver(output_conf)
 
    mock(d.instance).timer_execute(:out_forward_heartbeat_request, 1).once
    mock(d.instance).timer_execute(:out_forward_keep_alived_socket_watcher, 5).once
@@ -1329,7 +1325,6 @@ EOL
      keepalive_timeout 2
    ]
    d = create_driver(output_conf)
-    d.instance_start
 
    chunk = Fluent::Plugin::Buffer::MemoryChunk.new(Fluent::Plugin::Buffer::Metadata.new(nil, nil, nil))
    mock.proxy(d.instance).socket_create_tcp(TARGET_HOST, @target_port,
@@ -515,6 +515,9 @@ class ChildProcessTest < Test::Unit::TestCase
   end
 
   test 'can scrub characters without exceptions' do
+    if Gem::Version.create(RUBY_VERSION) >= Gem::Version.create('3.3.0')
+      pend "Behaviour of IO#set_encoding is changed as of Ruby 3.3 (#4058)"
+    end
     m = Mutex.new
     str = nil
     Timeout.timeout(TEST_DEADLOCK_TIMEOUT) do
@@ -529,19 +532,25 @@ class ChildProcessTest < Test::Unit::TestCase
       sleep TEST_WAIT_INTERVAL_FOR_BLOCK_RUNNING until m.locked? || ran
       m.lock
       assert_equal Encoding.find('utf-8'), str.encoding
-      expected = "\xEF\xBF\xBD\xEF\xBF\xBD\x00\xEF\xBF\xBD\xEF\xBF\xBD".force_encoding("utf-8")
+      replacement = "\uFFFD" # U+FFFD (REPLACEMENT CHARACTER)
+      nul = "\x00" # U+0000 (NUL)
+      expected = replacement * 2 + nul + replacement * 2
       assert_equal expected, str
       @d.stop; @d.shutdown; @d.close; @d.terminate
     end
   end
 
   test 'can scrub characters without exceptions and replace specified chars' do
+    if Gem::Version.create(RUBY_VERSION) >= Gem::Version.create('3.3.0')
+      pend "Behaviour of IO#set_encoding is changed as of Ruby 3.3 (#4058)"
+    end
     m = Mutex.new
     str = nil
+    replacement = "?"
     Timeout.timeout(TEST_DEADLOCK_TIMEOUT) do
       ran = false
       args = ['-e', 'STDOUT.set_encoding("ascii-8bit"); STDOUT.write "\xFF\xFF\x00\xF0\xF0"']
-      @d.child_process_execute(:t13b, "ruby", arguments: args, mode: [:read], scrub: true, replace_string: '?') do |io|
+      @d.child_process_execute(:t13b, "ruby", arguments: args, mode: [:read], scrub: true, replace_string: replacement) do |io|
        m.lock
        ran = true
        str = io.read
@@ -550,7 +559,8 @@ class ChildProcessTest < Test::Unit::TestCase
      sleep TEST_WAIT_INTERVAL_FOR_BLOCK_RUNNING until m.locked? || ran
      m.lock
      assert_equal Encoding.find('utf-8'), str.encoding
-      expected = "??\x00??".force_encoding("utf-8")
+      nul = "\x00" # U+0000 (NUL)
+      expected = replacement * 2 + nul + replacement * 2
      assert_equal expected, str
      @d.stop; @d.shutdown; @d.close; @d.terminate
    end
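
Note: these tests now build the expected output from the replacement character instead of hard-coded escaped bytes, which matches what scrubbing means conceptually: invalid UTF-8 byte sequences are replaced (with U+FFFD by default, or a caller-supplied string), while valid bytes such as NUL pass through. A tiny standalone illustration using Ruby's String#scrub, which behaves analogously to the helper's scrub: true option:

```ruby
# Bytes that are not valid UTF-8, as if read from a child process's output.
raw = "\xFF\xFF\x00\xF0\xF0".dup.force_encoding("utf-8")

p raw.valid_encoding?    # => false
p raw.scrub              # default replacement: U+FFFD for each invalid sequence
p raw.scrub("?")         # caller-supplied replacement string, like replace_string

# The NUL byte is valid UTF-8, so it is preserved by scrubbing.
p raw.scrub("?").bytes   # => [63, 63, 0, 63, 63]
```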
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: fluentd
 version: !ruby/object:Gem::Version
-  version: 1.16.3
+  version: 1.16.4
 platform: ruby
 authors:
 - Sadayuki Furuhashi
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-11-14 00:00:00.000000000 Z
+date: 2024-03-14 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -380,14 +380,12 @@ extra_rdoc_files: []
 files:
 - ".deepsource.toml"
 - ".github/ISSUE_TEMPLATE.md"
-- ".github/ISSUE_TEMPLATE/bug_report.yaml"
+- ".github/ISSUE_TEMPLATE/bug_report.yml"
 - ".github/ISSUE_TEMPLATE/config.yml"
-- ".github/ISSUE_TEMPLATE/feature_request.yaml"
+- ".github/ISSUE_TEMPLATE/feature_request.yml"
 - ".github/PULL_REQUEST_TEMPLATE.md"
-- ".github/workflows/linux-test.yaml"
-- ".github/workflows/macos-test.yaml"
 - ".github/workflows/stale-actions.yml"
-- ".github/workflows/windows-test.yaml"
+- ".github/workflows/test.yml"
 - ".gitignore"
 - ADOPTERS.md
 - AUTHORS
@@ -1,34 +0,0 @@
-name: Testing on macOS
-
-on:
-  push:
-    branches: [master, v1.16]
-  pull_request:
-    branches: [master, v1.16]
-
-jobs:
-  test:
-    runs-on: ${{ matrix.os }}
-    continue-on-error: ${{ matrix.experimental }}
-    strategy:
-      fail-fast: false
-      matrix:
-        ruby-version: ['3.2', '3.1', '3.0', '2.7']
-        os: [macos-latest]
-        experimental: [true]
-        include:
-          - ruby-version: head
-            os: macos-latest
-            experimental: true
-
-    name: Unit testing with Ruby ${{ matrix.ruby-version }} on ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: ${{ matrix.ruby-version }}
-      - name: Install dependencies
-        run: bundle install
-      - name: Run tests
-        run: bundle exec rake test
@@ -1,49 +0,0 @@
-name: Testing on Windows
-
-on:
-  push:
-    branches: [master, v1.16]
-  pull_request:
-    branches: [master, v1.16]
-
-jobs:
-  test:
-    runs-on: ${{ matrix.os }}
-    continue-on-error: ${{ matrix.experimental }}
-    strategy:
-      fail-fast: false
-      matrix:
-        ruby-version: ['3.2', '3.1', '2.7']
-        os:
-          - windows-latest
-        experimental: [false]
-        include:
-          - ruby-version: head
-            os: windows-latest
-            experimental: true
-          - ruby-version: '3.0.3'
-            os: windows-latest
-            experimental: false
-            # On Ruby 3.0, we need to use fiddle 1.0.8 or later to retrieve correct
-            # error code. In addition, we have to specify the path of fiddle by RUBYLIB
-            # because RubyInstaller loads Ruby's bundled fiddle before initializing gem.
-            # See also:
-            # * https://github.com/ruby/fiddle/issues/72
-            # * https://bugs.ruby-lang.org/issues/17813
-            # * https://github.com/oneclick/rubyinstaller2/blob/8225034c22152d8195bc0aabc42a956c79d6c712/lib/ruby_installer/build/dll_directory.rb
-            ruby-lib-opt: RUBYLIB=%RUNNER_TOOL_CACHE%/Ruby/3.0.3/x64/lib/ruby/gems/3.0.0/gems/fiddle-1.1.0/lib
-
-    name: Unit testing with Ruby ${{ matrix.ruby-version }} on ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: ${{ matrix.ruby-version }}
-      - name: Add Fiddle 1.1.0
-        if: ${{ matrix.ruby-version == '3.0.3' }}
-        run: gem install fiddle --version 1.1.0
-      - name: Install dependencies
-        run: ridk exec bundle install
-      - name: Run tests
-        run: bundle exec rake test TESTOPTS=-v ${{ matrix.ruby-lib-opt }}