fluentd 1.16.0-x64-mingw32 → 1.16.2-x64-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. checksums.yaml +4 -4
  2. data/.github/ISSUE_TEMPLATE/bug_report.yaml +1 -0
  3. data/.github/ISSUE_TEMPLATE/feature_request.yaml +1 -0
  4. data/.github/workflows/stale-actions.yml +24 -0
  5. data/CHANGELOG.md +74 -0
  6. data/CONTRIBUTING.md +1 -1
  7. data/MAINTAINERS.md +3 -3
  8. data/SECURITY.md +5 -9
  9. data/fluentd.gemspec +1 -1
  10. data/lib/fluent/command/ctl.rb +2 -2
  11. data/lib/fluent/command/plugin_config_formatter.rb +1 -1
  12. data/lib/fluent/config/dsl.rb +1 -1
  13. data/lib/fluent/config/v1_parser.rb +2 -2
  14. data/lib/fluent/counter/server.rb +1 -1
  15. data/lib/fluent/counter/validator.rb +3 -3
  16. data/lib/fluent/engine.rb +1 -1
  17. data/lib/fluent/event.rb +8 -4
  18. data/lib/fluent/log.rb +9 -0
  19. data/lib/fluent/match.rb +1 -1
  20. data/lib/fluent/msgpack_factory.rb +6 -1
  21. data/lib/fluent/plugin/base.rb +1 -1
  22. data/lib/fluent/plugin/filter_record_transformer.rb +1 -1
  23. data/lib/fluent/plugin/in_forward.rb +1 -1
  24. data/lib/fluent/plugin/in_http.rb +8 -8
  25. data/lib/fluent/plugin/in_sample.rb +1 -1
  26. data/lib/fluent/plugin/in_tail/position_file.rb +32 -18
  27. data/lib/fluent/plugin/in_tail.rb +58 -24
  28. data/lib/fluent/plugin/in_tcp.rb +43 -0
  29. data/lib/fluent/plugin/out_exec_filter.rb +2 -2
  30. data/lib/fluent/plugin/output.rb +2 -2
  31. data/lib/fluent/plugin/parser_json.rb +1 -1
  32. data/lib/fluent/plugin_helper/event_loop.rb +2 -2
  33. data/lib/fluent/plugin_helper/record_accessor.rb +1 -1
  34. data/lib/fluent/plugin_helper/server.rb +8 -0
  35. data/lib/fluent/plugin_helper/thread.rb +3 -3
  36. data/lib/fluent/plugin_id.rb +1 -1
  37. data/lib/fluent/supervisor.rb +1 -1
  38. data/lib/fluent/version.rb +1 -1
  39. data/templates/new_gem/test/helper.rb.erb +0 -1
  40. data/test/plugin/in_tail/test_position_file.rb +31 -1
  41. data/test/plugin/test_base.rb +1 -1
  42. data/test/plugin/test_buffer_chunk.rb +11 -0
  43. data/test/plugin/test_in_forward.rb +9 -9
  44. data/test/plugin/test_in_tail.rb +379 -0
  45. data/test/plugin/test_in_tcp.rb +74 -4
  46. data/test/plugin/test_in_udp.rb +28 -0
  47. data/test/plugin/test_in_unix.rb +2 -2
  48. data/test/plugin/test_multi_output.rb +1 -1
  49. data/test/plugin/test_out_exec_filter.rb +2 -2
  50. data/test/plugin/test_out_file.rb +2 -2
  51. data/test/plugin/test_output.rb +12 -12
  52. data/test/plugin/test_output_as_buffered.rb +44 -44
  53. data/test/plugin/test_output_as_buffered_compress.rb +32 -18
  54. data/test/plugin/test_output_as_buffered_retries.rb +1 -1
  55. data/test/plugin/test_output_as_buffered_secondary.rb +2 -2
  56. data/test/plugin_helper/test_child_process.rb +2 -2
  57. data/test/plugin_helper/test_server.rb +50 -1
  58. data/test/test_log.rb +38 -1
  59. data/test/test_msgpack_factory.rb +32 -0
  60. data/test/test_supervisor.rb +13 -0
  61. metadata +5 -4
@@ -370,17 +370,30 @@ module Fluent::Plugin
370
370
  def refresh_watchers
371
371
  target_paths_hash = expand_paths
372
372
  existence_paths_hash = existence_path
373
-
373
+
374
374
  log.debug {
375
375
  target_paths_str = target_paths_hash.collect { |key, target_info| target_info.path }.join(",")
376
376
  existence_paths_str = existence_paths_hash.collect { |key, target_info| target_info.path }.join(",")
377
377
  "tailing paths: target = #{target_paths_str} | existing = #{existence_paths_str}"
378
378
  }
379
379
 
380
- unwatched_hash = existence_paths_hash.reject {|key, value| target_paths_hash.key?(key)}
380
+ if !@follow_inodes
381
+ need_unwatch_in_stop_watchers = true
382
+ else
383
+ # When using @follow_inodes, need this to unwatch the rotated old inode when it disappears.
384
+ # After `update_watcher` detaches an old TailWatcher, the inode is lost from the `@tails`.
385
+ # So that inode can't be contained in `removed_hash`, and can't be unwatched by `stop_watchers`.
386
+ #
387
+ # This logic may work for `@follow_inodes false` too.
388
+ # Just limiting the case to suppress the impact on existing logic.
389
+ @pf&.unwatch_removed_targets(target_paths_hash)
390
+ need_unwatch_in_stop_watchers = false
391
+ end
392
+
393
+ removed_hash = existence_paths_hash.reject {|key, value| target_paths_hash.key?(key)}
381
394
  added_hash = target_paths_hash.reject {|key, value| existence_paths_hash.key?(key)}
382
395
 
383
- stop_watchers(unwatched_hash, immediate: false, unwatched: true) unless unwatched_hash.empty?
396
+ stop_watchers(removed_hash, unwatched: need_unwatch_in_stop_watchers) unless removed_hash.empty?
384
397
  start_watchers(added_hash) unless added_hash.empty?
385
398
  @startup = false if @startup
386
399
  end
@@ -484,8 +497,26 @@ module Fluent::Plugin
484
497
  end
485
498
 
486
499
  # refresh_watchers calls @tails.keys so we don't use stop_watcher -> start_watcher sequence for safety.
487
- def update_watcher(target_info, pe)
488
- path = target_info.path
500
+ def update_watcher(tail_watcher, pe, new_inode)
501
+ # TODO we should use another callback for this.
502
+ # To suppress the impact on existing logic, limit the case to `@follow_inodes`.
503
+ # We may not need `@follow_inodes` condition.
504
+ if @follow_inodes && new_inode.nil?
505
+ # nil inode means the file disappeared, so we only need to stop it.
506
+ @tails.delete(tail_watcher.path)
507
+ # https://github.com/fluent/fluentd/pull/4237#issuecomment-1633358632
508
+ # Because of this problem, log duplication can occur during `rotate_wait`.
509
+ # Need to set `rotate_wait 0` for a workaround.
510
+ # Duplication will occur if `refresh_watcher` is called during the `rotate_wait`.
511
+ # In that case, `refresh_watcher` will add the new TailWatcher to tail the same target,
512
+ # and it causes the log duplication.
513
+ # (Other `detach_watcher_after_rotate_wait` may have the same problem.
514
+ # We need a mechanism to avoid adding a duplicated TailWatcher while detaching a TailWatcher.)
515
+ detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
516
+ return
517
+ end
518
+
519
+ path = tail_watcher.path
489
520
 
490
521
  log.info("detected rotation of #{path}; waiting #{@rotate_wait} seconds")
491
522
 
@@ -499,23 +530,22 @@ module Fluent::Plugin
499
530
  end
500
531
  end
501
532
 
502
- rotated_tw = @tails[path]
533
+ new_target_info = TargetInfo.new(path, new_inode)
503
534
 
504
535
  if @follow_inodes
505
- new_position_entry = @pf[target_info]
506
-
536
+ new_position_entry = @pf[new_target_info]
537
+ # If `refresh_watcher` found the new file before, this will not be zero.
538
+ # In this case, all we have to do is detach the current tail_watcher.
507
539
  if new_position_entry.read_inode == 0
508
- # When follow_inodes is true, it's not cleaned up by refresh_watcher.
509
- # So it should be unwatched here explicitly.
510
- rotated_tw.unwatched = true if rotated_tw
511
- @tails[path] = setup_watcher(target_info, new_position_entry)
540
+ @tails[path] = setup_watcher(new_target_info, new_position_entry)
512
541
  @tails[path].on_notify
513
542
  end
514
543
  else
515
- @tails[path] = setup_watcher(target_info, pe)
544
+ @tails[path] = setup_watcher(new_target_info, pe)
516
545
  @tails[path].on_notify
517
546
  end
518
- detach_watcher_after_rotate_wait(rotated_tw, pe.read_inode) if rotated_tw
547
+
548
+ detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
519
549
  end
520
550
 
521
551
  # TailWatcher#close is called by another thread at shutdown phase.
@@ -523,6 +553,10 @@ module Fluent::Plugin
523
553
  # so adding close_io argument to avoid this problem.
524
554
  # At shutdown, IOHandler's io will be released automatically after detached the event loop
525
555
  def detach_watcher(tw, ino, close_io = true)
556
+ if @follow_inodes && tw.ino != ino
557
+ log.warn("detach_watcher could be detaching an unexpected tail_watcher with a different ino.",
558
+ path: tw.path, actual_ino_in_tw: tw.ino, expect_ino_to_close: ino)
559
+ end
526
560
  tw.watchers.each do |watcher|
527
561
  event_loop_detach(watcher)
528
562
  end
@@ -778,7 +812,7 @@ module Fluent::Plugin
778
812
  attr_accessor :group_watcher
779
813
 
780
814
  def tag
781
- @parsed_tag ||= @path.tr('/', '.').gsub(/\.+/, '.').gsub(/^\./, '')
815
+ @parsed_tag ||= @path.tr('/', '.').squeeze('.').gsub(/^\./, '')
782
816
  end
783
817
 
784
818
  def register_watcher(watcher)
@@ -874,21 +908,21 @@ module Fluent::Plugin
874
908
 
875
909
  if watcher_needs_update
876
910
  if @follow_inodes
877
- # No need to update a watcher if stat is nil (file not present), because moving to inodes will create
878
- # new watcher, and old watcher will be closed by stop_watcher in refresh_watchers method
879
- # don't want to swap state because we need latest read offset in pos file even after rotate_wait
880
- if stat
881
- target_info = TargetInfo.new(@path, stat.ino)
882
- @update_watcher.call(target_info, @pe)
883
- end
911
+ # If stat is nil (file not present), NEED to stop and discard this watcher.
912
+ # When the file disappears but is resurrected soon, `#refresh_watcher`
913
+ # can't recognize this TailWatcher needs to be stopped.
914
+ # This can happen when the file is rotated.
915
+ # If a notify comes before the new file for the path is created during rotation,
916
+ # then it appears as if the file was resurrected once it disappeared.
917
+ # Don't want to swap state because we need latest read offset in pos file even after rotate_wait
918
+ @update_watcher.call(self, @pe, stat&.ino)
884
919
  else
885
920
  # Permit to handle if stat is nil (file not present).
886
921
  # If a file is mv-ed and a new file is created during
887
922
  # calling `#refresh_watchers`s, and `#refresh_watchers` won't run `#start_watchers`
888
923
  # and `#stop_watchers()` for the path because `target_paths_hash`
889
924
  # always contains the path.
890
- target_info = TargetInfo.new(@path, stat ? stat.ino : nil)
891
- @update_watcher.call(target_info, swap_state(@pe))
925
+ @update_watcher.call(self, swap_state(@pe), stat&.ino)
892
926
  end
893
927
  else
894
928
  @log.info "detected rotation of #{@path}"
@@ -36,6 +36,10 @@ module Fluent::Plugin
36
36
  desc "The field name of the client's address."
37
37
  config_param :source_address_key, :string, default: nil
38
38
 
39
+ # Setting default to nil for backward compatibility
40
+ desc "The max bytes of message."
41
+ config_param :message_length_limit, :size, default: nil
42
+
39
43
  config_param :blocking_timeout, :time, default: 0.5
40
44
 
41
45
  desc 'The payload is read up to this character.'
@@ -102,6 +106,7 @@ module Fluent::Plugin
102
106
 
103
107
  log.info "listening tcp socket", bind: @bind, port: @port
104
108
  del_size = @delimiter.length
109
+ discard_till_next_delimiter = false
105
110
  if @_extract_enabled && @_extract_tag_key
106
111
  server_create(:in_tcp_server_single_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
107
112
  unless check_client(conn)
@@ -116,6 +121,16 @@ module Fluent::Plugin
116
121
  msg = buf[pos...i]
117
122
  pos = i + del_size
118
123
 
124
+ if discard_till_next_delimiter
125
+ discard_till_next_delimiter = false
126
+ next
127
+ end
128
+
129
+ if !@message_length_limit.nil? && @message_length_limit < msg.bytesize
130
+ log.info "The received data is larger than 'message_length_limit', dropped:", limit: @message_length_limit, size: msg.bytesize, head: msg[...32]
131
+ next
132
+ end
133
+
119
134
  @parser.parse(msg) do |time, record|
120
135
  unless time && record
121
136
  log.warn "pattern not matched", message: msg
@@ -131,6 +146,15 @@ module Fluent::Plugin
131
146
  end
132
147
  end
133
148
  buf.slice!(0, pos) if pos > 0
149
+ # If the buffer size exceeds the limit here, it means that the next message will definitely exceed the limit.
150
+ # So we should clear the buffer here. Otherwise, it will keep storing useless data until the next delimiter comes.
151
+ if !@message_length_limit.nil? && @message_length_limit < buf.bytesize
152
+ log.info "The buffer size exceeds 'message_length_limit', cleared:", limit: @message_length_limit, size: buf.bytesize, head: buf[...32]
153
+ buf.clear
154
+ # We should discard the subsequent data until the next delimiter comes.
155
+ discard_till_next_delimiter = true
156
+ next
157
+ end
134
158
  end
135
159
  else
136
160
  server_create(:in_tcp_server_batch_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
@@ -147,6 +171,16 @@ module Fluent::Plugin
147
171
  msg = buf[pos...i]
148
172
  pos = i + del_size
149
173
 
174
+ if discard_till_next_delimiter
175
+ discard_till_next_delimiter = false
176
+ next
177
+ end
178
+
179
+ if !@message_length_limit.nil? && @message_length_limit < msg.bytesize
180
+ log.info "The received data is larger than 'message_length_limit', dropped:", limit: @message_length_limit, size: msg.bytesize, head: msg[...32]
181
+ next
182
+ end
183
+
150
184
  @parser.parse(msg) do |time, record|
151
185
  unless time && record
152
186
  log.warn "pattern not matched", message: msg
@@ -161,6 +195,15 @@ module Fluent::Plugin
161
195
  end
162
196
  router.emit_stream(@tag, es)
163
197
  buf.slice!(0, pos) if pos > 0
198
+ # If the buffer size exceeds the limit here, it means that the next message will definitely exceed the limit.
199
+ # So we should clear the buffer here. Otherwise, it will keep storing useless data until the next delimiter comes.
200
+ if !@message_length_limit.nil? && @message_length_limit < buf.bytesize
201
+ log.info "The buffer size exceeds 'message_length_limit', cleared:", limit: @message_length_limit, size: buf.bytesize, head: buf[...32]
202
+ buf.clear
203
+ # We should discard the subsequent data until the next delimiter comes.
204
+ discard_till_next_delimiter = true
205
+ next
206
+ end
164
207
  end
165
208
  end
166
209
  end
@@ -163,7 +163,7 @@ module Fluent::Plugin
163
163
  0
164
164
  elsif (@child_respawn == 'inf') || (@child_respawn == '-1')
165
165
  -1
166
- elsif @child_respawn =~ /^\d+$/
166
+ elsif /^\d+$/.match?(@child_respawn)
167
167
  @child_respawn.to_i
168
168
  else
169
169
  raise ConfigError, "child_respawn option argument invalid: none(or 0), inf(or -1) or positive number"
@@ -187,7 +187,7 @@ module Fluent::Plugin
187
187
  @rr = 0
188
188
 
189
189
  exit_callback = ->(status){
190
- c = @children.select{|child| child.pid == status.pid }.first
190
+ c = @children.find{|child| child.pid == status.pid }
191
191
  if c
192
192
  unless self.stopped?
193
193
  log.warn "child process exits with error code", code: status.to_i, status: status.exitstatus, signal: status.termsig
@@ -426,7 +426,7 @@ module Fluent
426
426
  end
427
427
  @secondary.acts_as_secondary(self)
428
428
  @secondary.configure(secondary_conf)
429
- if (@secondary.class != SecondaryFileOutput) &&
429
+ if (@secondary.class.to_s != "Fluent::Plugin::SecondaryFileOutput") &&
430
430
  (self.class != @secondary.class) &&
431
431
  (@custom_format || @secondary.implement?(:custom_format))
432
432
  log.warn "Use different plugin for secondary. Check the plugin works with primary like secondary_file", primary: self.class.to_s, secondary: @secondary.class.to_s
@@ -824,7 +824,7 @@ module Fluent
824
824
  if str.include?('${tag}')
825
825
  rvalue = rvalue.gsub('${tag}', metadata.tag)
826
826
  end
827
- if str =~ CHUNK_TAG_PLACEHOLDER_PATTERN
827
+ if CHUNK_TAG_PLACEHOLDER_PATTERN.match?(str)
828
828
  hash = {}
829
829
  tag_parts = metadata.tag.split('.')
830
830
  tag_parts.each_with_index do |part, i|
@@ -60,7 +60,7 @@ module Fluent
60
60
  rescue LoadError => ex
61
61
  name = :yajl
62
62
  if log
63
- if /\boj\z/ =~ ex.message
63
+ if /\boj\z/.match?(ex.message)
64
64
  log.info "Oj is not installed, and failing back to Yajl for json parser"
65
65
  else
66
66
  log.warn ex.message
@@ -99,7 +99,7 @@ module Fluent
99
99
 
100
100
  def shutdown
101
101
  @_event_loop_mutex.synchronize do
102
- @_event_loop_attached_watchers.reverse.each do |w|
102
+ @_event_loop_attached_watchers.reverse_each do |w|
103
103
  if w.attached?
104
104
  begin
105
105
  w.detach
@@ -116,7 +116,7 @@ module Fluent
116
116
  def after_shutdown
117
117
  timeout_at = Fluent::Clock.now + EVENT_LOOP_SHUTDOWN_TIMEOUT
118
118
  @_event_loop_mutex.synchronize do
119
- @_event_loop.watchers.reverse.each do |w|
119
+ @_event_loop.watchers.reverse_each do |w|
120
120
  begin
121
121
  w.detach
122
122
  rescue => e
@@ -119,7 +119,7 @@ module Fluent
119
119
  def self.validate_dot_keys(keys)
120
120
  keys.each { |key|
121
121
  next unless key.is_a?(String)
122
- if /\s+/.match(key)
122
+ if /\s+/.match?(key)
123
123
  raise Fluent::ConfigError, "whitespace character is not allowed in dot notation. Use bracket notation: #{key}"
124
124
  end
125
125
  }
@@ -545,6 +545,10 @@ module Fluent
545
545
  data = @sock.recv(@max_bytes, @flags)
546
546
  rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNRESET, IOError, Errno::EBADF
547
547
  return
548
+ rescue Errno::EMSGSIZE
549
+ # Windows ONLY: This happens when the data size is larger than `@max_bytes`.
550
+ @log.info "A received data was ignored since it was too large."
551
+ return
548
552
  end
549
553
  @callback.call(data)
550
554
  rescue => e
@@ -558,6 +562,10 @@ module Fluent
558
562
  data, addr = @sock.recvfrom(@max_bytes)
559
563
  rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNRESET, IOError, Errno::EBADF
560
564
  return
565
+ rescue Errno::EMSGSIZE
566
+ # Windows ONLY: This happens when the data size is larger than `@max_bytes`.
567
+ @log.info "A received data was ignored since it was too large."
568
+ return
561
569
  end
562
570
  @callback.call(data, UDPCallbackSocket.new(@sock, addr, close_socket: @close_socket))
563
571
  rescue => e
@@ -101,16 +101,16 @@ module Fluent
101
101
  end
102
102
 
103
103
  def thread_exist?(title)
104
- @_threads.values.select{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }.size > 0
104
+ @_threads.values.count{|thread| title == thread[:_fluentd_plugin_helper_thread_title] } > 0
105
105
  end
106
106
 
107
107
  def thread_started?(title)
108
- t = @_threads.values.select{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }.first
108
+ t = @_threads.values.find{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }
109
109
  t && t[:_fluentd_plugin_helper_thread_started]
110
110
  end
111
111
 
112
112
  def thread_running?(title)
113
- t = @_threads.values.select{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }.first
113
+ t = @_threads.values.find{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }
114
114
  t && t[:_fluentd_plugin_helper_thread_running]
115
115
  end
116
116
 
@@ -49,7 +49,7 @@ module Fluent
49
49
  # Thread::Backtrace::Location#path returns base filename or absolute path.
50
50
  # #absolute_path returns absolute_path always.
51
51
  # https://bugs.ruby-lang.org/issues/12159
52
- if location.absolute_path =~ /\/test_[^\/]+\.rb$/ # location.path =~ /test_.+\.rb$/
52
+ if /\/test_[^\/]+\.rb$/.match?(location.absolute_path) # location.path =~ /test_.+\.rb$/
53
53
  return true
54
54
  end
55
55
  end
@@ -697,7 +697,7 @@ module Fluent
697
697
  actual_log_path = @log_path
698
698
 
699
699
  # We need to prepare a unique path for each worker since Windows locks files.
700
- if Fluent.windows? && rotate
700
+ if Fluent.windows? && rotate && @log_path && @log_path != "-"
701
701
  actual_log_path = Fluent::Log.per_process_path(@log_path, process_type, worker_id)
702
702
  end
703
703
 
@@ -16,6 +16,6 @@
16
16
 
17
17
  module Fluent
18
18
 
19
- VERSION = '1.16.0'
19
+ VERSION = '1.16.2'
20
20
 
21
21
  end
@@ -1,4 +1,3 @@
1
- $LOAD_PATH.unshift(File.expand_path("../../", __FILE__))
2
1
  require "test-unit"
3
2
  require "fluent/test"
4
3
  require "fluent/test/driver/<%= type %>"
@@ -26,6 +26,10 @@ class IntailPositionFileTest < Test::Unit::TestCase
26
26
  "valid_path" => Fluent::Plugin::TailInput::TargetInfo.new("valid_path", 1),
27
27
  "inode23bit" => Fluent::Plugin::TailInput::TargetInfo.new("inode23bit", 0),
28
28
  }
29
+ TEST_CONTENT_INODES = {
30
+ 1 => Fluent::Plugin::TailInput::TargetInfo.new("valid_path", 1),
31
+ 0 => Fluent::Plugin::TailInput::TargetInfo.new("inode23bit", 0),
32
+ }
29
33
 
30
34
  def write_data(f, content)
31
35
  f.write(content)
@@ -221,7 +225,7 @@ class IntailPositionFileTest < Test::Unit::TestCase
221
225
  end
222
226
 
223
227
  sub_test_case '#unwatch' do
224
- test 'deletes entry by path' do
228
+ test 'unwatch entry by path' do
225
229
  write_data(@file, TEST_CONTENT)
226
230
  pf = Fluent::Plugin::TailInput::PositionFile.load(@file, false, {}, logger: $log)
227
231
  inode1 = File.stat(@file).ino
@@ -239,6 +243,32 @@ class IntailPositionFileTest < Test::Unit::TestCase
239
243
 
240
244
  assert_not_equal p1, p2
241
245
  end
246
+
247
+ test 'unwatch entries by inode' do
248
+ write_data(@file, TEST_CONTENT)
249
+ pf = Fluent::Plugin::TailInput::PositionFile.load(@file, true, TEST_CONTENT_INODES, logger: $log)
250
+
251
+ existing_targets = TEST_CONTENT_INODES.select do |inode, target_info|
252
+ inode == 1
253
+ end
254
+ pe_to_unwatch = pf[TEST_CONTENT_INODES[0]]
255
+
256
+ pf.unwatch_removed_targets(existing_targets)
257
+
258
+ assert_equal(
259
+ {
260
+ map_keys: [TEST_CONTENT_INODES[1].ino],
261
+ unwatched_pe_pos: Fluent::Plugin::TailInput::PositionFile::UNWATCHED_POSITION,
262
+ },
263
+ {
264
+ map_keys: pf.instance_variable_get(:@map).keys,
265
+ unwatched_pe_pos: pe_to_unwatch.read_pos,
266
+ }
267
+ )
268
+
269
+ unwatched_pe_retaken = pf[TEST_CONTENT_INODES[0]]
270
+ assert_not_equal pe_to_unwatch, unwatched_pe_retaken
271
+ end
242
272
  end
243
273
 
244
274
  sub_test_case 'FilePositionEntry' do
@@ -108,7 +108,7 @@ class BaseTest < Test::Unit::TestCase
108
108
  @p.extend m
109
109
  assert_equal [], logger.logs
110
110
 
111
- ret = @p.string_safe_encoding("abc\xff.\x01f"){|s| s.split(/\./) }
111
+ ret = @p.string_safe_encoding("abc\xff.\x01f"){|s| s.split(".") }
112
112
  assert_equal ['abc?', "\u0001f"], ret
113
113
  assert_equal 1, logger.logs.size
114
114
  assert{ logger.logs.first.include?("invalid byte sequence is replaced in ") }
@@ -57,6 +57,17 @@ class BufferChunkTest < Test::Unit::TestCase
57
57
  assert chunk.respond_to?(:msgpack_each)
58
58
  end
59
59
 
60
+ test 'unpacker arg is not implemented for ChunkMessagePackEventStreamer' do
61
+ meta = Object.new
62
+ chunk = Fluent::Plugin::Buffer::Chunk.new(meta)
63
+ chunk.extend Fluent::ChunkMessagePackEventStreamer
64
+
65
+ unpacker = Fluent::MessagePackFactory.thread_local_msgpack_unpacker
66
+
67
+ assert_raise(NotImplementedError){ chunk.each(unpacker: unpacker) }
68
+ assert_raise(NotImplementedError){ chunk.msgpack_each(unpacker: unpacker) }
69
+ end
70
+
60
71
  test 'some methods raise ArgumentError with an option of `compressed: :gzip` and without extending Compressble`' do
61
72
  meta = Object.new
62
73
  chunk = Fluent::Plugin::Buffer::Chunk.new(meta)
@@ -367,7 +367,7 @@ class ForwardInputTest < Test::Unit::TestCase
367
367
  end
368
368
 
369
369
  logs = d.instance.log.out.logs
370
- assert{ logs.select{|line| line =~ /skip invalid event/ }.size == 2 }
370
+ assert{ logs.count{|line| line =~ /skip invalid event/ } == 2 }
371
371
 
372
372
  d.instance_shutdown
373
373
  end
@@ -593,10 +593,10 @@ class ForwardInputTest < Test::Unit::TestCase
593
593
 
594
594
  # check log
595
595
  logs = d.instance.log.logs
596
- assert_equal 1, logs.select{|line|
596
+ assert_equal 1, logs.count{|line|
597
597
  line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_warn_limit':/ &&
598
598
  line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=16777216 size=16777501/
599
- }.size, "large chunk warning is not logged"
599
+ }, "large chunk warning is not logged"
600
600
 
601
601
  d.instance_shutdown
602
602
  end
@@ -619,10 +619,10 @@ class ForwardInputTest < Test::Unit::TestCase
619
619
 
620
620
  # check log
621
621
  logs = d.instance.log.logs
622
- assert_equal 1, logs.select{ |line|
622
+ assert_equal 1, logs.count{ |line|
623
623
  line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_warn_limit':/ &&
624
624
  line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=16777216 size=16777501/
625
- }.size, "large chunk warning is not logged"
625
+ }, "large chunk warning is not logged"
626
626
 
627
627
  d.instance_shutdown
628
628
  end
@@ -653,10 +653,10 @@ class ForwardInputTest < Test::Unit::TestCase
653
653
 
654
654
  # check log
655
655
  logs = d.instance.log.logs
656
- assert_equal 1, logs.select{|line|
656
+ assert_equal 1, logs.count{|line|
657
657
  line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_limit', dropped:/ &&
658
658
  line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=33554432 size=33554989/
659
- }.size, "large chunk warning is not logged"
659
+ }, "large chunk warning is not logged"
660
660
 
661
661
  d.instance_shutdown
662
662
  end
@@ -676,9 +676,9 @@ class ForwardInputTest < Test::Unit::TestCase
676
676
 
677
677
  # check log
678
678
  logs = d.instance.log.logs
679
- assert_equal 1, logs.select{|line|
679
+ assert_equal 1, logs.count{|line|
680
680
  line =~ / \[warn\]: incoming chunk is broken: host="#{LOCALHOST_HOSTNAME}" msg=#{data.inspect}/
681
- }.size, "should not accept broken chunk"
681
+ }, "should not accept broken chunk"
682
682
 
683
683
  d.instance_shutdown
684
684
  end