fluentd 1.16.0-x64-mingw-ucrt → 1.16.2-x64-mingw-ucrt
- checksums.yaml +4 -4
- data/.github/ISSUE_TEMPLATE/bug_report.yaml +1 -0
- data/.github/ISSUE_TEMPLATE/feature_request.yaml +1 -0
- data/.github/workflows/stale-actions.yml +24 -0
- data/CHANGELOG.md +74 -0
- data/CONTRIBUTING.md +1 -1
- data/MAINTAINERS.md +3 -3
- data/SECURITY.md +5 -9
- data/fluentd.gemspec +1 -1
- data/lib/fluent/command/ctl.rb +2 -2
- data/lib/fluent/command/plugin_config_formatter.rb +1 -1
- data/lib/fluent/config/dsl.rb +1 -1
- data/lib/fluent/config/v1_parser.rb +2 -2
- data/lib/fluent/counter/server.rb +1 -1
- data/lib/fluent/counter/validator.rb +3 -3
- data/lib/fluent/engine.rb +1 -1
- data/lib/fluent/event.rb +8 -4
- data/lib/fluent/log.rb +9 -0
- data/lib/fluent/match.rb +1 -1
- data/lib/fluent/msgpack_factory.rb +6 -1
- data/lib/fluent/plugin/base.rb +1 -1
- data/lib/fluent/plugin/filter_record_transformer.rb +1 -1
- data/lib/fluent/plugin/in_forward.rb +1 -1
- data/lib/fluent/plugin/in_http.rb +8 -8
- data/lib/fluent/plugin/in_sample.rb +1 -1
- data/lib/fluent/plugin/in_tail/position_file.rb +32 -18
- data/lib/fluent/plugin/in_tail.rb +58 -24
- data/lib/fluent/plugin/in_tcp.rb +43 -0
- data/lib/fluent/plugin/out_exec_filter.rb +2 -2
- data/lib/fluent/plugin/output.rb +2 -2
- data/lib/fluent/plugin/parser_json.rb +1 -1
- data/lib/fluent/plugin_helper/event_loop.rb +2 -2
- data/lib/fluent/plugin_helper/record_accessor.rb +1 -1
- data/lib/fluent/plugin_helper/server.rb +8 -0
- data/lib/fluent/plugin_helper/thread.rb +3 -3
- data/lib/fluent/plugin_id.rb +1 -1
- data/lib/fluent/supervisor.rb +1 -1
- data/lib/fluent/version.rb +1 -1
- data/templates/new_gem/test/helper.rb.erb +0 -1
- data/test/plugin/in_tail/test_position_file.rb +31 -1
- data/test/plugin/test_base.rb +1 -1
- data/test/plugin/test_buffer_chunk.rb +11 -0
- data/test/plugin/test_in_forward.rb +9 -9
- data/test/plugin/test_in_tail.rb +379 -0
- data/test/plugin/test_in_tcp.rb +74 -4
- data/test/plugin/test_in_udp.rb +28 -0
- data/test/plugin/test_in_unix.rb +2 -2
- data/test/plugin/test_multi_output.rb +1 -1
- data/test/plugin/test_out_exec_filter.rb +2 -2
- data/test/plugin/test_out_file.rb +2 -2
- data/test/plugin/test_output.rb +12 -12
- data/test/plugin/test_output_as_buffered.rb +44 -44
- data/test/plugin/test_output_as_buffered_compress.rb +32 -18
- data/test/plugin/test_output_as_buffered_retries.rb +1 -1
- data/test/plugin/test_output_as_buffered_secondary.rb +2 -2
- data/test/plugin_helper/test_child_process.rb +2 -2
- data/test/plugin_helper/test_server.rb +50 -1
- data/test/test_log.rb +38 -1
- data/test/test_msgpack_factory.rb +32 -0
- data/test/test_supervisor.rb +13 -0
- metadata +5 -4
data/lib/fluent/plugin/in_tail.rb
CHANGED
@@ -370,17 +370,30 @@ module Fluent::Plugin
    def refresh_watchers
      target_paths_hash = expand_paths
      existence_paths_hash = existence_path
-
+
      log.debug {
        target_paths_str = target_paths_hash.collect { |key, target_info| target_info.path }.join(",")
        existence_paths_str = existence_paths_hash.collect { |key, target_info| target_info.path }.join(",")
        "tailing paths: target = #{target_paths_str} | existing = #{existence_paths_str}"
      }

+      if !@follow_inodes
+        need_unwatch_in_stop_watchers = true
+      else
+        # When using @follow_inodes, need this to unwatch the rotated old inode when it disappears.
+        # After `update_watcher` detaches an old TailWatcher, the inode is lost from the `@tails`.
+        # So that inode can't be contained in `removed_hash`, and can't be unwatched by `stop_watchers`.
+        #
+        # This logic may work for `@follow_inodes false` too.
+        # Just limiting the case to supress the impact to existing logics.
+        @pf&.unwatch_removed_targets(target_paths_hash)
+        need_unwatch_in_stop_watchers = false
+      end
+
+      removed_hash = existence_paths_hash.reject {|key, value| target_paths_hash.key?(key)}
      added_hash = target_paths_hash.reject {|key, value| existence_paths_hash.key?(key)}

-      stop_watchers(
+      stop_watchers(removed_hash, unwatched: need_unwatch_in_stop_watchers) unless removed_hash.empty?
      start_watchers(added_hash) unless added_hash.empty?
      @startup = false if @startup
    end
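To make the set arithmetic in `refresh_watchers` easier to follow, here is a minimal standalone sketch (editorial, not part of the diff; the paths and the `:tw_a`-style placeholders for TailWatcher/TargetInfo objects are made up) of how `removed_hash` and `added_hash` are derived:

```ruby
# Hypothetical example: keys are watched paths, values stand in for
# TailWatcher / TargetInfo objects kept by the plugin.
existence_paths_hash = { "a.log" => :tw_a, "b.log" => :tw_b }
target_paths_hash    = { "b.log" => :info_b, "c.log" => :info_c }

# Same reject logic as in refresh_watchers above.
removed_hash = existence_paths_hash.reject { |key, _| target_paths_hash.key?(key) }
added_hash   = target_paths_hash.reject { |key, _| existence_paths_hash.key?(key) }

p removed_hash.keys  # => ["a.log"]  -> passed to stop_watchers
p added_hash.keys    # => ["c.log"]  -> passed to start_watchers
```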
@@ -484,8 +497,26 @@ module Fluent::Plugin
    end

    # refresh_watchers calls @tails.keys so we don't use stop_watcher -> start_watcher sequence for safety.
-    def update_watcher(
-
+    def update_watcher(tail_watcher, pe, new_inode)
+      # TODO we should use another callback for this.
+      # To supress impact to existing logics, limit the case to `@follow_inodes`.
+      # We may not need `@follow_inodes` condition.
+      if @follow_inodes && new_inode.nil?
+        # nil inode means the file disappeared, so we only need to stop it.
+        @tails.delete(tail_watcher.path)
+        # https://github.com/fluent/fluentd/pull/4237#issuecomment-1633358632
+        # Because of this problem, log duplication can occur during `rotate_wait`.
+        # Need to set `rotate_wait 0` for a workaround.
+        # Duplication will occur if `refresh_watcher` is called during the `rotate_wait`.
+        # In that case, `refresh_watcher` will add the new TailWatcher to tail the same target,
+        # and it causes the log duplication.
+        # (Other `detach_watcher_after_rotate_wait` may have the same problem.
+        #  We need the mechanism not to add duplicated TailWathcer with detaching TailWatcher.)
+        detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
+        return
+      end
+
+      path = tail_watcher.path

      log.info("detected rotation of #{path}; waiting #{@rotate_wait} seconds")

@@ -499,23 +530,22 @@ module Fluent::Plugin
        end
      end

-
+      new_target_info = TargetInfo.new(path, new_inode)

      if @follow_inodes
-        new_position_entry = @pf[
-
+        new_position_entry = @pf[new_target_info]
+        # If `refresh_watcher` find the new file before, this will not be zero.
+        # In this case, only we have to do is detaching the current tail_watcher.
        if new_position_entry.read_inode == 0
-
-          # So it should be unwatched here explicitly.
-          rotated_tw.unwatched = true if rotated_tw
-          @tails[path] = setup_watcher(target_info, new_position_entry)
+          @tails[path] = setup_watcher(new_target_info, new_position_entry)
          @tails[path].on_notify
        end
      else
-        @tails[path] = setup_watcher(
+        @tails[path] = setup_watcher(new_target_info, pe)
        @tails[path].on_notify
      end
-
+
+      detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
    end

    # TailWatcher#close is called by another thread at shutdown phase.
@@ -523,6 +553,10 @@ module Fluent::Plugin
    # so adding close_io argument to avoid this problem.
    # At shutdown, IOHandler's io will be released automatically after detached the event loop
    def detach_watcher(tw, ino, close_io = true)
+      if @follow_inodes && tw.ino != ino
+        log.warn("detach_watcher could be detaching an unexpected tail_watcher with a different ino.",
+                 path: tw.path, actual_ino_in_tw: tw.ino, expect_ino_to_close: ino)
+      end
      tw.watchers.each do |watcher|
        event_loop_detach(watcher)
      end
@@ -778,7 +812,7 @@ module Fluent::Plugin
      attr_accessor :group_watcher

      def tag
-        @parsed_tag ||= @path.tr('/', '.').
+        @parsed_tag ||= @path.tr('/', '.').squeeze('.').gsub(/^\./, '')
      end

      def register_watcher(watcher)
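As a quick illustration of the new tag normalization (a standalone sketch with a made-up path, not part of the diff), the added `squeeze('.')` and leading-dot strip keep a leading `/` or doubled separators from producing empty tag parts:

```ruby
path = "/var/log/containers//app.log"  # hypothetical path with a doubled slash

# The updated expression squeezes repeated dots and drops a leading dot.
tag = path.tr('/', '.').squeeze('.').gsub(/^\./, '')
puts tag  # => "var.log.containers.app.log"
```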
@@ -874,21 +908,21 @@ module Fluent::Plugin

        if watcher_needs_update
          if @follow_inodes
-            #
-            #
-            #
-
-
-
-
+            # If stat is nil (file not present), NEED to stop and discard this watcher.
+            # When the file is disappeared but is resurrected soon, then `#refresh_watcher`
+            # can't recognize this TailWatcher needs to be stopped.
+            # This can happens when the file is rotated.
+            # If a notify comes before the new file for the path is created during rotation,
+            # then it appears as if the file was resurrected once it disappeared.
+            # Don't want to swap state because we need latest read offset in pos file even after rotate_wait
+            @update_watcher.call(self, @pe, stat&.ino)
          else
            # Permit to handle if stat is nil (file not present).
            # If a file is mv-ed and a new file is created during
            # calling `#refresh_watchers`s, and `#refresh_watchers` won't run `#start_watchers`
            # and `#stop_watchers()` for the path because `target_paths_hash`
            # always contains the path.
-
-            @update_watcher.call(target_info, swap_state(@pe))
+            @update_watcher.call(self, swap_state(@pe), stat&.ino)
          end
        else
          @log.info "detected rotation of #{@path}"
data/lib/fluent/plugin/in_tcp.rb
CHANGED
@@ -36,6 +36,10 @@ module Fluent::Plugin
    desc "The field name of the client's address."
    config_param :source_address_key, :string, default: nil

+    # Setting default to nil for backward compatibility
+    desc "The max bytes of message."
+    config_param :message_length_limit, :size, default: nil
+
    config_param :blocking_timeout, :time, default: 0.5

    desc 'The payload is read up to this character.'
@@ -102,6 +106,7 @@ module Fluent::Plugin

      log.info "listening tcp socket", bind: @bind, port: @port
      del_size = @delimiter.length
+      discard_till_next_delimiter = false
      if @_extract_enabled && @_extract_tag_key
        server_create(:in_tcp_server_single_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
          unless check_client(conn)
@@ -116,6 +121,16 @@ module Fluent::Plugin
            msg = buf[pos...i]
            pos = i + del_size

+            if discard_till_next_delimiter
+              discard_till_next_delimiter = false
+              next
+            end
+
+            if !@message_length_limit.nil? && @message_length_limit < msg.bytesize
+              log.info "The received data is larger than 'message_length_limit', dropped:", limit: @message_length_limit, size: msg.bytesize, head: msg[...32]
+              next
+            end
+
            @parser.parse(msg) do |time, record|
              unless time && record
                log.warn "pattern not matched", message: msg
@@ -131,6 +146,15 @@ module Fluent::Plugin
            end
          end
          buf.slice!(0, pos) if pos > 0
+          # If the buffer size exceeds the limit here, it means that the next message will definitely exceed the limit.
+          # So we should clear the buffer here. Otherwise, it will keep storing useless data until the next delimiter comes.
+          if !@message_length_limit.nil? && @message_length_limit < buf.bytesize
+            log.info "The buffer size exceeds 'message_length_limit', cleared:", limit: @message_length_limit, size: buf.bytesize, head: buf[...32]
+            buf.clear
+            # We should discard the subsequent data until the next delimiter comes.
+            discard_till_next_delimiter = true
+            next
+          end
        end
      else
        server_create(:in_tcp_server_batch_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
@@ -147,6 +171,16 @@ module Fluent::Plugin
            msg = buf[pos...i]
            pos = i + del_size

+            if discard_till_next_delimiter
+              discard_till_next_delimiter = false
+              next
+            end
+
+            if !@message_length_limit.nil? && @message_length_limit < msg.bytesize
+              log.info "The received data is larger than 'message_length_limit', dropped:", limit: @message_length_limit, size: msg.bytesize, head: msg[...32]
+              next
+            end
+
            @parser.parse(msg) do |time, record|
              unless time && record
                log.warn "pattern not matched", message: msg
@@ -161,6 +195,15 @@ module Fluent::Plugin
          end
          router.emit_stream(@tag, es)
          buf.slice!(0, pos) if pos > 0
+          # If the buffer size exceeds the limit here, it means that the next message will definitely exceed the limit.
+          # So we should clear the buffer here. Otherwise, it will keep storing useless data until the next delimiter comes.
+          if !@message_length_limit.nil? && @message_length_limit < buf.bytesize
+            log.info "The buffer size exceeds 'message_length_limit', cleared:", limit: @message_length_limit, size: buf.bytesize, head: buf[...32]
+            buf.clear
+            # We should discard the subsequent data until the next delimiter comes.
+            discard_till_next_delimiter = true
+            next
+          end
        end
      end
    end
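The hunks above add the same guard to both emit paths. The following standalone sketch is editorial, not plugin code: a plain String buffer, a newline delimiter, and a 16-byte limit are assumptions for illustration (the plugin uses its configured `delimiter`, parser, and `message_length_limit`). It shows the intended behavior: oversized records are dropped, and once the pending buffer itself exceeds the limit it is cleared and input is discarded until the next delimiter:

```ruby
limit = 16
delimiter = "\n"
buf = +""
discard = false

process = lambda do |data|
  buf << data
  while (i = buf.index(delimiter))
    msg = buf.slice!(0, i + delimiter.size).chomp(delimiter)
    if discard
      discard = false             # this was the tail of an oversized record; skip it
      next
    end
    next if msg.bytesize > limit  # dropped, mirroring the plugin's log.info branch
    puts "emit: #{msg.inspect}"
  end
  if buf.bytesize > limit         # the next record is already over the limit
    buf.clear
    discard = true                # discard everything until the next delimiter
  end
end

process.call("ok record\npartial but very very long recor")
process.call("d continues here\nnext ok record\n")
# => emit: "ok record"
# => emit: "next ok record"
```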
data/lib/fluent/plugin/out_exec_filter.rb
CHANGED
@@ -163,7 +163,7 @@ module Fluent::Plugin
        0
      elsif (@child_respawn == 'inf') || (@child_respawn == '-1')
        -1
-      elsif @child_respawn
+      elsif /^\d+$/.match?(@child_respawn)
        @child_respawn.to_i
      else
        raise ConfigError, "child_respawn option argument invalid: none(or 0), inf(or -1) or positive number"
@@ -187,7 +187,7 @@ module Fluent::Plugin
      @rr = 0

      exit_callback = ->(status){
-        c = @children.
+        c = @children.find{|child| child.pid == status.pid }
        if c
          unless self.stopped?
            log.warn "child process exits with error code", code: status.to_i, status: status.exitstatus, signal: status.termsig
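The `match?` change above means only all-digit strings are treated as a numeric `child_respawn` count; anything else falls through to the ConfigError branch. A small standalone check (the sample strings are hypothetical, not plugin values):

```ruby
["5", "12", "five", "5x", ""].each do |value|
  numeric = /^\d+$/.match?(value)
  puts "#{value.inspect} -> #{numeric ? value.to_i : 'ConfigError branch'}"
end
# "5"    -> 5
# "12"   -> 12
# "five" -> ConfigError branch
# "5x"   -> ConfigError branch
# ""     -> ConfigError branch
```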
data/lib/fluent/plugin/output.rb
CHANGED
@@ -426,7 +426,7 @@ module Fluent
        end
        @secondary.acts_as_secondary(self)
        @secondary.configure(secondary_conf)
-        if (@secondary.class != SecondaryFileOutput) &&
+        if (@secondary.class.to_s != "Fluent::Plugin::SecondaryFileOutput") &&
           (self.class != @secondary.class) &&
           (@custom_format || @secondary.implement?(:custom_format))
          log.warn "Use different plugin for secondary. Check the plugin works with primary like secondary_file", primary: self.class.to_s, secondary: @secondary.class.to_s
@@ -824,7 +824,7 @@ module Fluent
        if str.include?('${tag}')
          rvalue = rvalue.gsub('${tag}', metadata.tag)
        end
-        if str
+        if CHUNK_TAG_PLACEHOLDER_PATTERN.match?(str)
          hash = {}
          tag_parts = metadata.tag.split('.')
          tag_parts.each_with_index do |part, i|
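For orientation, here is a standalone sketch of the kind of index-based mapping the surrounding code builds from `tag_parts` (the tag is made up, and the exact placeholder keys beyond what the hunk shows are an assumption for illustration):

```ruby
# Editorial illustration with a hypothetical tag.
tag = "myapp.web.access"
tag_parts = tag.split('.')

# Assumed shape of the mapping: each tag part becomes addressable by index,
# so an index-style placeholder such as ${tag[1]} can resolve to "web".
hash = {}
tag_parts.each_with_index do |part, i|
  hash["${tag[#{i}]}"] = part
end
p hash  # => {"${tag[0]}"=>"myapp", "${tag[1]}"=>"web", "${tag[2]}"=>"access"}
```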
data/lib/fluent/plugin_helper/event_loop.rb
CHANGED
@@ -99,7 +99,7 @@ module Fluent

      def shutdown
        @_event_loop_mutex.synchronize do
-          @_event_loop_attached_watchers.
+          @_event_loop_attached_watchers.reverse_each do |w|
            if w.attached?
              begin
                w.detach
@@ -116,7 +116,7 @@ module Fluent
      def after_shutdown
        timeout_at = Fluent::Clock.now + EVENT_LOOP_SHUTDOWN_TIMEOUT
        @_event_loop_mutex.synchronize do
-          @_event_loop.watchers.
+          @_event_loop.watchers.reverse_each do |w|
            begin
              w.detach
            rescue => e
data/lib/fluent/plugin_helper/record_accessor.rb
CHANGED
@@ -119,7 +119,7 @@ module Fluent
      def self.validate_dot_keys(keys)
        keys.each { |key|
          next unless key.is_a?(String)
-          if /\s+/.match(key)
+          if /\s+/.match?(key)
            raise Fluent::ConfigError, "whitespace character is not allowed in dot notation. Use bracket notation: #{key}"
          end
        }
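For reference, a standalone sketch (hypothetical keys) of what this validation accepts and rejects; `match?` gives the same boolean result as the old `match` call without allocating a MatchData:

```ruby
["log.level", "log level", "nested.deep.key", "tab\tkey"].each do |key|
  rejected = /\s+/.match?(key)
  puts "#{key.inspect} -> #{rejected ? 'raises Fluent::ConfigError' : 'ok'}"
end
# "log.level"       -> ok
# "log level"       -> raises Fluent::ConfigError
# "nested.deep.key" -> ok
# "tab\tkey"        -> raises Fluent::ConfigError
```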
data/lib/fluent/plugin_helper/server.rb
CHANGED
@@ -545,6 +545,10 @@ module Fluent
          data = @sock.recv(@max_bytes, @flags)
        rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNRESET, IOError, Errno::EBADF
          return
+        rescue Errno::EMSGSIZE
+          # Windows ONLY: This happens when the data size is larger than `@max_bytes`.
+          @log.info "A received data was ignored since it was too large."
+          return
        end
        @callback.call(data)
      rescue => e
@@ -558,6 +562,10 @@ module Fluent
          data, addr = @sock.recvfrom(@max_bytes)
        rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNRESET, IOError, Errno::EBADF
          return
+        rescue Errno::EMSGSIZE
+          # Windows ONLY: This happens when the data size is larger than `@max_bytes`.
+          @log.info "A received data was ignored since it was too large."
+          return
        end
        @callback.call(data, UDPCallbackSocket.new(@sock, addr, close_socket: @close_socket))
      rescue => e
data/lib/fluent/plugin_helper/thread.rb
CHANGED
@@ -101,16 +101,16 @@ module Fluent
      end

      def thread_exist?(title)
-        @_threads.values.
+        @_threads.values.count{|thread| title == thread[:_fluentd_plugin_helper_thread_title] } > 0
      end

      def thread_started?(title)
-        t = @_threads.values.
+        t = @_threads.values.find{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }
        t && t[:_fluentd_plugin_helper_thread_started]
      end

      def thread_running?(title)
-        t = @_threads.values.
+        t = @_threads.values.find{|thread| title == thread[:_fluentd_plugin_helper_thread_title] }
        t && t[:_fluentd_plugin_helper_thread_running]
      end

data/lib/fluent/plugin_id.rb
CHANGED
@@ -49,7 +49,7 @@ module Fluent
      # Thread::Backtrace::Location#path returns base filename or absolute path.
      # #absolute_path returns absolute_path always.
      # https://bugs.ruby-lang.org/issues/12159
-      if
+      if /\/test_[^\/]+\.rb$/.match?(location.absolute_path) # location.path =~ /test_.+\.rb$/
        return true
      end
    end
data/lib/fluent/supervisor.rb
CHANGED
@@ -697,7 +697,7 @@ module Fluent
      actual_log_path = @log_path

      # We need to prepare a unique path for each worker since Windows locks files.
-      if Fluent.windows? && rotate
+      if Fluent.windows? && rotate && @log_path && @log_path != "-"
        actual_log_path = Fluent::Log.per_process_path(@log_path, process_type, worker_id)
      end

data/lib/fluent/version.rb
CHANGED
data/test/plugin/in_tail/test_position_file.rb
CHANGED
@@ -26,6 +26,10 @@ class IntailPositionFileTest < Test::Unit::TestCase
    "valid_path" => Fluent::Plugin::TailInput::TargetInfo.new("valid_path", 1),
    "inode23bit" => Fluent::Plugin::TailInput::TargetInfo.new("inode23bit", 0),
  }
+  TEST_CONTENT_INODES = {
+    1 => Fluent::Plugin::TailInput::TargetInfo.new("valid_path", 1),
+    0 => Fluent::Plugin::TailInput::TargetInfo.new("inode23bit", 0),
+  }

  def write_data(f, content)
    f.write(content)
@@ -221,7 +225,7 @@ class IntailPositionFileTest < Test::Unit::TestCase
  end

  sub_test_case '#unwatch' do
-    test '
+    test 'unwatch entry by path' do
      write_data(@file, TEST_CONTENT)
      pf = Fluent::Plugin::TailInput::PositionFile.load(@file, false, {}, logger: $log)
      inode1 = File.stat(@file).ino
@@ -239,6 +243,32 @@ class IntailPositionFileTest < Test::Unit::TestCase

      assert_not_equal p1, p2
    end
+
+    test 'unwatch entries by inode' do
+      write_data(@file, TEST_CONTENT)
+      pf = Fluent::Plugin::TailInput::PositionFile.load(@file, true, TEST_CONTENT_INODES, logger: $log)
+
+      existing_targets = TEST_CONTENT_INODES.select do |inode, target_info|
+        inode == 1
+      end
+      pe_to_unwatch = pf[TEST_CONTENT_INODES[0]]
+
+      pf.unwatch_removed_targets(existing_targets)
+
+      assert_equal(
+        {
+          map_keys: [TEST_CONTENT_INODES[1].ino],
+          unwatched_pe_pos: Fluent::Plugin::TailInput::PositionFile::UNWATCHED_POSITION,
+        },
+        {
+          map_keys: pf.instance_variable_get(:@map).keys,
+          unwatched_pe_pos: pe_to_unwatch.read_pos,
+        }
+      )
+
+      unwatched_pe_retaken = pf[TEST_CONTENT_INODES[0]]
+      assert_not_equal pe_to_unwatch, unwatched_pe_retaken
+    end
  end

  sub_test_case 'FilePositionEntry' do
data/test/plugin/test_base.rb
CHANGED
@@ -108,7 +108,7 @@ class BaseTest < Test::Unit::TestCase
    @p.extend m
    assert_equal [], logger.logs

-    ret = @p.string_safe_encoding("abc\xff.\x01f"){|s| s.split(
+    ret = @p.string_safe_encoding("abc\xff.\x01f"){|s| s.split(".") }
    assert_equal ['abc?', "\u0001f"], ret
    assert_equal 1, logger.logs.size
    assert{ logger.logs.first.include?("invalid byte sequence is replaced in ") }
data/test/plugin/test_buffer_chunk.rb
CHANGED
@@ -57,6 +57,17 @@ class BufferChunkTest < Test::Unit::TestCase
    assert chunk.respond_to?(:msgpack_each)
  end

+  test 'unpacker arg is not implemented for ChunkMessagePackEventStreamer' do
+    meta = Object.new
+    chunk = Fluent::Plugin::Buffer::Chunk.new(meta)
+    chunk.extend Fluent::ChunkMessagePackEventStreamer
+
+    unpacker = Fluent::MessagePackFactory.thread_local_msgpack_unpacker
+
+    assert_raise(NotImplementedError){ chunk.each(unpacker: unpacker) }
+    assert_raise(NotImplementedError){ chunk.msgpack_each(unpacker: unpacker) }
+  end
+
  test 'some methods raise ArgumentError with an option of `compressed: :gzip` and without extending Compressble`' do
    meta = Object.new
    chunk = Fluent::Plugin::Buffer::Chunk.new(meta)
data/test/plugin/test_in_forward.rb
CHANGED
@@ -367,7 +367,7 @@ class ForwardInputTest < Test::Unit::TestCase
    end

    logs = d.instance.log.out.logs
-    assert{ logs.
+    assert{ logs.count{|line| line =~ /skip invalid event/ } == 2 }

    d.instance_shutdown
  end
@@ -593,10 +593,10 @@ class ForwardInputTest < Test::Unit::TestCase

    # check log
    logs = d.instance.log.logs
-    assert_equal 1, logs.
+    assert_equal 1, logs.count{|line|
      line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_warn_limit':/ &&
      line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=16777216 size=16777501/
-    }
+    }, "large chunk warning is not logged"

    d.instance_shutdown
  end
@@ -619,10 +619,10 @@ class ForwardInputTest < Test::Unit::TestCase

    # check log
    logs = d.instance.log.logs
-    assert_equal 1, logs.
+    assert_equal 1, logs.count{ |line|
      line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_warn_limit':/ &&
      line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=16777216 size=16777501/
-    }
+    }, "large chunk warning is not logged"

    d.instance_shutdown
  end
@@ -653,10 +653,10 @@ class ForwardInputTest < Test::Unit::TestCase

    # check log
    logs = d.instance.log.logs
-    assert_equal 1, logs.
+    assert_equal 1, logs.count{|line|
      line =~ / \[warn\]: Input chunk size is larger than 'chunk_size_limit', dropped:/ &&
      line =~ / tag="test.tag" host="#{LOCALHOST_HOSTNAME}" limit=33554432 size=33554989/
-    }
+    }, "large chunk warning is not logged"

    d.instance_shutdown
  end
@@ -676,9 +676,9 @@ class ForwardInputTest < Test::Unit::TestCase

    # check log
    logs = d.instance.log.logs
-    assert_equal 1, logs.
+    assert_equal 1, logs.count{|line|
      line =~ / \[warn\]: incoming chunk is broken: host="#{LOCALHOST_HOSTNAME}" msg=#{data.inspect}/
-    }
+    }, "should not accept broken chunk"

    d.instance_shutdown
  end