fluentd 1.14.5-x64-mingw-ucrt → 1.15.1-x64-mingw-ucrt
Potentially problematic release.
This version of fluentd might be problematic.
- checksums.yaml +4 -4
- data/.github/ISSUE_TEMPLATE/config.yml +2 -2
- data/.github/workflows/linux-test.yaml +1 -1
- data/.github/workflows/macos-test.yaml +5 -1
- data/.github/workflows/windows-test.yaml +9 -6
- data/CHANGELOG.md +105 -21
- data/CONTRIBUTING.md +1 -1
- data/MAINTAINERS.md +2 -2
- data/README.md +1 -1
- data/fluentd.gemspec +2 -1
- data/lib/fluent/command/ctl.rb +4 -1
- data/lib/fluent/command/fluentd.rb +14 -0
- data/lib/fluent/config/literal_parser.rb +2 -2
- data/lib/fluent/config/yaml_parser/fluent_value.rb +47 -0
- data/lib/fluent/config/yaml_parser/loader.rb +91 -0
- data/lib/fluent/config/yaml_parser/parser.rb +166 -0
- data/lib/fluent/config/yaml_parser/section_builder.rb +107 -0
- data/lib/fluent/config/yaml_parser.rb +56 -0
- data/lib/fluent/config.rb +14 -1
- data/lib/fluent/error.rb +3 -0
- data/lib/fluent/event_router.rb +19 -1
- data/lib/fluent/plugin/bare_output.rb +1 -1
- data/lib/fluent/plugin/base.rb +19 -0
- data/lib/fluent/plugin/file_wrapper.rb +52 -107
- data/lib/fluent/plugin/in_forward.rb +1 -1
- data/lib/fluent/plugin/in_tail/group_watch.rb +204 -0
- data/lib/fluent/plugin/in_tail/position_file.rb +1 -15
- data/lib/fluent/plugin/in_tail.rb +68 -48
- data/lib/fluent/plugin/out_file.rb +11 -1
- data/lib/fluent/plugin/out_forward/socket_cache.rb +2 -0
- data/lib/fluent/plugin/output.rb +43 -37
- data/lib/fluent/plugin/parser.rb +3 -4
- data/lib/fluent/plugin/parser_syslog.rb +1 -1
- data/lib/fluent/plugin_helper/retry_state.rb +14 -4
- data/lib/fluent/plugin_helper/server.rb +23 -4
- data/lib/fluent/plugin_helper/service_discovery.rb +2 -2
- data/lib/fluent/rpc.rb +4 -3
- data/lib/fluent/supervisor.rb +119 -28
- data/lib/fluent/system_config.rb +2 -1
- data/lib/fluent/version.rb +1 -1
- data/lib/fluent/winsvc.rb +2 -0
- data/test/command/test_ctl.rb +0 -1
- data/test/command/test_fluentd.rb +33 -0
- data/test/config/test_system_config.rb +3 -1
- data/test/config/test_types.rb +1 -1
- data/test/plugin/in_tail/test_io_handler.rb +14 -4
- data/test/plugin/in_tail/test_position_file.rb +0 -63
- data/test/plugin/out_forward/test_socket_cache.rb +26 -1
- data/test/plugin/test_base.rb +34 -0
- data/test/plugin/test_file_wrapper.rb +0 -68
- data/test/plugin/test_in_forward.rb +0 -2
- data/test/plugin/test_in_object_space.rb +9 -3
- data/test/plugin/test_in_syslog.rb +1 -1
- data/test/plugin/test_in_tail.rb +629 -353
- data/test/plugin/test_out_forward.rb +30 -20
- data/test/plugin/test_output_as_buffered_retries.rb +7 -7
- data/test/plugin/test_output_as_buffered_secondary.rb +1 -1
- data/test/plugin/test_parser_syslog.rb +1 -1
- data/test/plugin_helper/test_cert_option.rb +1 -1
- data/test/plugin_helper/test_child_process.rb +16 -4
- data/test/plugin_helper/test_retry_state.rb +602 -38
- data/test/plugin_helper/test_server.rb +18 -0
- data/test/test_config.rb +135 -4
- data/test/test_event_router.rb +17 -0
- data/test/test_supervisor.rb +196 -6
- metadata +25 -5
data/lib/fluent/plugin/in_tail/group_watch.rb ADDED

```diff
@@ -0,0 +1,204 @@
+#
+# Fluentd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'fluent/plugin/input'
+
+module Fluent::Plugin
+  class TailInput < Fluent::Plugin::Input
+    module GroupWatchParams
+      include Fluent::Configurable
+
+      DEFAULT_KEY = /.*/
+      DEFAULT_LIMIT = -1
+      REGEXP_JOIN = "_"
+
+      config_section :group, param_name: :group, required: false, multi: false do
+        desc 'Regex for extracting group\'s metadata'
+        config_param :pattern,
+                     :regexp,
+                     default: /^\/var\/log\/containers\/(?<podname>[a-z0-9]([-a-z0-9]*[a-z0-9])?(\/[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace>[^_]+)_(?<container>.+)-(?<docker_id>[a-z0-9]{64})\.log$/
+
+        desc 'Period of time in which the group_line_limit is applied'
+        config_param :rate_period, :time, default: 5
+
+        config_section :rule, param_name: :rule, required: true, multi: true do
+          desc 'Key-value pairs for grouping'
+          config_param :match, :hash, value_type: :regexp, default: { namespace: [DEFAULT_KEY], podname: [DEFAULT_KEY] }
+          desc 'Maximum number of log lines allowed per group over a period of rate_period'
+          config_param :limit, :integer, default: DEFAULT_LIMIT
+        end
+      end
+    end
+
+    module GroupWatch
+      def self.included(mod)
+        mod.include GroupWatchParams
+      end
+
+      attr_reader :group_watchers, :default_group_key
+
+      def initialize
+        super
+        @group_watchers = {}
+        @group_keys = nil
+        @default_group_key = nil
+      end
+
+      def configure(conf)
+        super
+
+        unless @group.nil?
+          ## Ensuring correct time period syntax
+          @group.rule.each { |rule|
+            raise "Metadata Group Limit >= DEFAULT_LIMIT" unless rule.limit >= GroupWatchParams::DEFAULT_LIMIT
+          }
+
+          @group_keys = Regexp.compile(@group.pattern).named_captures.keys
+          @default_group_key = ([GroupWatchParams::DEFAULT_KEY] * @group_keys.length).join(GroupWatchParams::REGEXP_JOIN)
+
+          ## Ensures that "specific" rules (with larger number of `rule.match` keys)
+          ## have a higher priority against "generic" rules (with less number of `rule.match` keys).
+          ## This will be helpful when a file satisfies more than one rule.
+          @group.rule.sort_by! { |rule| -rule.match.length() }
+          construct_groupwatchers
+          @group_watchers[@default_group_key] ||= GroupWatcher.new(@group.rate_period, GroupWatchParams::DEFAULT_LIMIT)
+        end
+      end
+
+      def add_path_to_group_watcher(path)
+        return nil if @group.nil?
+        group_watcher = find_group_from_metadata(path)
+        group_watcher.add(path) unless group_watcher.include?(path)
+        group_watcher
+      end
+
+      def remove_path_from_group_watcher(path)
+        return if @group.nil?
+        group_watcher = find_group_from_metadata(path)
+        group_watcher.delete(path)
+      end
+
+      def construct_group_key(named_captures)
+        match_rule = []
+        @group_keys.each { |key|
+          match_rule.append(named_captures.fetch(key, GroupWatchParams::DEFAULT_KEY))
+        }
+        match_rule = match_rule.join(GroupWatchParams::REGEXP_JOIN)
+
+        match_rule
+      end
+
+      def construct_groupwatchers
+        @group.rule.each { |rule|
+          match_rule = construct_group_key(rule.match)
+          @group_watchers[match_rule] ||= GroupWatcher.new(@group.rate_period, rule.limit)
+        }
+      end
+
+      def find_group(metadata)
+        metadata_key = construct_group_key(metadata)
+        gw_key = @group_watchers.keys.find { |regexp| metadata_key.match?(regexp) && regexp != @default_group_key }
+        gw_key ||= @default_group_key
+
+        @group_watchers[gw_key]
+      end
+
+      def find_group_from_metadata(path)
+        begin
+          metadata = @group.pattern.match(path).named_captures
+          group_watcher = find_group(metadata)
+        rescue
+          log.warn "Cannot find group from metadata, Adding file in the default group"
+          group_watcher = @group_watchers[@default_group_key]
+        end
+
+        group_watcher
+      end
+    end
+
+    class GroupWatcher
+      attr_accessor :current_paths, :limit, :number_lines_read, :start_reading_time, :rate_period
+
+      FileCounter = Struct.new(
+        :number_lines_read,
+        :start_reading_time,
+      )
+
+      def initialize(rate_period = 60, limit = -1)
+        @current_paths = {}
+        @rate_period = rate_period
+        @limit = limit
+      end
+
+      def add(path)
+        @current_paths[path] = FileCounter.new(0, nil)
+      end
+
+      def include?(path)
+        @current_paths.key?(path)
+      end
+
+      def size
+        @current_paths.size
+      end
+
+      def delete(path)
+        @current_paths.delete(path)
+      end
+
+      def update_reading_time(path)
+        @current_paths[path].start_reading_time ||= Fluent::Clock.now
+      end
+
+      def update_lines_read(path, value)
+        @current_paths[path].number_lines_read += value
+      end
+
+      def reset_counter(path)
+        @current_paths[path].start_reading_time = nil
+        @current_paths[path].number_lines_read = 0
+      end
+
+      def time_spent_reading(path)
+        Fluent::Clock.now - @current_paths[path].start_reading_time
+      end
+
+      def limit_time_period_reached?(path)
+        time_spent_reading(path) < @rate_period
+      end
+
+      def limit_lines_reached?(path)
+        return true unless include?(path)
+        return true if @limit == 0
+
+        return false if @limit < 0
+        return false if @current_paths[path].number_lines_read < @limit / size
+
+        # update_reading_time(path)
+        if limit_time_period_reached?(path) # Exceeds limit
+          true
+        else # Does not exceed limit
+          reset_counter(path)
+          false
+        end
+      end
+
+      def to_s
+        super + " current_paths: #{@current_paths} rate_period: #{@rate_period} limit: #{@limit}"
+      end
+    end
+  end
+end
```
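The new GroupWatcher counts how many lines each group of tailed files has produced inside `rate_period` and reports when the group's `limit` is hit. A minimal sketch of driving it directly, assuming the fluentd gem at 1.15 or later is installed; the class and method names come from the diff above, while the path and the numbers are illustrative:

```ruby
# Minimal sketch, assuming fluentd >= 1.15 is installed.
require 'fluent/clock'
require 'fluent/plugin/in_tail/group_watch'

watcher = Fluent::Plugin::TailInput::GroupWatcher.new(5, 100) # rate_period: 5s, limit: 100 lines
path = "/var/log/containers/mypod_default_app-#{'0' * 64}.log" # illustrative path

watcher.add(path)                    # register the file in this group
watcher.update_reading_time(path)    # remember when reading started
watcher.update_lines_read(path, 120) # 120 lines read so far in this window

# 120 lines exceed the per-group limit of 100 within the 5-second window,
# so in_tail would stop reading this file until the window elapses.
puts watcher.limit_lines_reached?(path) # => true
```

In a real deployment these watchers are built from the new `<group>`/`<rule>` configuration sections rather than instantiated by hand.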
data/lib/fluent/plugin/in_tail/position_file.rb CHANGED

```diff
@@ -250,20 +250,6 @@ module Fluent::Plugin
       end
     end
 
-    TargetInfo = Struct.new(:path, :ino) do
-      def ==(other)
-        return false unless other.is_a?(TargetInfo)
-        self.path == other.path
-      end
-
-      def hash
-        self.path.hash
-      end
-
-      def eql?(other)
-        return false unless other.is_a?(TargetInfo)
-        self.path == other.path
-      end
-    end
+    TargetInfo = Struct.new(:path, :ino)
   end
 end
```
data/lib/fluent/plugin/in_tail.rb CHANGED

```diff
@@ -24,6 +24,7 @@ require 'fluent/plugin/parser_multiline'
 require 'fluent/variable_store'
 require 'fluent/capability'
 require 'fluent/plugin/in_tail/position_file'
+require 'fluent/plugin/in_tail/group_watch'
 
 if Fluent.windows?
   require_relative 'file_wrapper'
@@ -33,6 +34,8 @@ end
 
 module Fluent::Plugin
   class TailInput < Fluent::Plugin::Input
+    include GroupWatch
+
     Fluent::Plugin.register_input('tail', self)
 
     helpers :timer, :event_loop, :parser, :compat_parameters
@@ -354,11 +357,11 @@ module Fluent::Plugin
 
     def existence_path
       hash = {}
-      @tails.each_key {|target_info|
+      @tails.each {|path, tw|
         if @follow_inodes
-          hash[target_info.ino] = target_info
+          hash[tw.ino] = TargetInfo.new(tw.path, tw.ino)
         else
-          hash[target_info.path] = target_info
+          hash[tw.path] = TargetInfo.new(tw.path, tw.ino)
         end
       }
       hash
@@ -406,6 +409,8 @@ module Fluent::Plugin
         event_loop_attach(watcher)
       end
 
+      tw.group_watcher = add_path_to_group_watcher(target_info.path)
+
       tw
     rescue => e
       if tw
@@ -420,36 +425,31 @@ module Fluent::Plugin
     end
 
     def construct_watcher(target_info)
+      path = target_info.path
+
+      # The file might be rotated or removed after collecting paths, so check inode again here.
+      begin
+        target_info.ino = Fluent::FileWrapper.stat(path).ino
+      rescue Errno::ENOENT, Errno::EACCES
+        $log.warn "stat() for #{path} failed. Continuing without tailing it."
+        return
+      end
+
       pe = nil
       if @pf
         pe = @pf[target_info]
-        if @read_from_head && pe.read_inode.zero?
-          begin
-            pe.update(Fluent::FileWrapper.stat(target_info.path).ino, 0)
-          rescue Errno::ENOENT, Errno::EACCES
-            $log.warn "stat() for #{target_info.path} failed. Continuing without tailing it."
-          end
-        end
+        pe.update(target_info.ino, 0) if @read_from_head && pe.read_inode.zero?
       end
 
       begin
        tw = setup_watcher(target_info, pe)
      rescue WatcherSetupError => e
-        log.warn "Skip #{target_info.path} because unexpected setup error happens: #{e}"
+        log.warn "Skip #{path} because unexpected setup error happens: #{e}"
        return
      end
 
-      begin
-        target_info = TargetInfo.new(target_info.path, Fluent::FileWrapper.stat(target_info.path).ino)
-        @tails.delete(target_info)
-        @tails[target_info] = tw
-        tw.on_notify
-      rescue Errno::ENOENT, Errno::EACCES => e
-        $log.warn "stat() for #{target_info.path} failed with #{e.class.name}. Drop tail watcher for now."
-        # explicitly detach and unwatch watcher `tw`.
-        tw.unwatched = true
-        detach_watcher(tw, target_info.ino, false)
-      end
+      @tails[path] = tw
+      tw.on_notify
     end
 
     def start_watchers(targets_info)
@@ -461,10 +461,12 @@ module Fluent::Plugin
 
     def stop_watchers(targets_info, immediate: false, unwatched: false, remove_watcher: true)
       targets_info.each_value { |target_info|
+        remove_path_from_group_watcher(target_info.path)
+
         if remove_watcher
-          tw = @tails.delete(target_info)
+          tw = @tails.delete(target_info.path)
         else
-          tw = @tails[target_info]
+          tw = @tails[target_info.path]
         end
         if tw
           tw.unwatched = unwatched
@@ -478,8 +480,8 @@ module Fluent::Plugin
     end
 
     def close_watcher_handles
-      @tails.keys.each do |target_info|
-        tw = @tails.delete(target_info)
+      @tails.keys.each do |path|
+        tw = @tails.delete(path)
         if tw
           tw.close
         end
@@ -488,20 +490,21 @@ module Fluent::Plugin
 
     # refresh_watchers calls @tails.keys so we don't use stop_watcher -> start_watcher sequence for safety.
     def update_watcher(target_info, pe)
-      log.info("detected rotation of #{target_info.path}; waiting #{@rotate_wait} seconds")
+      path = target_info.path
+
+      log.info("detected rotation of #{path}; waiting #{@rotate_wait} seconds")
 
       if @pf
         pe_inode = pe.read_inode
-        target_info_from_position_entry = TargetInfo.new(target_info.path, pe_inode)
+        target_info_from_position_entry = TargetInfo.new(path, pe_inode)
         unless pe_inode == @pf[target_info_from_position_entry].read_inode
-          log.debug "Skip update_watcher because watcher has been already updated by other inotify event"
+          log.warn "Skip update_watcher because watcher has been already updated by other inotify event",
+                   path: path, inode: pe.read_inode, inode_in_pos_file: @pf[target_info_from_position_entry].read_inode
           return
         end
       end
 
-      rotated_target_info = TargetInfo.new(target_info.path, pe.read_inode)
-      rotated_tw = @tails[rotated_target_info]
-      new_target_info = target_info.dup
+      rotated_tw = @tails[path]
 
       if @follow_inodes
         new_position_entry = @pf[target_info]
@@ -509,17 +512,13 @@ module Fluent::Plugin
         if new_position_entry.read_inode == 0
           # When follow_inodes is true, it's not cleaned up by refresh_watcher.
           # So it should be unwatched here explicitly.
-          rotated_tw.unwatched = true
-          # Make sure to delete old key, it has a different ino while the name is same.
-          @tails.delete(rotated_target_info)
-          @tails[new_target_info] = setup_watcher(new_target_info, new_position_entry)
-          @tails[new_target_info].on_notify
+          rotated_tw.unwatched = true if rotated_tw
+          @tails[path] = setup_watcher(target_info, new_position_entry)
+          @tails[path].on_notify
         end
       else
-        # Make sure to delete old key, it has a different ino while the name is same.
-        @tails.delete(rotated_target_info)
-        @tails[new_target_info] = setup_watcher(new_target_info, pe)
-        @tails[new_target_info].on_notify
+        @tails[path] = setup_watcher(target_info, pe)
+        @tails[path].on_notify
       end
       detach_watcher_after_rotate_wait(rotated_tw, pe.read_inode) if rotated_tw
     end
@@ -542,18 +541,19 @@ module Fluent::Plugin
       end
     end
 
+    def throttling_is_enabled?(tw)
+      return true if @read_bytes_limit_per_second > 0
+      return true if tw.group_watcher && tw.group_watcher.limit >= 0
+      false
+    end
+
     def detach_watcher_after_rotate_wait(tw, ino)
      # Call event_loop_attach/event_loop_detach is high-cost for short-live object.
      # If this has a problem with large number of files, use @_event_loop directly instead of timer_execute.
      if @open_on_every_update
        # Detach now because it's already closed, waiting it doesn't make sense.
        detach_watcher(tw, ino)
-      elsif @read_bytes_limit_per_second < 0
-        # throttling isn't enabled, just wait @rotate_wait
-        timer_execute(:in_tail_close_watcher, @rotate_wait, repeat: false) do
-          detach_watcher(tw, ino)
-        end
-      else
+      elsif throttling_is_enabled?(tw)
        # When the throttling feature is enabled, it might not reach EOF yet.
        # Should ensure to read all contents before closing it, with keeping throttling.
        start_time_to_wait = Fluent::Clock.now
@@ -564,6 +564,11 @@ module Fluent::Plugin
           detach_watcher(tw, ino)
         end
       end
+      else
+        # when the throttling feature isn't enabled, just wait @rotate_wait
+        timer_execute(:in_tail_close_watcher, @rotate_wait, repeat: false) do
+          detach_watcher(tw, ino)
+        end
       end
     end
 
@@ -775,6 +780,7 @@ module Fluent::Plugin
       attr_reader :line_buffer_timer_flusher
       attr_accessor :unwatched # This is used for removing position entry from PositionFile
       attr_reader :watchers
+      attr_accessor :group_watcher
 
       def tag
         @parsed_tag ||= @path.tr('/', '.').gsub(/\.+/, '.').gsub(/^\./, '')
@@ -997,6 +1003,10 @@ module Fluent::Plugin
          @log.info "following tail of #{@path}"
        end
 
+        def group_watcher
+          @watcher.group_watcher
+        end
+
        def on_notify
          @notify_mutex.synchronize { handle_notify }
        end
@@ -1054,6 +1064,7 @@ module Fluent::Plugin
 
        def handle_notify
          return if limit_bytes_per_second_reached?
+          return if group_watcher&.limit_lines_reached?(@path)
 
          with_io do |io|
            begin
@@ -1063,17 +1074,26 @@ module Fluent::Plugin
            begin
              while true
                @start_reading_time ||= Fluent::Clock.now
+                group_watcher&.update_reading_time(@path)
+
                data = io.readpartial(BYTES_TO_READ, @iobuf)
                @eof = false
                @number_bytes_read += data.bytesize
                @fifo << data
+
+                n_lines_before_read = @lines.size
                @fifo.read_lines(@lines)
+                group_watcher&.update_lines_read(@path, @lines.size - n_lines_before_read)
+
+                group_watcher_limit = group_watcher&.limit_lines_reached?(@path)
+                @log.debug "Reading Limit exceeded #{@path} #{group_watcher.number_lines_read}" if group_watcher_limit
 
-                if limit_bytes_per_second_reached? || should_shutdown_now?
+                if group_watcher_limit || limit_bytes_per_second_reached? || should_shutdown_now?
                  # Just get out from tailing loop.
                  read_more = false
                  break
                end
+
                if @lines.size >= @read_lines_limit
                  # not to use too much memory in case the file is very large
                  read_more = true
```
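in_tail now keys `@tails` by plain path, asks the group watcher for permission before and during reads, and stops a file once its group's line limit is reached in the current window. The group itself is derived from the tailed path via the default pattern defined in group_watch.rb; a small, self-contained sketch of that extraction (plain Ruby, the path is illustrative):

```ruby
# Sketch of how the default group pattern maps a container log path to named captures.
# The regexp is the GroupWatchParams default shown in the diff; the path is illustrative.
pattern = /^\/var\/log\/containers\/(?<podname>[a-z0-9]([-a-z0-9]*[a-z0-9])?(\/[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace>[^_]+)_(?<container>.+)-(?<docker_id>[a-z0-9]{64})\.log$/

path = "/var/log/containers/nginx-7d9f_web_nginx-#{'a' * 64}.log"
captures = pattern.match(path).named_captures

p captures["podname"]   # => "nginx-7d9f"
p captures["namespace"] # => "web"

# construct_group_key joins the captures (podname, namespace, container, docker_id here)
# with "_" and uses the result to pick the GroupWatcher whose rule matches this file.
```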
data/lib/fluent/plugin/out_file.rb CHANGED

```diff
@@ -188,6 +188,10 @@ module Fluent::Plugin
         condition = Gem::Dependency.new('', [">= 2.7.0", "< 3.1.0"])
         @need_ruby_on_macos_workaround = true if condition.match?('', RUBY_VERSION)
       end
+
+      if @need_lock && @append && @fluentd_lock_dir.nil?
+        raise Fluent::InvalidLockDirectory, "must set FLUENTD_LOCK_DIR on multi-worker append mode"
+      end
     end
 
     def multi_workers_ready?
@@ -217,7 +221,13 @@ module Fluent::Plugin
       end
 
       if @append
-        writer.call(path, chunk)
+        if @need_lock
+          acquire_worker_lock(path) do
+            writer.call(path, chunk)
+          end
+        else
+          writer.call(path, chunk)
+        end
       else
         find_filepath_available(path, with_lock: @need_lock) do |actual_path|
           writer.call(actual_path, chunk)
```
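out_file now refuses multi-worker append mode unless FLUENTD_LOCK_DIR is set, and serializes each append through acquire_worker_lock so that workers do not interleave writes to the same file. A hedged sketch of the general pattern behind such a lock (an flock'd lock file per target); this is an illustration only, not the actual helper, which presumably is the addition the file list shows for data/lib/fluent/plugin/base.rb (+19 -0):

```ruby
# Illustrative sketch of cross-worker append locking via an flock'd lock file.
# Not fluentd's implementation; the function name and lock-file layout are assumptions.
require 'fileutils'

def with_append_lock(lock_dir, target_path)
  FileUtils.mkdir_p(lock_dir)
  lock_path = File.join(lock_dir, "#{File.basename(target_path)}.lock")
  File.open(lock_path, File::RDWR | File::CREAT, 0644) do |lock|
    lock.flock(File::LOCK_EX) # only one worker appends at a time
    yield
  end                         # lock released when the file handle closes
end

# with_append_lock(ENV['FLUENTD_LOCK_DIR'], '/var/log/fluent/app.log') do
#   File.open('/var/log/fluent/app.log', 'ab') { |f| f.write(chunk_bytes) }
# end
```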
data/lib/fluent/plugin/out_forward/socket_cache.rb CHANGED

```diff
@@ -50,6 +50,7 @@ module Fluent::Plugin
       def checkin(sock)
         @mutex.synchronize do
           if (s = @inflight_sockets.delete(sock))
+            s.timeout = timeout
             @available_sockets[s.key] << s
           else
             @log.debug("there is no socket #{sock}")
@@ -122,6 +123,7 @@ module Fluent::Plugin
           t = Time.now
           if (s = @available_sockets[key].find { |sock| !expired_socket?(sock, time: t) })
             @inflight_sockets[s.sock] = @available_sockets[key].delete(s)
+            s.timeout = timeout
             s
           else
             nil
```
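The two added `s.timeout = timeout` lines refresh a cached socket's expiry whenever it is handed out or returned, so a connection that is actively being reused is not treated as expired. A toy illustration of the effect (a stand-in struct, not SocketCache's internals):

```ruby
# Toy illustration: push the expiry deadline forward on every reuse.
CachedSocket = Struct.new(:sock, :timeout) do
  def expired?(now = Time.now)
    timeout < now
  end
end

entry = CachedSocket.new(:dummy_socket, Time.now + 5)
sleep 1

entry.timeout = Time.now + 5 # what the patch does on checkout/checkin
puts entry.expired?          # => false; the deadline moves with the latest use
```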
data/lib/fluent/plugin/output.rb CHANGED
```diff
@@ -235,6 +235,7 @@ module Fluent
       @dequeued_chunks_mutex = nil
       @output_enqueue_thread = nil
       @output_flush_threads = nil
+      @output_flush_thread_current_position = 0
 
       @simple_chunking = nil
       @chunk_keys = @chunk_key_accessors = @chunk_key_time = @chunk_key_tag = nil
@@ -273,7 +274,7 @@ module Fluent
       super
 
       @num_errors_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "num_errors", help_text: "Number of count num errors")
-      @emit_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "
+      @emit_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_count", help_text: "Number of count emits")
       @emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_records", help_text: "Number of emit records")
       @emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_size", help_text: "Total size of emit events")
       @write_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "write_count", help_text: "Number of writing events")
@@ -492,6 +493,7 @@ module Fluent
         @dequeued_chunks = []
         @dequeued_chunks_mutex = Mutex.new
 
+        @output_flush_thread_current_position = 0
         @buffer_config.flush_thread_count.times do |i|
           thread_title = "flush_thread_#{i}".to_sym
           thread_state = FlushThreadState.new(nil, nil, Mutex.new, ConditionVariable.new)
@@ -503,7 +505,6 @@ module Fluent
           @output_flush_threads << thread_state
         end
       end
-      @output_flush_thread_current_position = 0
 
       if !@under_plugin_development && (@flush_mode == :interval || @chunk_key_time)
         @output_enqueue_thread = thread_create(:enqueue_thread, &method(:enqueue_thread_run))
@@ -1275,52 +1276,57 @@ module Fluent
 
         unless @retry
           @retry = retry_state(@buffer_config.retry_randomize)
+
           if @retry.limit?
-            # @retry_max_times == 0, fail imediately by the following block
-          else
-            if error
-              log.warn "failed to flush the buffer.", retry_times: @retry.steps, next_retry_time: @retry.next_time.round, chunk: chunk_id_hex, error: error
-              log.warn_backtrace error.backtrace
-            end
-            return
+            handle_limit_reached(error)
+          elsif error
+            log_retry_error(error, chunk_id_hex, using_secondary)
           end
+
+          return
         end
 
         # @retry exists
 
-        if @retry.limit?
-          if error
-            records = @buffer.queued_records
-            msg = "failed to flush the buffer, and hit limit for retries. dropping all chunks in the buffer queue."
-            log.error msg, retry_times: @retry.steps, records: records, error: error
-            log.error_backtrace error.backtrace
-          end
-          @buffer.clear_queue!
-          log.debug "buffer queue cleared"
-          @retry = nil
+        # Ensure that the current time is greater than or equal to @retry.next_time to avoid the situation when
+        # @retry.step is called almost as many times as the number of flush threads in a short time.
+        if Time.now >= @retry.next_time
+          @retry.step
         else
-          # Ensure that the current time is greater than or equal to @retry.next_time to avoid the situation when
-          # @retry.step is called almost as many times as the number of flush threads in a short time.
-          if Time.now >= @retry.next_time
-            @retry.step
-          else
-            @retry.recalc_next_time # to prevent all flush threads from retrying at the same time
-          end
-          if error
-            if using_secondary
-              msg = "failed to flush the buffer with secondary output."
-              log.warn msg, retry_times: @retry.steps, next_retry_time: @retry.next_time.round, chunk: chunk_id_hex, error: error
-              log.warn_backtrace error.backtrace
-            else
-              msg = "failed to flush the buffer."
-              log.warn msg, retry_times: @retry.steps, next_retry_time: @retry.next_time.round, chunk: chunk_id_hex, error: error
-              log.warn_backtrace error.backtrace
-            end
-          end
+          @retry.recalc_next_time # to prevent all flush threads from retrying at the same time
+        end
+
+        if @retry.limit?
+          handle_limit_reached(error)
+        elsif error
+          log_retry_error(error, chunk_id_hex, using_secondary)
         end
       end
     end
 
+    def log_retry_error(error, chunk_id_hex, using_secondary)
+      return unless error
+      if using_secondary
+        msg = "failed to flush the buffer with secondary output."
+      else
+        msg = "failed to flush the buffer."
+      end
+      log.warn(msg, retry_times: @retry.steps, next_retry_time: @retry.next_time.round, chunk: chunk_id_hex, error: error)
+      log.warn_backtrace(error.backtrace)
+    end
+
+    def handle_limit_reached(error)
+      if error
+        records = @buffer.queued_records
+        msg = "Hit limit for retries. dropping all chunks in the buffer queue."
+        log.error msg, retry_times: @retry.steps, records: records, error: error
+        log.error_backtrace error.backtrace
+      end
+      @buffer.clear_queue!
+      log.debug "buffer queue cleared"
+      @retry = nil
+    end
+
     def retry_state(randomize)
       if @secondary
         retry_state_create(
```
data/lib/fluent/plugin/parser.rb CHANGED
```diff
@@ -89,7 +89,7 @@ module Fluent
       # : format[, timezone]
 
       config_param :time_key, :string, default: nil
-      config_param :null_value_pattern, :string, default: nil
+      config_param :null_value_pattern, :regexp, default: nil
       config_param :null_empty_string, :bool, default: false
       config_param :estimate_current_event, :bool, default: true
       config_param :keep_time_key, :bool, default: false
@@ -115,9 +115,8 @@ module Fluent
       super
 
       @time_parser = time_parser_create
-      @null_value_regexp = @null_value_pattern && Regexp.new(@null_value_pattern)
       @type_converters = build_type_converters(@types)
-      @execute_convert_values = @type_converters || @null_value_regexp || @null_empty_string
+      @execute_convert_values = @type_converters || @null_value_pattern || @null_empty_string
       @timeout_checker = if @timeout
                            class << self
                              alias_method :parse_orig, :parse
@@ -220,7 +219,7 @@ module Fluent
       return time, record
     end
 
-    def string_like_null(value, null_empty_string = @null_empty_string, null_value_regexp = @null_value_regexp)
+    def string_like_null(value, null_empty_string = @null_empty_string, null_value_regexp = @null_value_pattern)
       null_empty_string && value.empty? || null_value_regexp && string_safe_encoding(value){|s| null_value_regexp.match(s) }
     end
 
```
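Since `null_value_pattern` is now a `:regexp` parameter, the compiled `@null_value_regexp` intermediate goes away and `string_like_null` matches the configured pattern directly. A standalone sketch of that predicate (fluentd's version additionally guards the value with string_safe_encoding; the pattern below is illustrative):

```ruby
# Standalone re-implementation for illustration only.
def string_like_null(value, null_empty_string: false, null_value_regexp: nil)
  (null_empty_string && value.empty?) ||
    (!null_value_regexp.nil? && !null_value_regexp.match(value).nil?)
end

pattern = /\A(?:-|NULL)\z/
puts string_like_null("NULL", null_value_regexp: pattern) # => true
puts string_like_null("-",    null_value_regexp: pattern) # => true
puts string_like_null("42",   null_value_regexp: pattern) # => false
puts string_like_null("",     null_empty_string: true)    # => true
```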