fluentd 0.14.10-x86-mingw32 → 0.14.11-x86-mingw32
- checksums.yaml +4 -4
- data/.travis.yml +14 -6
- data/ChangeLog +28 -2
- data/appveyor.yml +1 -0
- data/lib/fluent/engine.rb +4 -7
- data/lib/fluent/error.rb +30 -0
- data/lib/fluent/log.rb +0 -7
- data/lib/fluent/plugin/base.rb +11 -0
- data/lib/fluent/plugin/buf_file.rb +9 -7
- data/lib/fluent/plugin/formatter_csv.rb +4 -2
- data/lib/fluent/plugin/in_forward.rb +46 -17
- data/lib/fluent/plugin/in_http.rb +2 -0
- data/lib/fluent/plugin/in_monitor_agent.rb +27 -2
- data/lib/fluent/plugin/in_syslog.rb +52 -36
- data/lib/fluent/plugin/in_tail.rb +1 -0
- data/lib/fluent/plugin/out_forward.rb +39 -29
- data/lib/fluent/plugin/output.rb +17 -0
- data/lib/fluent/plugin/storage_local.rb +16 -13
- data/lib/fluent/plugin_helper/storage.rb +21 -9
- data/lib/fluent/plugin_id.rb +17 -0
- data/lib/fluent/supervisor.rb +73 -45
- data/lib/fluent/system_config.rb +24 -21
- data/lib/fluent/version.rb +1 -1
- data/test/command/test_fluentd.rb +348 -0
- data/test/config/test_system_config.rb +39 -31
- data/test/plugin/test_base.rb +20 -0
- data/test/plugin/test_buf_file.rb +40 -0
- data/test/plugin/test_formatter_csv.rb +8 -0
- data/test/plugin/test_in_forward.rb +56 -21
- data/test/plugin/test_in_monitor_agent.rb +80 -8
- data/test/plugin/test_in_syslog.rb +75 -45
- data/test/plugin/test_out_file.rb +0 -1
- data/test/plugin/test_out_forward.rb +19 -11
- data/test/plugin/test_output.rb +44 -0
- data/test/plugin/test_storage_local.rb +290 -2
- data/test/plugin_helper/test_child_process.rb +40 -39
- data/test/plugin_helper/test_storage.rb +4 -3
- data/test/test_log.rb +1 -1
- data/test/test_output.rb +3 -0
- data/test/test_plugin_id.rb +101 -0
- data/test/test_supervisor.rb +3 -0
- metadata +7 -2
data/lib/fluent/plugin/in_monitor_agent.rb
CHANGED
@@ -35,6 +35,7 @@ module Fluent::Plugin
     config_param :tag, :string, default: nil
     config_param :emit_interval, :time, default: 60
     config_param :include_config, :bool, default: true
+    config_param :include_retry, :bool, default: true
 
     class MonitorServlet < WEBrick::HTTPServlet::AbstractServlet
       def initialize(server, agent)
@@ -78,7 +79,7 @@ module Fluent::Plugin
 
         # if ?debug=1 is set, set :with_debug_info for get_monitor_info
         # and :pretty_json for render_json_error
-        opts = {with_config: @agent.include_config}
+        opts = {with_config: @agent.include_config, with_retry: @agent.include_retry}
         if s = qs['debug'] and s[0]
           opts[:with_debug_info] = true
           opts[:pretty_json] = true
@@ -88,6 +89,10 @@ module Fluent::Plugin
           opts[:with_config] = Fluent::Config.bool_value(with_config)
         end
 
+        if with_retry = get_search_parameter(qs, 'with_retry'.freeze)
+          opts[:with_retry] = Fluent::Config.bool_value(with_retry)
+        end
+
         if tag = get_search_parameter(qs, 'tag'.freeze)
           # ?tag= to search an output plugin by match pattern
           if obj = @agent.plugin_info_by_tag(tag, opts)
@@ -231,7 +236,7 @@ module Fluent::Plugin
       if @tag
         log.debug "tag parameter is specified. Emit plugins info to '#{@tag}'"
 
-        opts = {with_config: false}
+        opts = {with_config: false, with_retry: false}
         timer_execute(:in_monitor_agent_emit, @emit_interval, repeat: true) {
           es = Fluent::MultiEventStream.new
           now = Fluent::Engine.now
@@ -346,6 +351,8 @@ module Fluent::Plugin
         end
       }
 
+      obj['retry'] = get_retry_info(pe.retry) if opts[:with_retry] and pe.instance_variable_defined?(:@retry)
+
       # include all instance variables if :with_debug_info is set
       if opts[:with_debug_info]
         iv = {}
@@ -362,6 +369,24 @@ module Fluent::Plugin
       obj
     end
 
+    RETRY_INFO = {
+      'start' => '@start',
+      'steps' => '@steps',
+      'next_time' => '@next_time',
+    }
+
+    def get_retry_info(pe_retry)
+      retry_variables = {}
+
+      if pe_retry
+        RETRY_INFO.each_pair { |key, param|
+          retry_variables[key] = pe_retry.instance_variable_get(param)
+        }
+      end
+
+      retry_variables
+    end
+
    def plugin_category(pe)
      case pe
      when Fluent::Plugin::Input
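The `include_retry` option and `with_retry` query parameter above expose a `retry` object (built from `@start`, `@steps` and `@next_time`) on each plugin entry served by the monitoring API. A minimal sketch of reading it, assuming monitor_agent's default `localhost:24220/api/plugins.json` endpoint and that at least one output plugin currently holds a retry state:

```ruby
require 'net/http'
require 'json'

uri = URI('http://localhost:24220/api/plugins.json?with_retry=1')
plugins = JSON.parse(Net::HTTP.get(uri))['plugins']

plugins.each do |pl|
  # 'retry' is present only while a plugin is retrying flushes
  next unless pl['retry']
  puts "#{pl['plugin_id']}: next retry at #{pl['retry']['next_time']} (steps: #{pl['retry']['steps']})"
end
```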
data/lib/fluent/plugin/in_syslog.rb
CHANGED
@@ -14,18 +14,17 @@
 # limitations under the License.
 #
 
-require 'cool.io'
-require 'yajl'
-
 require 'fluent/plugin/input'
 require 'fluent/config/error'
 require 'fluent/plugin/parser'
 
+require 'yajl'
+
 module Fluent::Plugin
   class SyslogInput < Input
     Fluent::Plugin.register_input('syslog', self)
 
-    helpers :parser, :compat_parameters, :
+    helpers :parser, :compat_parameters, :server
 
     DEFAULT_PARSER = 'syslog'
     SYSLOG_REGEXP = /^\<([0-9]+)\>(.*)/
@@ -68,11 +67,6 @@ module Fluent::Plugin
       7 => 'debug'
     }
 
-    def initialize
-      super
-      require 'fluent/plugin/socket_util'
-    end
-
     desc 'The port to listen to.'
     config_param :port, :integer, default: 5140
     desc 'The bind address to listen to.'
@@ -81,14 +75,22 @@ module Fluent::Plugin
     config_param :tag, :string
     desc 'The transport protocol used to receive logs.(udp, tcp)'
     config_param :protocol_type, :enum, list: [:tcp, :udp], default: :udp
+
     desc 'If true, add source host to event record.'
-    config_param :include_source_host, :bool, default: false
+    config_param :include_source_host, :bool, default: false, deprecated: 'use "source_hostname_key" or "source_address_key" instead.'
     desc 'Specify key of source host when include_source_host is true.'
     config_param :source_host_key, :string, default: 'source_host'.freeze
+
+    desc 'The field name of hostname of sender.'
+    config_param :source_hostname_key, :string, default: nil
+    desc 'The field name of source address of sender.'
+    config_param :source_address_key, :string, default: nil
+
     desc 'The field name of the priority.'
     config_param :priority_key, :string, default: nil
     desc 'The field name of the facility.'
     config_param :facility_key, :string, default: nil
+
     config_param :blocking_timeout, :time, default: 0.5
     config_param :message_length_limit, :size, default: 2048
 
@@ -107,25 +109,57 @@ module Fluent::Plugin
       @parser = parser_create
       @parser_parse_priority = @parser.respond_to?(:with_priority) && @parser.with_priority
 
+      if @include_source_host
+        if @source_address_key
+          raise Fluent::ConfigError, "specify either source_address_key or include_source_host"
+        end
+        @source_address_key = @source_host_key
+      end
+      @resolve_name = !!@source_hostname_key
+
       @_event_loop_run_timeout = @blocking_timeout
     end
 
     def start
       super
 
-      @
-
+      log.info "listening syslog socket on #{@bind}:#{@port} with #{@protocol_type}"
+      case @protocol_type
+      when :udp then start_udp_server
+      when :tcp then start_tcp_server
+      else
+        raise "BUG: invalid protocol_type value:#{@protocol_type}"
+      end
     end
 
-    def
-    @
+    def start_udp_server
+      server_create_udp(:in_syslog_udp_server, @port, bind: @bind, max_bytes: @message_length_limit, resolve_name: @resolve_name) do |data, sock|
+        message_handler(data.chomp, sock)
+      end
+    end
 
-
+    def start_tcp_server
+      # syslog family add "\n" to each message and this seems only way to split messages in tcp stream
+      delimiter = "\n"
+      delimiter_size = delimiter.size
+      server_create_connection(:in_syslog_tcp_server, @port, bind: @bind, resolve_name: @resolve_name) do |conn|
+        buffer = ""
+        conn.data do |data|
+          buffer << data
+          pos = 0
+          while idx = buffer.index(delimiter, pos)
+            msg = buffer[pos...idx]
+            pos = idx + delimiter_size
+            message_handler(msg, conn)
+          end
+          buffer.slice!(0, pos) if pos > 0
+        end
+      end
     end
 
     private
 
-    def message_handler(data,
+    def message_handler(data, sock)
       pri = nil
       text = data
       unless @parser_parse_priority
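The new TCP path accumulates socket data in a buffer and splits complete records on the trailing "\n" that syslog senders append, keeping any partial record for the next read. The same splitting logic in isolation (a standalone sketch; the sample data is illustrative):

```ruby
buffer = ""
emit = ->(msg) { puts "record: #{msg}" }

["<34>Oct 11 22:14:15 host app: part", "ial\n<34>second line\n"].each do |data|
  buffer << data
  pos = 0
  while idx = buffer.index("\n", pos)
    emit.call(buffer[pos...idx])
    pos = idx + 1
  end
  buffer.slice!(0, pos) if pos > 0  # keep only the trailing partial record
end
```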
@@ -150,7 +184,8 @@ module Fluent::Plugin
 
       record[@priority_key] = priority if @priority_key
       record[@facility_key] = facility if @facility_key
-      record[@
+      record[@source_address_key] = sock.remote_addr if @source_address_key
+      record[@source_hostname_key] = sock.remote_host if @source_hostname_key
 
       tag = "#{@tag}.#{facility}.#{priority}"
       emit(tag, time, record)
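With the new keys configured, the peer's address and resolved hostname are added to every emitted record. Purely illustrative example of a resulting record, assuming `source_address_key src_addr` and `source_hostname_key src_host` (field values made up):

```ruby
{
  "host"     => "myhost",
  "ident"    => "app",
  "message"  => "hello",
  "src_addr" => "192.0.2.10",
  "src_host" => "client.example.com"
}
```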
@@ -160,25 +195,6 @@ module Fluent::Plugin
       log.error_backtrace
     end
 
-    private
-
-    def listen(callback)
-      log.info "listening syslog socket on #{@bind}:#{@port} with #{@protocol_type}"
-      socket_manager_path = ENV['SERVERENGINE_SOCKETMANAGER_PATH']
-      if Fluent.windows?
-        socket_manager_path = socket_manager_path.to_i
-      end
-      client = ServerEngine::SocketManager::Client.new(socket_manager_path)
-      if @protocol_type == :udp
-        @usock = client.listen_udp(@bind, @port)
-        Fluent::SocketUtil::UdpHandler.new(@usock, log, @message_length_limit, callback)
-      else
-        # syslog family add "\n" to each message and this seems only way to split messages in tcp stream
-        lsock = client.listen_tcp(@bind, @port)
-        Coolio::TCPServer.new(lsock, nil, Fluent::SocketUtil::TcpHandler, log, "\n", callback)
-      end
-    end
-
     def emit(tag, time, record)
       router.emit(tag, time, record)
     rescue => e
data/lib/fluent/plugin/in_tail.rb
CHANGED
@@ -97,6 +97,7 @@ module Fluent::Plugin
       raise Fluent::ConfigError, "tail: 'path' parameter is required on tail input"
     end
 
+    # TODO: Use plugin_root_dir and storage plugin to store positions if available
     unless @pos_file
       $log.warn "'pos_file PATH' parameter is not set to a 'tail' source."
       $log.warn "this parameter is highly recommended to save the position to resume tailing."
data/lib/fluent/plugin/out_forward.rb
CHANGED
@@ -178,7 +178,7 @@ module Fluent::Plugin
     # But it should be overwritten by ack_response_timeout to rollback chunks after timeout
     if @ack_response_timeout && @delayed_commit_timeout != @ack_response_timeout
       log.info "delayed_commit_timeout is overwritten by ack_response_timeout"
-      @delayed_commit_timeout = @ack_response_timeout
+      @delayed_commit_timeout = @ack_response_timeout + 2 # minimum ack_reader IO.select interval is 1s
     end
 
     @rand_seed = Random.new.seed
@@ -214,22 +214,23 @@ module Fluent::Plugin
       select_a_healthy_node{|node| node.send_data(tag, chunk) }
     end
 
-    ACKWaitingSockInfo = Struct.new(:sock, :chunk_id, :node, :time, :timeout) do
+    ACKWaitingSockInfo = Struct.new(:sock, :chunk_id, :chunk_id_base64, :node, :time, :timeout) do
       def expired?(now)
         time + timeout < now
       end
     end
 
     def try_write(chunk)
+      log.trace "writing a chunk to destination", chunk_id: dump_unique_id_hex(chunk.unique_id)
       if chunk.empty?
         commit_write(chunk.unique_id)
         return
       end
       tag = chunk.metadata.tag
       sock, node = select_a_healthy_node{|n| n.send_data(tag, chunk) }
-
+      chunk_id_base64 = Base64.encode64(chunk.unique_id)
       current_time = Process.clock_gettime(PROCESS_CLOCK_ID)
-      info = ACKWaitingSockInfo.new(sock,
+      info = ACKWaitingSockInfo.new(sock, chunk.unique_id, chunk_id_base64, node, current_time, @ack_response_timeout)
       @sock_ack_waiting_mutex.synchronize do
         @sock_ack_waiting << info
       end
@@ -341,7 +342,7 @@ module Fluent::Plugin
       end
     end
 
-    # return chunk id
+    # return chunk id to be committed
     def read_ack_from_sock(sock, unpacker)
       begin
         raw_data = sock.recv(@read_length)
@@ -359,11 +360,14 @@ module Fluent::Plugin
       else
         unpacker.feed(raw_data)
         res = unpacker.read
-
+        log.trace "getting response from destination", host: info.node.host, port: info.node.port, chunk_id: dump_unique_id_hex(info.chunk_id), response: res
+        if res['ack'] != info.chunk_id_base64
           # Some errors may have occured when ack and chunk id is different, so send the chunk again.
-          log.warn "ack in response and chunk id in sent data are different", chunk_id: info.chunk_id, ack: res['ack']
+          log.warn "ack in response and chunk id in sent data are different", chunk_id: dump_unique_id_hex(info.chunk_id), ack: res['ack']
           rollback_write(info.chunk_id)
           return nil
+        else
+          log.trace "got a correct ack response", chunk_id: dump_unique_id_hex(info.chunk_id)
         end
         return info.chunk_id
       end
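`try_write` now stores a Base64 form of the chunk id alongside the raw id, and `read_ack_from_sock` compares it against the `ack` field echoed back by the receiving node. A standalone sketch of that comparison with synthetic stand-ins (not fluentd API calls):

```ruby
require 'base64'
require 'securerandom'

chunk_unique_id = SecureRandom.random_bytes(16)   # stand-in for a buffer chunk id (raw bytes)
chunk_id_base64 = Base64.encode64(chunk_unique_id)

response = { 'ack' => chunk_id_base64 }           # what a well-behaved receiver sends back

if response['ack'] == chunk_id_base64
  puts "ack matches; the chunk can be committed"
else
  puts "ack mismatch; roll back and resend the chunk"
end
```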
@@ -378,9 +382,9 @@ module Fluent::Plugin
 
     def ack_reader
       select_interval = if @delayed_commit_timeout > 3
-
+                          1
                         else
-                          @delayed_commit_timeout /
+                          @delayed_commit_timeout / 3.0
                         end
 
       unpacker = Fluent::Engine.msgpack_unpacker
@@ -388,30 +392,36 @@ module Fluent::Plugin
       while thread_current_running?
         now = Process.clock_gettime(PROCESS_CLOCK_ID)
         sockets = []
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        begin
+          @sock_ack_waiting_mutex.synchronize do
+            new_list = []
+            @sock_ack_waiting.each do |info|
+              if info.expired?(now)
+                # There are 2 types of cases when no response has been received from socket:
+                # (1) the node does not support sending responses
+                # (2) the node does support sending response but responses have not arrived for some reasons.
+                log.warn "no response from node. regard it as unavailable.", host: info.node.host, port: info.node.port
+                info.node.disable!
+                info.sock.close rescue nil
+                rollback_write(info.chunk_id)
+              else
+                sockets << info.sock
+                new_list << info
+              end
             end
+            @sock_ack_waiting = new_list
           end
-          @sock_ack_waiting = new_list
-        end
 
-
-
+          readable_sockets, _, _ = IO.select(sockets, nil, nil, select_interval)
+          next unless readable_sockets
 
-
-
+          readable_sockets.each do |sock|
+            chunk_id = read_ack_from_sock(sock, unpacker)
+            commit_write(chunk_id)
+          end
+        rescue => e
+          log.error "unexpected error while receiving ack", error: e
+          log.error_backtrace
         end
       end
     end
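The rewritten `ack_reader` waits on all pending ack sockets with `IO.select` and a bounded interval, so an idle pass returns `nil` instead of blocking forever; that is what the `next unless readable_sockets` guard handles. A minimal illustration of that stdlib behaviour:

```ruby
r, w = IO.pipe

readable, = IO.select([r], nil, nil, 0.5)  # nothing written yet -> nil after 0.5s
p readable                                 # => nil

w.write("ack")
readable, = IO.select([r], nil, nil, 0.5)  # now r is readable immediately
p readable.first.read_nonblock(16)         # => "ack"
```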
data/lib/fluent/plugin/output.rb
CHANGED
@@ -43,6 +43,8 @@ module Fluent
     PROCESS_CLOCK_ID = Process::CLOCK_MONOTONIC_RAW rescue Process::CLOCK_MONOTONIC
 
     config_param :time_as_integer, :bool, default: false
+    desc 'The threshold to show slow flush logs'
+    config_param :slow_flush_log_threshold, :float, default: 20.0
 
     # `<buffer>` and `<secondary>` sections are available only when '#format' and '#write' are implemented
     config_section :buffer, param_name: :buffer_config, init: true, required: false, multi: false, final: true do
@@ -985,6 +987,8 @@ module Fluent
       end
 
       begin
+        chunk_write_start = Process.clock_gettime(PROCESS_CLOCK_ID)
+
         if output.delayed_commit
           log.trace "executing delayed write and commit", chunk: dump_unique_id_hex(chunk.unique_id)
           @counters_monitor.synchronize{ @write_count += 1 }
@@ -992,14 +996,19 @@ module Fluent
           # delayed_commit_timeout for secondary is configured in <buffer> of primary (<secondary> don't get <buffer>)
           @dequeued_chunks << DequeuedChunkInfo.new(chunk.unique_id, Time.now, self.delayed_commit_timeout)
         end
+
         output.try_write(chunk)
+        check_slow_flush(chunk_write_start)
       else # output plugin without delayed purge
         chunk_id = chunk.unique_id
         dump_chunk_id = dump_unique_id_hex(chunk_id)
         log.trace "adding write count", instance: self.object_id
         @counters_monitor.synchronize{ @write_count += 1 }
         log.trace "executing sync write", chunk: dump_chunk_id
+
         output.write(chunk)
+        check_slow_flush(chunk_write_start)
+
         log.trace "write operation done, committing", chunk: dump_chunk_id
         commit_write(chunk_id, delayed: false, secondary: using_secondary)
         log.trace "done to commit a chunk", chunk: dump_chunk_id
@@ -1019,6 +1028,14 @@ module Fluent
       end
     end
 
+    def check_slow_flush(start)
+      elapsed_time = Process.clock_gettime(PROCESS_CLOCK_ID) - start
+      if elapsed_time > @slow_flush_log_threshold
+        log.warn "buffer flush took longer time than slow_flush_log_threshold:",
+                 elapsed_time: elapsed_time, slow_flush_log_threshold: @slow_flush_log_threshold, plugin_id: self.plugin_id
+      end
+    end
+
     def update_retry_state(chunk_id, using_secondary, error = nil)
       @retry_mutex.synchronize do
         @counters_monitor.synchronize{ @num_errors += 1 }
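`check_slow_flush` times each write against the monotonic clock and warns when it exceeds `slow_flush_log_threshold` (20 seconds by default). The same measurement pattern in isolation, with an artificially low threshold and a `sleep` standing in for the actual flush:

```ruby
CLOCK_ID = Process::CLOCK_MONOTONIC_RAW rescue Process::CLOCK_MONOTONIC
threshold = 0.1  # seconds; fluentd's default slow_flush_log_threshold is 20.0

start = Process.clock_gettime(CLOCK_ID)
sleep 0.2        # stand-in for output.write(chunk)
elapsed = Process.clock_gettime(CLOCK_ID) - start

warn "flush took #{elapsed.round(3)}s, over threshold #{threshold}s" if elapsed > threshold
```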
data/lib/fluent/plugin/storage_local.rb
CHANGED
@@ -29,10 +29,16 @@ module Fluent
     DEFAULT_FILE_MODE = 0644
 
     config_param :path, :string, default: nil
-    config_param :mode,
-
+    config_param :mode, default: DEFAULT_FILE_MODE do |v|
+      v.to_i(8)
+    end
+    config_param :dir_mode, default: DEFAULT_DIR_MODE do |v|
+      v.to_i(8)
+    end
     config_param :pretty_print, :bool, default: false
 
+    attr_reader :store # for test
+
     def initialize
       super
       @store = {}
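Both mode parameters now run their configured value through `to_i(8)`, so strings coming from the configuration are read as octal permission bits. For example:

```ruby
# String-to-octal conversion as done by the :mode / :dir_mode blocks above.
["0644", "0700", "755"].each do |v|
  mode = v.to_i(8)
  puts format("%-4s => 0o%o (decimal %d)", v, mode, mode)
end
# 0644 => 0o644 (decimal 420)
# 0700 => 0o700 (decimal 448)
# 755  => 0o755 (decimal 493)
```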
@@ -42,9 +48,13 @@ module Fluent
       super
 
       @on_memory = false
-      if
+      if @path
+        # use it
+      elsif root_dir = owner.plugin_root_dir
+        @path = File.join(root_dir, 'storage.json')
+      else
         if @persistent
-          raise Fluent::ConfigError, "Plugin @id or path for <storage> required
+          raise Fluent::ConfigError, "Plugin @id or path for <storage> required when 'persistent' is true"
         else
           if @autosave
             log.warn "both of Plugin @id and path for <storage> are not specified. Using on-memory store."
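`configure` now resolves the storage file in this order: an explicit `path`, then `storage.json` under the owning plugin's root directory, and otherwise an on-memory store (raising if `persistent` is set). A minimal sketch of that precedence; the directory value below is a hypothetical stand-in:

```ruby
path            = nil                                      # <storage> path parameter
plugin_root_dir = "/var/lib/fluentd/worker0/plugin-abc"    # nil unless root_dir and @id are set
persistent      = false

resolved =
  if path
    path
  elsif plugin_root_dir
    File.join(plugin_root_dir, 'storage.json')
  elsif persistent
    raise "Plugin @id or path for <storage> required when 'persistent' is true"
  else
    :on_memory
  end

puts resolved.inspect   # => "/var/lib/fluentd/worker0/plugin-abc/storage.json"
```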
@@ -53,18 +63,11 @@ module Fluent
         end
         @on_memory = true
       end
-      elsif @path
-        # ok
-      else # @_plugin_id_configured is true
-        log.warn "path for <storage> is not specified. Using on-memory store temporarily, but will use file store after support global storage path"
-        @on_memory = true
-        ## TODO: get process-wide directory for plugin storage, and generate path for this plugin storage instance
-        # path =
       end
 
       if !@on_memory
         dir = File.dirname(@path)
-        FileUtils.mkdir_p(dir, mode: @dir_mode) unless
+        FileUtils.mkdir_p(dir, mode: @dir_mode) unless Dir.exist?(dir)
         if File.exist?(@path)
           raise Fluent::ConfigError, "Plugin storage path '#{@path}' is not readable/writable" unless File.readable?(@path) && File.writable?(@path)
           begin
@@ -75,7 +78,7 @@ module Fluent
           raise Fluent::ConfigError, "Unexpected error: failed to read data from plugin storage file: '#{@path}'"
         end
       else
-        raise Fluent::ConfigError, "Directory is not writable for plugin storage file '#{
+        raise Fluent::ConfigError, "Directory is not writable for plugin storage file '#{@path}'" unless File.stat(dir).writable?
       end
     end
   end