fluentd 1.15.3-x64-mingw32 → 1.16.0-x64-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. checksums.yaml +4 -4
  2. data/.github/workflows/linux-test.yaml +2 -2
  3. data/.github/workflows/macos-test.yaml +2 -2
  4. data/.github/workflows/windows-test.yaml +2 -2
  5. data/CHANGELOG.md +77 -0
  6. data/MAINTAINERS.md +2 -0
  7. data/README.md +0 -1
  8. data/fluentd.gemspec +2 -2
  9. data/lib/fluent/command/fluentd.rb +55 -53
  10. data/lib/fluent/daemon.rb +2 -4
  11. data/lib/fluent/log/console_adapter.rb +66 -0
  12. data/lib/fluent/log.rb +35 -5
  13. data/lib/fluent/plugin/base.rb +5 -7
  14. data/lib/fluent/plugin/buf_file.rb +32 -3
  15. data/lib/fluent/plugin/buf_file_single.rb +32 -3
  16. data/lib/fluent/plugin/buffer/file_chunk.rb +1 -1
  17. data/lib/fluent/plugin/buffer.rb +21 -0
  18. data/lib/fluent/plugin/in_tcp.rb +4 -2
  19. data/lib/fluent/plugin/out_forward/ack_handler.rb +19 -4
  20. data/lib/fluent/plugin/out_forward.rb +2 -2
  21. data/lib/fluent/plugin/out_secondary_file.rb +39 -22
  22. data/lib/fluent/plugin/output.rb +49 -12
  23. data/lib/fluent/plugin_helper/http_server/server.rb +2 -1
  24. data/lib/fluent/supervisor.rb +157 -251
  25. data/lib/fluent/test/driver/base.rb +11 -5
  26. data/lib/fluent/test/driver/filter.rb +4 -0
  27. data/lib/fluent/test/startup_shutdown.rb +6 -8
  28. data/lib/fluent/version.rb +1 -1
  29. data/test/command/test_ctl.rb +1 -1
  30. data/test/command/test_fluentd.rb +137 -6
  31. data/test/command/test_plugin_config_formatter.rb +0 -1
  32. data/test/compat/test_parser.rb +5 -5
  33. data/test/config/test_system_config.rb +0 -8
  34. data/test/log/test_console_adapter.rb +110 -0
  35. data/test/plugin/out_forward/test_ack_handler.rb +39 -0
  36. data/test/plugin/test_base.rb +98 -0
  37. data/test/plugin/test_buf_file.rb +62 -23
  38. data/test/plugin/test_buf_file_single.rb +65 -0
  39. data/test/plugin/test_in_http.rb +2 -3
  40. data/test/plugin/test_in_monitor_agent.rb +2 -3
  41. data/test/plugin/test_in_tcp.rb +15 -0
  42. data/test/plugin/test_out_forward.rb +14 -18
  43. data/test/plugin/test_out_http.rb +1 -0
  44. data/test/plugin/test_output.rb +269 -0
  45. data/test/plugin/test_parser_regexp.rb +1 -6
  46. data/test/plugin_helper/test_http_server_helper.rb +1 -1
  47. data/test/plugin_helper/test_server.rb +10 -5
  48. data/test/test_config.rb +0 -21
  49. data/test/test_formatter.rb +23 -20
  50. data/test/test_log.rb +71 -36
  51. data/test/test_supervisor.rb +277 -282
  52. metadata +12 -19
  53. data/.drone.yml +0 -35
  54. data/.gitlab-ci.yml +0 -103
  55. data/test/test_logger_initializer.rb +0 -46
@@ -139,13 +139,20 @@ module Fluent
139
139
  def resume
140
140
  stage = {}
141
141
  queue = []
142
+ exist_broken_file = false
142
143
 
143
144
  patterns = [@path]
144
145
  patterns.unshift @additional_resume_path if @additional_resume_path
145
146
  Dir.glob(escaped_patterns(patterns)) do |path|
146
147
  next unless File.file?(path)
147
148
 
148
- log.debug { "restoring buffer file: path = #{path}" }
149
+ if owner.respond_to?(:buffer_config) && owner.buffer_config&.flush_at_shutdown
150
+ # When `flush_at_shutdown` is `true`, the remaining chunk files during resuming are possibly broken
151
+ # since there may be a power failure or similar failure.
152
+ log.warn { "restoring buffer file: path = #{path}" }
153
+ else
154
+ log.debug { "restoring buffer file: path = #{path}" }
155
+ end
149
156
 
150
157
  m = new_metadata() # this metadata will be overwritten by resuming .meta file content
151
158
  # so it should not added into @metadata_list for now
@@ -158,6 +165,7 @@ module Fluent
158
165
  begin
159
166
  chunk = Fluent::Plugin::Buffer::FileChunk.new(m, path, mode, compress: @compress) # file chunk resumes contents of metadata
160
167
  rescue Fluent::Plugin::Buffer::FileChunk::FileChunkError => e
168
+ exist_broken_file = true
161
169
  handle_broken_files(path, mode, e)
162
170
  next
163
171
  end
@@ -182,6 +190,15 @@ module Fluent
182
190
 
183
191
  queue.sort_by!{ |chunk| chunk.modified_at }
184
192
 
193
+ # If one of the files is corrupted, other files may also be corrupted and be undetected.
194
+ # The time periods of each chunk are helpful to check the data.
195
+ if exist_broken_file
196
+ log.info "Since a broken chunk file was found, it is possible that other files remaining at the time of resuming were also broken. Here is the list of the files."
197
+ (stage.values + queue).each { |chunk|
198
+ log.info " #{chunk.path}:", :created_at => chunk.created_at, :modified_at => chunk.modified_at
199
+ }
200
+ end
201
+
185
202
  return stage, queue
186
203
  end
187
204
 
@@ -195,8 +212,20 @@ module Fluent
195
212
  end
196
213
 
197
214
  def handle_broken_files(path, mode, e)
198
- log.error "found broken chunk file during resume. Deleted corresponding files:", :path => path, :mode => mode, :err_msg => e.message
199
- # After support 'backup_dir' feature, these files are moved to backup_dir instead of unlink.
215
+ log.error "found broken chunk file during resume.", :path => path, :mode => mode, :err_msg => e.message
216
+ unique_id = Fluent::Plugin::Buffer::FileChunk.unique_id_from_path(path)
217
+ backup(unique_id) { |f|
218
+ File.open(path, 'rb') { |chunk|
219
+ chunk.set_encoding(Encoding::ASCII_8BIT)
220
+ chunk.sync = true
221
+ chunk.binmode
222
+ IO.copy_stream(chunk, f)
223
+ }
224
+ }
225
+ rescue => error
226
+ log.error "backup failed. Delete corresponding files.", :err_msg => error.message
227
+ ensure
228
+ log.warn "disable_chunk_backup is true. #{dump_unique_id_hex(unique_id)} chunk is thrown away." if @disable_chunk_backup
200
229
  File.unlink(path, path + '.meta') rescue nil
201
230
  end
202
231
 
@@ -160,13 +160,20 @@ module Fluent
160
160
  def resume
161
161
  stage = {}
162
162
  queue = []
163
+ exist_broken_file = false
163
164
 
164
165
  patterns = [@path]
165
166
  patterns.unshift @additional_resume_path if @additional_resume_path
166
167
  Dir.glob(escaped_patterns(patterns)) do |path|
167
168
  next unless File.file?(path)
168
169
 
169
- log.debug { "restoring buffer file: path = #{path}" }
170
+ if owner.respond_to?(:buffer_config) && owner.buffer_config&.flush_at_shutdown
171
+ # When `flush_at_shutdown` is `true`, the remaining chunk files during resuming are possibly broken
172
+ # since there may be a power failure or similar failure.
173
+ log.warn { "restoring buffer file: path = #{path}" }
174
+ else
175
+ log.debug { "restoring buffer file: path = #{path}" }
176
+ end
170
177
 
171
178
  m = new_metadata() # this metadata will be updated in FileSingleChunk.new
172
179
  mode = Fluent::Plugin::Buffer::FileSingleChunk.assume_chunk_state(path)
@@ -179,6 +186,7 @@ module Fluent
179
186
  chunk = Fluent::Plugin::Buffer::FileSingleChunk.new(m, path, mode, @key_in_path, compress: @compress)
180
187
  chunk.restore_size(@chunk_format) if @calc_num_records
181
188
  rescue Fluent::Plugin::Buffer::FileSingleChunk::FileChunkError => e
189
+ exist_broken_file = true
182
190
  handle_broken_files(path, mode, e)
183
191
  next
184
192
  end
@@ -193,6 +201,15 @@ module Fluent
193
201
 
194
202
  queue.sort_by!(&:modified_at)
195
203
 
204
+ # If one of the files is corrupted, other files may also be corrupted and be undetected.
205
+ # The time periods of each chunk are helpful to check the data.
206
+ if exist_broken_file
207
+ log.info "Since a broken chunk file was found, it is possible that other files remaining at the time of resuming were also broken. Here is the list of the files."
208
+ (stage.values + queue).each { |chunk|
209
+ log.info " #{chunk.path}:", :created_at => chunk.created_at, :modified_at => chunk.modified_at
210
+ }
211
+ end
212
+
196
213
  return stage, queue
197
214
  end
198
215
 
@@ -207,8 +224,20 @@ module Fluent
207
224
  end
208
225
 
209
226
  def handle_broken_files(path, mode, e)
210
- log.error "found broken chunk file during resume. Delete corresponding files:", path: path, mode: mode, err_msg: e.message
211
- # After support 'backup_dir' feature, these files are moved to backup_dir instead of unlink.
227
+ log.error "found broken chunk file during resume.", :path => path, :mode => mode, :err_msg => e.message
228
+ unique_id, _ = Fluent::Plugin::Buffer::FileSingleChunk.unique_id_and_key_from_path(path)
229
+ backup(unique_id) { |f|
230
+ File.open(path, 'rb') { |chunk|
231
+ chunk.set_encoding(Encoding::ASCII_8BIT)
232
+ chunk.sync = true
233
+ chunk.binmode
234
+ IO.copy_stream(chunk, f)
235
+ }
236
+ }
237
+ rescue => error
238
+ log.error "backup failed. Delete corresponding files.", :err_msg => error.message
239
+ ensure
240
+ log.warn "disable_chunk_backup is true. #{dump_unique_id_hex(unique_id)} chunk is thrown away." if @disable_chunk_backup
212
241
  File.unlink(path) rescue nil
213
242
  end
214
243
 
@@ -204,7 +204,7 @@ module Fluent
204
204
  end
205
205
  end
206
206
 
207
- # used only for queued v0.12 buffer path
207
+ # used only for queued v0.12 buffer path or broken files
208
208
  def self.unique_id_from_path(path)
209
209
  if /\.(b|q)([0-9a-f]+)\.[^\/]*\Z/n =~ path # //n switch means explicit 'ASCII-8BIT' pattern
210
210
  return $2.scan(/../).map{|x| x.to_i(16) }.pack('C*')
@@ -66,6 +66,9 @@ module Fluent
66
66
  desc 'Compress buffered data.'
67
67
  config_param :compress, :enum, list: [:text, :gzip], default: :text
68
68
 
69
+ desc 'If true, chunks are thrown away when unrecoverable error happens'
70
+ config_param :disable_chunk_backup, :bool, default: false
71
+
69
72
  Metadata = Struct.new(:timekey, :tag, :variables, :seq) do
70
73
  def initialize(timekey, tag, variables)
71
74
  super(timekey, tag, variables, 0)
@@ -903,6 +906,24 @@ module Fluent
903
906
  { 'buffer' => stats }
904
907
  end
905
908
 
909
+ def backup(chunk_unique_id)
910
+ unique_id = dump_unique_id_hex(chunk_unique_id)
911
+
912
+ if @disable_chunk_backup
913
+ log.warn "disable_chunk_backup is true. #{unique_id} chunk is not backed up."
914
+ return
915
+ end
916
+
917
+ safe_owner_id = owner.plugin_id.gsub(/[ "\/\\:;|*<>?]/, '_')
918
+ backup_base_dir = system_config.root_dir || DEFAULT_BACKUP_DIR
919
+ backup_file = File.join(backup_base_dir, 'backup', "worker#{fluentd_worker_id}", safe_owner_id, "#{unique_id}.log")
920
+ backup_dir = File.dirname(backup_file)
921
+
922
+ log.warn "bad chunk is moved to #{backup_file}"
923
+ FileUtils.mkdir_p(backup_dir, mode: system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION) unless Dir.exist?(backup_dir)
924
+ File.open(backup_file, 'ab', system_config.file_permission || Fluent::DEFAULT_FILE_PERMISSION) { |f| yield f }
925
+ end
926
+
906
927
  private
907
928
 
908
929
  def optimistic_queued?(metadata = nil)
@@ -40,6 +40,8 @@ module Fluent::Plugin
40
40
 
41
41
  desc 'The payload is read up to this character.'
42
42
  config_param :delimiter, :string, default: "\n" # syslog family add "\n" to each message and this seems only way to split messages in tcp stream
43
+ desc 'Check the remote connection is still available by sending a keepalive packet if this value is true.'
44
+ config_param :send_keepalive_packet, :bool, default: false
43
45
 
44
46
  # in_forward like host/network restriction
45
47
  config_section :security, required: false, multi: false do
@@ -101,7 +103,7 @@ module Fluent::Plugin
101
103
  log.info "listening tcp socket", bind: @bind, port: @port
102
104
  del_size = @delimiter.length
103
105
  if @_extract_enabled && @_extract_tag_key
104
- server_create(:in_tcp_server_single_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key) do |data, conn|
106
+ server_create(:in_tcp_server_single_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
105
107
  unless check_client(conn)
106
108
  conn.close
107
109
  next
@@ -131,7 +133,7 @@ module Fluent::Plugin
131
133
  buf.slice!(0, pos) if pos > 0
132
134
  end
133
135
  else
134
- server_create(:in_tcp_server_batch_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key) do |data, conn|
136
+ server_create(:in_tcp_server_batch_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
135
137
  unless check_client(conn)
136
138
  conn.close
137
139
  next
@@ -59,7 +59,13 @@ module Fluent::Plugin
59
59
  @ack_waitings = new_list
60
60
  end
61
61
 
62
- readable_sockets, _, _ = IO.select(sockets, nil, nil, select_interval)
62
+ begin
63
+ readable_sockets, _, _ = IO.select(sockets, nil, nil, select_interval)
64
+ rescue IOError
65
+ @log.info "connection closed while waiting for readable sockets"
66
+ readable_sockets = nil
67
+ end
68
+
63
69
  if readable_sockets
64
70
  readable_sockets.each do |sock|
65
71
  results << read_ack_from_sock(sock)
@@ -109,13 +115,22 @@ module Fluent::Plugin
109
115
  raw_data = sock.instance_of?(Fluent::PluginHelper::Socket::WrappedSocket::TLS) ? sock.readpartial(@read_length) : sock.recv(@read_length)
110
116
  rescue Errno::ECONNRESET, EOFError # ECONNRESET for #recv, #EOFError for #readpartial
111
117
  raw_data = ''
118
+ rescue IOError
119
+ @log.info "socket closed while receiving ack response"
120
+ return nil, Result::FAILED
112
121
  end
113
122
 
114
123
  info = find(sock)
115
124
 
116
- # When connection is closed by remote host, socket is ready to read and #recv returns an empty string that means EOF.
117
- # If this happens we assume the data wasn't delivered and retry it.
118
- if raw_data.empty?
125
+ if info.nil?
126
+ # The info can be deleted by another thread during `sock.recv()` and `find()`.
127
+ # This is OK since another thread has completed to process the ack, so we can skip this.
128
+ # Note: exclusion mechanism about `collect_response()` may need to be considered.
129
+ @log.debug "could not find the ack info. this ack may be processed by another thread."
130
+ return nil, Result::FAILED
131
+ elsif raw_data.empty?
132
+ # When connection is closed by remote host, socket is ready to read and #recv returns an empty string that means EOF.
133
+ # If this happens we assume the data wasn't delivered and retry it.
119
134
  @log.warn 'destination node closed the connection. regard it as unavailable.', host: info.node.host, port: info.node.port
120
135
  # info.node.disable!
121
136
  return info, Result::FAILED
@@ -521,8 +521,8 @@ module Fluent::Plugin
521
521
  when AckHandler::Result::SUCCESS
522
522
  commit_write(chunk_id)
523
523
  when AckHandler::Result::FAILED
524
- node.disable!
525
- rollback_write(chunk_id, update_retry: false)
524
+ node&.disable!
525
+ rollback_write(chunk_id, update_retry: false) if chunk_id
526
526
  when AckHandler::Result::CHUNKID_UNMATCHED
527
527
  rollback_write(chunk_id, update_retry: false)
528
528
  else
@@ -64,31 +64,29 @@ module Fluent::Plugin
64
64
  end
65
65
 
66
66
  def multi_workers_ready?
67
- ### TODO: add hack to synchronize for multi workers
68
67
  true
69
68
  end
70
69
 
71
70
  def write(chunk)
72
71
  path_without_suffix = extract_placeholders(@path_without_suffix, chunk)
73
- path = generate_path(path_without_suffix)
74
- FileUtils.mkdir_p File.dirname(path), mode: @dir_perm
75
-
76
- case @compress
77
- when :text
78
- File.open(path, "ab", @file_perm) {|f|
79
- f.flock(File::LOCK_EX)
80
- chunk.write_to(f)
81
- }
82
- when :gzip
83
- File.open(path, "ab", @file_perm) {|f|
84
- f.flock(File::LOCK_EX)
85
- gz = Zlib::GzipWriter.new(f)
86
- chunk.write_to(gz)
87
- gz.close
88
- }
72
+ generate_path(path_without_suffix) do |path|
73
+ FileUtils.mkdir_p File.dirname(path), mode: @dir_perm
74
+
75
+ case @compress
76
+ when :text
77
+ File.open(path, "ab", @file_perm) {|f|
78
+ f.flock(File::LOCK_EX)
79
+ chunk.write_to(f)
80
+ }
81
+ when :gzip
82
+ File.open(path, "ab", @file_perm) {|f|
83
+ f.flock(File::LOCK_EX)
84
+ gz = Zlib::GzipWriter.new(f)
85
+ chunk.write_to(gz)
86
+ gz.close
87
+ }
88
+ end
89
89
  end
90
-
91
- path
92
90
  end
93
91
 
94
92
  private
@@ -117,15 +115,34 @@ module Fluent::Plugin
117
115
 
118
116
  def generate_path(path_without_suffix)
119
117
  if @append
120
- "#{path_without_suffix}#{@suffix}"
121
- else
118
+ path = "#{path_without_suffix}#{@suffix}"
119
+ synchronize_path(path) do
120
+ yield path
121
+ end
122
+ return path
123
+ end
124
+
125
+ begin
122
126
  i = 0
123
127
  loop do
124
128
  path = "#{path_without_suffix}.#{i}#{@suffix}"
125
- return path unless File.exist?(path)
129
+ break unless File.exist?(path)
126
130
  i += 1
127
131
  end
132
+ synchronize_path(path) do
133
+ # If multiple processes or threads select the same path and another
134
+ # one entered this locking block first, the file should already
135
+ # exist and this one should retry to find new path.
136
+ raise FileAlreadyExist if File.exist?(path)
137
+ yield path
138
+ end
139
+ rescue FileAlreadyExist
140
+ retry
128
141
  end
142
+ path
143
+ end
144
+
145
+ class FileAlreadyExist < StandardError
129
146
  end
130
147
  end
131
148
  end
@@ -99,7 +99,6 @@ module Fluent
99
99
  config_param :retry_max_interval, :time, default: nil, desc: 'The maximum interval seconds for exponential backoff between retries while failing.'
100
100
 
101
101
  config_param :retry_randomize, :bool, default: true, desc: 'If true, output plugin will retry after randomized interval not to do burst retries.'
102
- config_param :disable_chunk_backup, :bool, default: false, desc: 'If true, chunks are thrown away when unrecoverable error happens'
103
102
  end
104
103
 
105
104
  config_section :secondary, param_name: :secondary_config, required: false, multi: false, final: true do
@@ -199,6 +198,7 @@ module Fluent
199
198
  def initialize
200
199
  super
201
200
  @counter_mutex = Mutex.new
201
+ @flush_thread_mutex = Mutex.new
202
202
  @buffering = false
203
203
  @delayed_commit = false
204
204
  @as_secondary = false
@@ -378,6 +378,7 @@ module Fluent
378
378
  buffer_conf = conf.elements(name: 'buffer').first || Fluent::Config::Element.new('buffer', '', {}, [])
379
379
  @buffer = Plugin.new_buffer(buffer_type, parent: self)
380
380
  @buffer.configure(buffer_conf)
381
+ keep_buffer_config_compat
381
382
  @buffer.enable_update_timekeys if @chunk_key_time
382
383
 
383
384
  @flush_at_shutdown = @buffer_config.flush_at_shutdown
@@ -425,7 +426,9 @@ module Fluent
425
426
  end
426
427
  @secondary.acts_as_secondary(self)
427
428
  @secondary.configure(secondary_conf)
428
- if (self.class != @secondary.class) && (@custom_format || @secondary.implement?(:custom_format))
429
+ if (@secondary.class != SecondaryFileOutput) &&
430
+ (self.class != @secondary.class) &&
431
+ (@custom_format || @secondary.implement?(:custom_format))
429
432
  log.warn "Use different plugin for secondary. Check the plugin works with primary like secondary_file", primary: self.class.to_s, secondary: @secondary.class.to_s
430
433
  end
431
434
  else
@@ -435,6 +438,12 @@ module Fluent
435
438
  self
436
439
  end
437
440
 
441
+ def keep_buffer_config_compat
442
+ # Need this to call `@buffer_config.disable_chunk_backup` just as before,
443
+ # since some plugins may use this option in this way.
444
+ @buffer_config[:disable_chunk_backup] = @buffer.disable_chunk_backup
445
+ end
446
+
438
447
  def start
439
448
  super
440
449
 
@@ -591,6 +600,42 @@ module Fluent
591
600
  super
592
601
  end
593
602
 
603
+ def actual_flush_thread_count
604
+ return 0 unless @buffering
605
+ return @buffer_config.flush_thread_count unless @as_secondary
606
+ @primary_instance.buffer_config.flush_thread_count
607
+ end
608
+
609
+ # Ensures `path` (filename or filepath) processable
610
+ # only by the current thread in the current process.
611
+ # For multiple workers, the lock is shared if `path` is the same value.
612
+ # For multiple threads, the lock is shared by all threads in the same process.
613
+ def synchronize_path(path)
614
+ synchronize_path_in_workers(path) do
615
+ synchronize_in_threads do
616
+ yield
617
+ end
618
+ end
619
+ end
620
+
621
+ def synchronize_path_in_workers(path)
622
+ need_worker_lock = system_config.workers > 1
623
+ if need_worker_lock
624
+ acquire_worker_lock(path) { yield }
625
+ else
626
+ yield
627
+ end
628
+ end
629
+
630
+ def synchronize_in_threads
631
+ need_thread_lock = actual_flush_thread_count > 1
632
+ if need_thread_lock
633
+ @flush_thread_mutex.synchronize { yield }
634
+ else
635
+ yield
636
+ end
637
+ end
638
+
594
639
  def support_in_v12_style?(feature)
595
640
  # for plugins written in v0.12 styles
596
641
  case feature
@@ -1240,18 +1285,10 @@ module Fluent
1240
1285
  end
1241
1286
 
1242
1287
  def backup_chunk(chunk, using_secondary, delayed_commit)
1243
- if @buffer_config.disable_chunk_backup
1288
+ if @buffer.disable_chunk_backup
1244
1289
  log.warn "disable_chunk_backup is true. #{dump_unique_id_hex(chunk.unique_id)} chunk is thrown away"
1245
1290
  else
1246
- unique_id = dump_unique_id_hex(chunk.unique_id)
1247
- safe_plugin_id = plugin_id.gsub(/[ "\/\\:;|*<>?]/, '_')
1248
- backup_base_dir = system_config.root_dir || DEFAULT_BACKUP_DIR
1249
- backup_file = File.join(backup_base_dir, 'backup', "worker#{fluentd_worker_id}", safe_plugin_id, "#{unique_id}.log")
1250
- backup_dir = File.dirname(backup_file)
1251
-
1252
- log.warn "bad chunk is moved to #{backup_file}"
1253
- FileUtils.mkdir_p(backup_dir, mode: system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION) unless Dir.exist?(backup_dir)
1254
- File.open(backup_file, 'ab', system_config.file_permission || Fluent::DEFAULT_FILE_PERMISSION) { |f|
1291
+ @buffer.backup(chunk.unique_id) { |f|
1255
1292
  chunk.write_to(f)
1256
1293
  }
1257
1294
  end
@@ -21,6 +21,7 @@ require 'async/http/endpoint'
21
21
  require 'fluent/plugin_helper/http_server/app'
22
22
  require 'fluent/plugin_helper/http_server/router'
23
23
  require 'fluent/plugin_helper/http_server/methods'
24
+ require 'fluent/log/console_adapter'
24
25
 
25
26
  module Fluent
26
27
  module PluginHelper
@@ -38,7 +39,7 @@ module Fluent
38
39
  scheme = tls_context ? 'https' : 'http'
39
40
  @uri = URI("#{scheme}://#{@addr}:#{@port}").to_s
40
41
  @router = Router.new(default_app)
41
- @reactor = Async::Reactor.new(nil, logger: @logger)
42
+ @reactor = Async::Reactor.new(nil, logger: Fluent::Log::ConsoleAdapter.wrap(@logger))
42
43
 
43
44
  opts = if tls_context
44
45
  { ssl_context: tls_context }