fluentd 1.18.0 → 1.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93) hide show
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +116 -0
  3. data/CHANGELOG.md +235 -12
  4. data/MAINTAINERS.md +8 -2
  5. data/README.md +3 -7
  6. data/Rakefile +2 -0
  7. data/SECURITY.md +5 -3
  8. data/lib/fluent/command/cap_ctl.rb +2 -2
  9. data/lib/fluent/command/fluentd.rb +6 -2
  10. data/lib/fluent/compat/formatter.rb +6 -0
  11. data/lib/fluent/compat/socket_util.rb +2 -2
  12. data/lib/fluent/config/configure_proxy.rb +1 -1
  13. data/lib/fluent/config/element.rb +2 -2
  14. data/lib/fluent/config/literal_parser.rb +3 -3
  15. data/lib/fluent/config/parser.rb +15 -3
  16. data/lib/fluent/config/section.rb +2 -2
  17. data/lib/fluent/config/types.rb +1 -1
  18. data/lib/fluent/config/v1_parser.rb +3 -3
  19. data/lib/fluent/counter/store.rb +1 -1
  20. data/lib/fluent/engine.rb +1 -1
  21. data/lib/fluent/env.rb +3 -2
  22. data/lib/fluent/event.rb +7 -6
  23. data/lib/fluent/log/console_adapter.rb +5 -7
  24. data/lib/fluent/log.rb +23 -0
  25. data/lib/fluent/plugin/bare_output.rb +0 -16
  26. data/lib/fluent/plugin/base.rb +2 -2
  27. data/lib/fluent/plugin/buf_file.rb +15 -1
  28. data/lib/fluent/plugin/buf_file_single.rb +15 -1
  29. data/lib/fluent/plugin/buffer/chunk.rb +74 -10
  30. data/lib/fluent/plugin/buffer/file_chunk.rb +9 -5
  31. data/lib/fluent/plugin/buffer/file_single_chunk.rb +3 -3
  32. data/lib/fluent/plugin/buffer/memory_chunk.rb +2 -2
  33. data/lib/fluent/plugin/buffer.rb +34 -6
  34. data/lib/fluent/plugin/compressable.rb +68 -22
  35. data/lib/fluent/plugin/filter.rb +0 -8
  36. data/lib/fluent/plugin/filter_record_transformer.rb +1 -1
  37. data/lib/fluent/plugin/formatter_csv.rb +18 -4
  38. data/lib/fluent/plugin/formatter_json.rb +7 -4
  39. data/lib/fluent/plugin/formatter_out_file.rb +5 -2
  40. data/lib/fluent/plugin/in_forward.rb +9 -5
  41. data/lib/fluent/plugin/in_http.rb +9 -4
  42. data/lib/fluent/plugin/in_monitor_agent.rb +4 -8
  43. data/lib/fluent/plugin/in_tail/position_file.rb +1 -1
  44. data/lib/fluent/plugin/in_tail.rb +80 -57
  45. data/lib/fluent/plugin/in_tcp.rb +2 -2
  46. data/lib/fluent/plugin/in_udp.rb +1 -1
  47. data/lib/fluent/plugin/input.rb +0 -8
  48. data/lib/fluent/plugin/multi_output.rb +1 -17
  49. data/lib/fluent/plugin/out_exec_filter.rb +2 -2
  50. data/lib/fluent/plugin/out_file.rb +37 -30
  51. data/lib/fluent/plugin/out_forward/connection_manager.rb +2 -2
  52. data/lib/fluent/plugin/out_forward.rb +23 -13
  53. data/lib/fluent/plugin/out_http.rb +1 -1
  54. data/lib/fluent/plugin/out_secondary_file.rb +2 -2
  55. data/lib/fluent/plugin/out_stdout.rb +10 -3
  56. data/lib/fluent/plugin/out_stream.rb +3 -3
  57. data/lib/fluent/plugin/output.rb +24 -35
  58. data/lib/fluent/plugin/owned_by_mixin.rb +2 -2
  59. data/lib/fluent/plugin/parser.rb +3 -3
  60. data/lib/fluent/plugin/parser_json.rb +3 -3
  61. data/lib/fluent/plugin/sd_file.rb +2 -2
  62. data/lib/fluent/plugin/storage_local.rb +8 -4
  63. data/lib/fluent/plugin.rb +1 -1
  64. data/lib/fluent/plugin_helper/child_process.rb +2 -2
  65. data/lib/fluent/plugin_helper/http_server/request.rb +13 -2
  66. data/lib/fluent/plugin_helper/http_server/server.rb +4 -14
  67. data/lib/fluent/plugin_helper/http_server.rb +1 -8
  68. data/lib/fluent/plugin_helper/metrics.rb +7 -0
  69. data/lib/fluent/plugin_helper/server.rb +4 -1
  70. data/lib/fluent/plugin_helper/service_discovery.rb +1 -1
  71. data/lib/fluent/plugin_helper/socket_option.rb +2 -2
  72. data/lib/fluent/plugin_helper/storage.rb +1 -1
  73. data/lib/fluent/plugin_id.rb +3 -3
  74. data/lib/fluent/root_agent.rb +4 -3
  75. data/lib/fluent/static_config_analysis.rb +3 -2
  76. data/lib/fluent/supervisor.rb +51 -5
  77. data/lib/fluent/system_config.rb +13 -4
  78. data/lib/fluent/test/base.rb +1 -1
  79. data/lib/fluent/test/driver/base.rb +2 -2
  80. data/lib/fluent/test/filter_test.rb +2 -2
  81. data/lib/fluent/test/formatter_test.rb +1 -1
  82. data/lib/fluent/test/helpers.rb +4 -0
  83. data/lib/fluent/test/input_test.rb +2 -2
  84. data/lib/fluent/test/output_test.rb +4 -4
  85. data/lib/fluent/test/parser_test.rb +1 -1
  86. data/lib/fluent/tls.rb +24 -0
  87. data/lib/fluent/variable_store.rb +1 -1
  88. data/lib/fluent/version.rb +1 -1
  89. data/lib/fluent/winsvc.rb +38 -8
  90. metadata +85 -16
  91. data/lib/fluent/plugin_helper/http_server/compat/server.rb +0 -92
  92. data/lib/fluent/plugin_helper/http_server/compat/ssl_context_extractor.rb +0 -52
  93. data/lib/fluent/plugin_helper/http_server/compat/webrick_handler.rb +0 -58
@@ -87,7 +87,7 @@ module Fluent::Plugin
87
87
  config_param :verify_connection_at_startup, :bool, default: false
88
88
 
89
89
  desc 'Compress buffered data.'
90
- config_param :compress, :enum, list: [:text, :gzip], default: :text
90
+ config_param :compress, :enum, list: [:text, :gzip, :zstd], default: :text
91
91
 
92
92
  desc 'The default version of TLS transport.'
93
93
  config_param :tls_version, :enum, list: Fluent::TLS::SUPPORTED_VERSIONS, default: Fluent::TLS::DEFAULT_VERSION
@@ -251,10 +251,14 @@ module Fluent::Plugin
251
251
  end
252
252
 
253
253
  unless @as_secondary
254
- if @compress == :gzip && @buffer.compress == :text
255
- @buffer.compress = :gzip
256
- elsif @compress == :text && @buffer.compress == :gzip
257
- log.info "buffer is compressed. If you also want to save the bandwidth of a network, Add `compress` configuration in <match>"
254
+ if @buffer.compress == :text
255
+ @buffer.compress = @compress
256
+ else
257
+ if @compress == :text
258
+ log.info "buffer is compressed. If you also want to save the bandwidth of a network, Add `compress` configuration in <match>"
259
+ elsif @compress != @buffer.compress
260
+ raise Fluent::ConfigError, "You cannot specify different compression formats for Buffer (Buffer: #{@buffer.compress}, Self: #{@compress})"
261
+ end
258
262
  end
259
263
  end
260
264
 
@@ -267,9 +271,15 @@ module Fluent::Plugin
267
271
  end
268
272
 
269
273
  raise Fluent::ConfigError, "ack_response_timeout must be a positive integer" if @ack_response_timeout < 1
274
+
275
+ if @compress == :zstd
276
+ log.warn "zstd compression feature is an experimental new feature supported since v1.19.0." +
277
+ " Please make sure that the destination server also supports this feature before using it." +
278
+ " in_forward plugin for Fluentd supports it since v1.19.0."
279
+ end
280
+
270
281
  @healthy_nodes_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "healthy_nodes_count", help_text: "Number of count healthy nodes", prefer_gauge: true)
271
282
  @registered_nodes_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "registered_nodes_count", help_text: "Number of count registered nodes", prefer_gauge: true)
272
-
273
283
  end
274
284
 
275
285
  def multi_workers_ready?
@@ -295,7 +305,7 @@ module Fluent::Plugin
295
305
  unless @heartbeat_type == :none
296
306
  if @heartbeat_type == :udp
297
307
  @usock = socket_create_udp(service_discovery_services.first.host, service_discovery_services.first.port, nonblock: true)
298
- server_create_udp(:out_forward_heartbeat_receiver, 0, socket: @usock, max_bytes: @read_length, &method(:on_udp_heatbeat_response_recv))
308
+ server_create_udp(:out_forward_heartbeat_receiver, 0, socket: @usock, max_bytes: @read_length, &method(:on_udp_heartbeat_response_recv))
299
309
  end
300
310
  timer_execute(:out_forward_heartbeat_request, @heartbeat_interval, &method(:on_heartbeat_timer))
301
311
  end
@@ -481,7 +491,7 @@ module Fluent::Plugin
481
491
  end
482
492
  end
483
493
 
484
- def on_udp_heatbeat_response_recv(data, sock)
494
+ def on_udp_heartbeat_response_recv(data, sock)
485
495
  sockaddr = Socket.pack_sockaddr_in(sock.remote_port, sock.remote_host)
486
496
  if node = service_discovery_services.find { |n| n.sockaddr == sockaddr }
487
497
  # log.trace "heartbeat arrived", name: node.name, host: node.host, port: node.port
@@ -567,8 +577,8 @@ module Fluent::Plugin
567
577
 
568
578
  @handshake = HandshakeProtocol.new(
569
579
  log: @log,
570
- hostname: sender.security && sender.security.self_hostname,
571
- shared_key: server.shared_key || (sender.security && sender.security.shared_key) || '',
580
+ hostname: sender.security&.self_hostname,
581
+ shared_key: server.shared_key || sender.security&.shared_key || '',
572
582
  password: server.password || '',
573
583
  username: server.username || '',
574
584
  )
@@ -584,7 +594,7 @@ module Fluent::Plugin
584
594
  attr_accessor :usock
585
595
 
586
596
  attr_reader :state
587
- attr_reader :sockaddr # used by on_udp_heatbeat_response_recv
597
+ attr_reader :sockaddr # used by on_udp_heartbeat_response_recv
588
598
  attr_reader :failure # for test
589
599
 
590
600
  def validate_host_resolution!
@@ -711,7 +721,7 @@ module Fluent::Plugin
711
721
  end
712
722
  when :udp
713
723
  @usock.send "\0", 0, Socket.pack_sockaddr_in(@port, dest_addr)
714
- # response is going to receive at on_udp_heatbeat_response_recv
724
+ # response is going to receive at on_udp_heartbeat_response_recv
715
725
  false
716
726
  when :none # :none doesn't use this class
717
727
  raise "BUG: heartbeat_type none must not use Node"
@@ -744,7 +754,7 @@ module Fluent::Plugin
744
754
  def resolve_dns!
745
755
  addrinfo_list = Socket.getaddrinfo(@host, @port, nil, Socket::SOCK_STREAM)
746
756
  addrinfo = @sender.dns_round_robin ? addrinfo_list.sample : addrinfo_list.first
747
- @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_udp_heatbeat_response_recv
757
+ @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_udp_heartbeat_response_recv
748
758
  addrinfo[3]
749
759
  end
750
760
  private :resolve_dns!
@@ -270,7 +270,7 @@ module Fluent::Plugin
270
270
  OpenSSL::SSL::VERIFY_PEER
271
271
  end
272
272
  opt[:ciphers] = @tls_ciphers
273
- opt[:ssl_version] = @tls_version
273
+ opt = Fluent::TLS.set_version_to_options(opt, @tls_version, nil, nil)
274
274
  end
275
275
 
276
276
  opt
@@ -98,11 +98,11 @@ module Fluent::Plugin
98
98
  raise Fluent::ConfigError, "out_secondary_file: basename or directory has an incompatible placeholder, remove time formats, like `%Y%m%d`, from basename or directory"
99
99
  end
100
100
 
101
- if !@chunk_key_tag && (ph = placeholders.find { |placeholder| placeholder.match(/tag(\[\d+\])?/) })
101
+ if !@chunk_key_tag && (ph = placeholders.find { |placeholder| placeholder.match?(/tag(\[\d+\])?/) })
102
102
  raise Fluent::ConfigError, "out_secondary_file: basename or directory has an incompatible placeholder #{ph}, remove tag placeholder, like `${tag}`, from basename or directory"
103
103
  end
104
104
 
105
- vars = placeholders.reject { |placeholder| placeholder.match(/tag(\[\d+\])?/) || (placeholder == 'chunk_id') }
105
+ vars = placeholders.reject { |placeholder| placeholder.match?(/tag(\[\d+\])?/) || (placeholder == 'chunk_id') }
106
106
 
107
107
  if ph = vars.find { |v| !@chunk_keys.include?(v) }
108
108
  raise Fluent::ConfigError, "out_secondary_file: basename or directory has an incompatible placeholder #{ph}, remove variable placeholder, like `${varname}`, from basename or directory"
@@ -25,6 +25,9 @@ module Fluent::Plugin
25
25
  DEFAULT_LINE_FORMAT_TYPE = 'stdout'
26
26
  DEFAULT_FORMAT_TYPE = 'json'
27
27
 
28
+ desc "If Fluentd logger outputs logs to a file (with -o option), this plugin outputs events to the file as well."
29
+ config_param :use_logger, :bool, default: true
30
+
28
31
  config_section :buffer do
29
32
  config_set_default :chunk_keys, ['tag']
30
33
  config_set_default :flush_at_shutdown, true
@@ -44,6 +47,10 @@ module Fluent::Plugin
44
47
  true
45
48
  end
46
49
 
50
+ def dest_io
51
+ @use_logger ? $log : $stdout
52
+ end
53
+
47
54
  attr_accessor :formatter
48
55
 
49
56
  def configure(conf)
@@ -57,9 +64,9 @@ module Fluent::Plugin
57
64
  def process(tag, es)
58
65
  es = inject_values_to_event_stream(tag, es)
59
66
  es.each {|time,record|
60
- $log.write(format(tag, time, record))
67
+ dest_io.write(format(tag, time, record))
61
68
  }
62
- $log.flush
69
+ dest_io.flush
63
70
  end
64
71
 
65
72
  def format(tag, time, record)
@@ -68,7 +75,7 @@ module Fluent::Plugin
68
75
  end
69
76
 
70
77
  def write(chunk)
71
- chunk.write_to($log)
78
+ chunk.write_to(dest_io)
72
79
  end
73
80
  end
74
81
  end
@@ -92,8 +92,8 @@ module Fluent
92
92
 
93
93
  def initialize
94
94
  super
95
- $log.warn "'tcp' output is obsoleted and will be removed. Use 'forward' instead."
96
- $log.warn "see 'forward' section in https://docs.fluentd.org/ for the high-availability configuration."
95
+ log.warn "'tcp' output is obsoleted and will be removed. Use 'forward' instead."
96
+ log.warn "see 'forward' section in https://docs.fluentd.org/ for the high-availability configuration."
97
97
  end
98
98
 
99
99
  config_param :port, :integer, default: LISTEN_PORT
@@ -114,7 +114,7 @@ module Fluent
114
114
 
115
115
  def initialize
116
116
  super
117
- $log.warn "'unix' output is obsoleted and will be removed."
117
+ log.warn "'unix' output is obsoleted and will be removed."
118
118
  end
119
119
 
120
120
  config_param :path, :string
@@ -171,30 +171,6 @@ module Fluent
171
171
  # output_enqueue_thread_waiting: for test of output.rb itself
172
172
  attr_accessor :retry_for_error_chunk # if true, error flush will be retried even if under_plugin_development is true
173
173
 
174
- def num_errors
175
- @num_errors_metrics.get
176
- end
177
-
178
- def emit_count
179
- @emit_count_metrics.get
180
- end
181
-
182
- def emit_size
183
- @emit_size_metrics.get
184
- end
185
-
186
- def emit_records
187
- @emit_records_metrics.get
188
- end
189
-
190
- def write_count
191
- @write_count_metrics.get
192
- end
193
-
194
- def rollback_count
195
- @rollback_count_metrics.get
196
- end
197
-
198
174
  def initialize
199
175
  super
200
176
  @counter_mutex = Mutex.new
@@ -210,9 +186,11 @@ module Fluent
210
186
  @emit_records_metrics = nil
211
187
  @emit_size_metrics = nil
212
188
  @write_count_metrics = nil
189
+ @write_secondary_count_metrics = nil
213
190
  @rollback_count_metrics = nil
214
191
  @flush_time_count_metrics = nil
215
192
  @slow_flush_count_metrics = nil
193
+ @drop_oldest_chunk_count_metrics = nil
216
194
  @enable_size_metrics = false
217
195
 
218
196
  # How to process events is decided here at once, but it will be decided in delayed way on #configure & #start
@@ -278,9 +256,11 @@ module Fluent
278
256
  @emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_records", help_text: "Number of emit records")
279
257
  @emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_size", help_text: "Total size of emit events")
280
258
  @write_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "write_count", help_text: "Number of writing events")
259
+ @write_secondary_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "write_secondary_count", help_text: "Number of writing events in secondary")
281
260
  @rollback_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "rollback_count", help_text: "Number of rollbacking operations")
282
261
  @flush_time_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "flush_time_count", help_text: "Count of flush time")
283
262
  @slow_flush_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "slow_flush_count", help_text: "Count of slow flush occurred time(s)")
263
+ @drop_oldest_chunk_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "drop_oldest_chunk_count", help_text: "Number of count that old chunk were discarded with drop_oldest_chunk")
284
264
 
285
265
  if has_buffer_section
286
266
  unless implement?(:buffered) || implement?(:delayed_commit)
@@ -572,7 +552,7 @@ module Fluent
572
552
  @output_flush_threads.each do |state|
573
553
  # to wakeup thread and make it to stop by itself
574
554
  state.mutex.synchronize {
575
- if state.thread && state.thread.status
555
+ if state.thread&.status
576
556
  state.next_clock = 0
577
557
  state.cond_var.signal
578
558
  end
@@ -999,6 +979,7 @@ module Fluent
999
979
  if oldest
1000
980
  log.warn "dropping oldest chunk to make space after buffer overflow", chunk_id: dump_unique_id_hex(oldest.unique_id)
1001
981
  @buffer.purge_chunk(oldest.unique_id)
982
+ @drop_oldest_chunk_count_metrics.inc
1002
983
  else
1003
984
  log.error "no queued chunks to be dropped for drop_oldest_chunk"
1004
985
  end
@@ -1014,13 +995,17 @@ module Fluent
1014
995
  end
1015
996
 
1016
997
  FORMAT_MSGPACK_STREAM = ->(e){ e.to_msgpack_stream(packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
1017
- FORMAT_COMPRESSED_MSGPACK_STREAM = ->(e){ e.to_compressed_msgpack_stream(packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
998
+ FORMAT_COMPRESSED_MSGPACK_STREAM_GZIP = ->(e){ e.to_compressed_msgpack_stream(packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
999
+ FORMAT_COMPRESSED_MSGPACK_STREAM_ZSTD = ->(e){ e.to_compressed_msgpack_stream(packer: Fluent::MessagePackFactory.thread_local_msgpack_packer, type: :zstd) }
1018
1000
  FORMAT_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_msgpack_stream(time_int: true, packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
1019
- FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_compressed_msgpack_stream(time_int: true, packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
1001
+ FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT_GZIP = ->(e){ e.to_compressed_msgpack_stream(time_int: true, packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
1002
+ FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT_ZSTD = ->(e){ e.to_compressed_msgpack_stream(time_int: true, packer: Fluent::MessagePackFactory.thread_local_msgpack_packer, type: :zstd) }
1020
1003
 
1021
1004
  def generate_format_proc
1022
1005
  if @buffer && @buffer.compress == :gzip
1023
- @time_as_integer ? FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT : FORMAT_COMPRESSED_MSGPACK_STREAM
1006
+ @time_as_integer ? FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT_GZIP : FORMAT_COMPRESSED_MSGPACK_STREAM_GZIP
1007
+ elsif @buffer && @buffer.compress == :zstd
1008
+ @time_as_integer ? FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT_ZSTD : FORMAT_COMPRESSED_MSGPACK_STREAM_ZSTD
1024
1009
  else
1025
1010
  @time_as_integer ? FORMAT_MSGPACK_STREAM_TIME_INT : FORMAT_MSGPACK_STREAM
1026
1011
  end
@@ -1036,17 +1021,17 @@ module Fluent
1036
1021
  # iteration of event stream, and it should be done just once even if total event stream size
1037
1022
  # is bigger than chunk_limit_size because of performance.
1038
1023
  def handle_stream_with_custom_format(tag, es, enqueue: false)
1039
- meta_and_data = {}
1024
+ meta_and_data = Hash.new { |h, k| h[k] = [] }
1040
1025
  records = 0
1041
1026
  es.each(unpacker: Fluent::MessagePackFactory.thread_local_msgpack_unpacker) do |time, record|
1042
1027
  meta = metadata(tag, time, record)
1043
- meta_and_data[meta] ||= []
1044
1028
  res = format(tag, time, record)
1045
1029
  if res
1046
1030
  meta_and_data[meta] << res
1047
1031
  records += 1
1048
1032
  end
1049
1033
  end
1034
+ meta_and_data.default_proc = nil
1050
1035
  write_guard do
1051
1036
  @buffer.write(meta_and_data, enqueue: enqueue)
1052
1037
  end
@@ -1057,14 +1042,14 @@ module Fluent
1057
1042
 
1058
1043
  def handle_stream_with_standard_format(tag, es, enqueue: false)
1059
1044
  format_proc = generate_format_proc
1060
- meta_and_data = {}
1045
+ meta_and_data = Hash.new { |h, k| h[k] = MultiEventStream.new }
1061
1046
  records = 0
1062
1047
  es.each(unpacker: Fluent::MessagePackFactory.thread_local_msgpack_unpacker) do |time, record|
1063
1048
  meta = metadata(tag, time, record)
1064
- meta_and_data[meta] ||= MultiEventStream.new
1065
1049
  meta_and_data[meta].add(time, record)
1066
1050
  records += 1
1067
1051
  end
1052
+ meta_and_data.default_proc = nil
1068
1053
  write_guard do
1069
1054
  @buffer.write(meta_and_data, format: format_proc, enqueue: enqueue)
1070
1055
  end
@@ -1146,7 +1131,7 @@ module Fluent
1146
1131
 
1147
1132
  def try_rollback_write
1148
1133
  @dequeued_chunks_mutex.synchronize do
1149
- while @dequeued_chunks.first && @dequeued_chunks.first.expired?
1134
+ while @dequeued_chunks.first&.expired?
1150
1135
  info = @dequeued_chunks.shift
1151
1136
  if @buffer.takeback_chunk(info.chunk_id)
1152
1137
  @rollback_count_metrics.inc
@@ -1208,6 +1193,7 @@ module Fluent
1208
1193
  if output.delayed_commit
1209
1194
  log.trace "executing delayed write and commit", chunk: dump_unique_id_hex(chunk.unique_id)
1210
1195
  @write_count_metrics.inc
1196
+ @write_secondary_count_metrics.inc if using_secondary
1211
1197
  @dequeued_chunks_mutex.synchronize do
1212
1198
  # delayed_commit_timeout for secondary is configured in <buffer> of primary (<secondary> don't get <buffer>)
1213
1199
  @dequeued_chunks << DequeuedChunkInfo.new(chunk.unique_id, Time.now, self.delayed_commit_timeout)
@@ -1220,6 +1206,7 @@ module Fluent
1220
1206
  dump_chunk_id = dump_unique_id_hex(chunk_id)
1221
1207
  log.trace "adding write count", instance: self.object_id
1222
1208
  @write_count_metrics.inc
1209
+ @write_secondary_count_metrics.inc if using_secondary
1223
1210
  log.trace "executing sync write", chunk: dump_chunk_id
1224
1211
 
1225
1212
  output.write(chunk)
@@ -1389,7 +1376,7 @@ module Fluent
1389
1376
  @output_flush_thread_current_position = (@output_flush_thread_current_position + 1) % @buffer_config.flush_thread_count
1390
1377
  state = @output_flush_threads[@output_flush_thread_current_position]
1391
1378
  state.mutex.synchronize {
1392
- if state.thread && state.thread.status # "run"/"sleep"/"aborting" or false(successfully stop) or nil(killed by exception)
1379
+ if state.thread&.status # "run"/"sleep"/"aborting" or false(successfully stop) or nil(killed by exception)
1393
1380
  state.next_clock = 0
1394
1381
  state.cond_var.signal
1395
1382
  else
@@ -1435,7 +1422,7 @@ module Fluent
1435
1422
  def flush_thread_wakeup
1436
1423
  @output_flush_threads.each do |state|
1437
1424
  state.mutex.synchronize {
1438
- if state.thread && state.thread.status
1425
+ if state.thread&.status
1439
1426
  state.next_clock = 0
1440
1427
  state.cond_var.signal
1441
1428
  end
@@ -1587,9 +1574,11 @@ module Fluent
1587
1574
  'retry_count' => @num_errors_metrics.get,
1588
1575
  'emit_count' => @emit_count_metrics.get,
1589
1576
  'write_count' => @write_count_metrics.get,
1577
+ 'write_secondary_count' => @write_secondary_count_metrics.get,
1590
1578
  'rollback_count' => @rollback_count_metrics.get,
1591
1579
  'slow_flush_count' => @slow_flush_count_metrics.get,
1592
1580
  'flush_time_count' => @flush_time_count_metrics.get,
1581
+ 'drop_oldest_chunk_count' => @drop_oldest_chunk_count_metrics.get,
1593
1582
  }
1594
1583
 
1595
1584
  if @buffer && @buffer.respond_to?(:statistics)
@@ -26,13 +26,13 @@ module Fluent
26
26
  end
27
27
 
28
28
  def owner
29
- if instance_variable_defined?("@_owner")
29
+ if instance_variable_defined?(:@_owner)
30
30
  @_owner
31
31
  end
32
32
  end
33
33
 
34
34
  def log
35
- if instance_variable_defined?("@log")
35
+ if instance_variable_defined?(:@log)
36
36
  @log
37
37
  end
38
38
  end
@@ -29,7 +29,7 @@ module Fluent
29
29
  class Parser < Base
30
30
  class TimeoutChecker
31
31
  # This implementation now uses mutex because parser is typically used in input.
32
- # If this has a performance issue under high concurreny, use concurrent-ruby's map instead.
32
+ # If this has a performance issue under high concurrency, use concurrent-ruby's map instead.
33
33
  def initialize(timeout)
34
34
  @map = {}
35
35
  @flag = ServerEngine::BlockingFlag.new
@@ -46,8 +46,8 @@ module Fluent
46
46
  @map.keys.each { |th|
47
47
  time = @map[th]
48
48
  if now - time > @timeout
49
- th.raise UncatchableError, "parsing timed out"
50
49
  @map.delete(th)
50
+ th.raise UncatchableError, "parsing timed out"
51
51
  end
52
52
  }
53
53
  }
@@ -220,7 +220,7 @@ module Fluent
220
220
  end
221
221
 
222
222
  def string_like_null(value, null_empty_string = @null_empty_string, null_value_regexp = @null_value_pattern)
223
- null_empty_string && value.empty? || null_value_regexp && string_safe_encoding(value){|s| null_value_regexp.match(s) }
223
+ null_empty_string && value.empty? || null_value_regexp && string_safe_encoding(value){|s| null_value_regexp.match?(s) }
224
224
  end
225
225
 
226
226
  TRUTHY_VALUES = ['true', 'yes', '1']
@@ -52,9 +52,9 @@ module Fluent
52
52
  when :oj
53
53
  return [Oj.method(:load), Oj::ParseError] if Fluent::OjOptions.available?
54
54
 
55
- log&.info "Oj is not installed, and failing back to Yajl for json parser"
56
- configure_json_parser(:yajl)
57
- when :json then [JSON.method(:load), JSON::ParserError]
55
+ log&.info "Oj is not installed, and failing back to JSON for json parser"
56
+ configure_json_parser(:json)
57
+ when :json then [JSON.method(:parse), JSON::ParserError]
58
58
  when :yajl then [Yajl.method(:load), Yajl::ParseError]
59
59
  else
60
60
  raise "BUG: unknown json parser specified: #{name}"
@@ -27,7 +27,7 @@ module Fluent
27
27
  Plugin.register_sd('file', self)
28
28
 
29
29
  DEFAULT_FILE_TYPE = :yaml
30
- DEFAUT_WEIGHT = 60
30
+ DEFAULT_WEIGHT = 60
31
31
  DEFAULT_SD_FILE_PATH = ENV['DEFAULT_SD_FILE_PATH'] || '/etc/fluent/sd.yaml'
32
32
 
33
33
  helpers :event_loop
@@ -126,7 +126,7 @@ module Fluent
126
126
  s.fetch('host'),
127
127
  s.fetch('port'),
128
128
  s['name'],
129
- s.fetch('weight', DEFAUT_WEIGHT),
129
+ s.fetch('weight', DEFAULT_WEIGHT),
130
130
  s['standby'],
131
131
  s['username'],
132
132
  s['password'],
@@ -19,7 +19,7 @@ require 'fluent/plugin'
19
19
  require 'fluent/plugin/storage'
20
20
 
21
21
  require 'fileutils'
22
- require 'yajl'
22
+ require 'json'
23
23
 
24
24
  module Fluent
25
25
  module Plugin
@@ -90,7 +90,7 @@ module Fluent
90
90
  log.warn "detect empty plugin storage file during startup. Ignored: #{@path}"
91
91
  return
92
92
  end
93
- data = Yajl::Parser.parse(data)
93
+ data = JSON.parse(data)
94
94
  raise Fluent::ConfigError, "Invalid contents (not object) in plugin storage file: '#{@path}'" unless data.is_a?(Hash)
95
95
  rescue => e
96
96
  log.error "failed to read data from plugin storage file", path: @path, error: e
@@ -114,7 +114,7 @@ module Fluent
114
114
  return unless File.exist?(@path)
115
115
  begin
116
116
  json_string = File.open(@path, 'r:utf-8'){ |io| io.read }
117
- json = Yajl::Parser.parse(json_string)
117
+ json = JSON.parse(json_string)
118
118
  unless json.is_a?(Hash)
119
119
  log.error "broken content for plugin storage (Hash required: ignored)", type: json.class
120
120
  log.debug "broken content", content: json_string
@@ -130,7 +130,11 @@ module Fluent
130
130
  return if @on_memory
131
131
  tmp_path = @path + '.tmp.' + Fluent::UniqueId.hex(Fluent::UniqueId.generate)
132
132
  begin
133
- json_string = Yajl::Encoder.encode(@store, pretty: @pretty_print)
133
+ if @pretty_print
134
+ json_string = JSON.pretty_generate(@store)
135
+ else
136
+ json_string = JSON.generate(@store)
137
+ end
134
138
  File.open(tmp_path, 'w:utf-8', @mode) { |io| io.write json_string; io.fsync }
135
139
  File.rename(tmp_path, @path)
136
140
  rescue => e
data/lib/fluent/plugin.rb CHANGED
@@ -175,7 +175,7 @@ module Fluent
175
175
  else
176
176
  raise Fluent::ConfigError, "#{kind} plugin '#{type}' is not a Class nor callable (without arguments)."
177
177
  end
178
- if parent && impl.respond_to?("owner=")
178
+ if parent && impl.respond_to?(:owner=)
179
179
  impl.owner = parent
180
180
  end
181
181
  impl.extend FeatureAvailabilityChecker
@@ -143,7 +143,7 @@ module Fluent
143
143
  @_child_process_mutex.synchronize{ @_child_process_processes.keys }.each do |pid|
144
144
  process_info = @_child_process_processes[pid]
145
145
  next if !process_info
146
- process_info.writeio && process_info.writeio.close rescue nil
146
+ process_info.writeio&.close rescue nil
147
147
  end
148
148
 
149
149
  super
@@ -183,7 +183,7 @@ module Fluent
183
183
 
184
184
  living_process_exist = true
185
185
 
186
- process_info.killed_at ||= Fluent::Clock.now # for illegular case (e.g., created after shutdown)
186
+ process_info.killed_at ||= Fluent::Clock.now # for irregular case (e.g., created after shutdown)
187
187
  timeout_at = process_info.killed_at + @_child_process_kill_timeout
188
188
  now = Fluent::Clock.now
189
189
  next if now < timeout_at
@@ -14,6 +14,7 @@
14
14
  # limitations under the License.
15
15
  #
16
16
 
17
+ require 'uri'
17
18
  require 'async/http/protocol'
18
19
  require 'fluent/plugin_helper/http_server/methods'
19
20
 
@@ -29,12 +30,22 @@ module Fluent
29
30
  @path, @query_string = path.split('?', 2)
30
31
  end
31
32
 
33
+ def headers
34
+ @request.headers
35
+ end
36
+
32
37
  def query
33
- @query_string && CGI.parse(@query_string)
38
+ if @query_string
39
+ hash = Hash.new { |h, k| h[k] = [] }
40
+ # For compatibility with CGI.parse
41
+ URI.decode_www_form(@query_string).each_with_object(hash) do |(key, value), h|
42
+ h[key] << value
43
+ end
44
+ end
34
45
  end
35
46
 
36
47
  def body
37
- @request.body && @request.body.read
48
+ @request.body&.read
38
49
  end
39
50
  end
40
51
  end
@@ -68,11 +68,9 @@ module Fluent
68
68
  notify.push(:ready)
69
69
  end
70
70
 
71
- if async_v2?
72
- @server_task_queue = ::Thread::Queue.new
73
- @server_task_queue.pop
74
- @server_task&.stop
75
- end
71
+ @server_task_queue = ::Thread::Queue.new
72
+ @server_task_queue.pop
73
+ @server_task&.stop
76
74
  end
77
75
 
78
76
  @logger.debug('Finished HTTP server')
@@ -80,11 +78,7 @@ module Fluent
80
78
 
81
79
  def stop
82
80
  @logger.debug('closing HTTP server')
83
- if async_v2?
84
- @server_task_queue&.push(:stop)
85
- else
86
- @server_task&.stop
87
- end
81
+ @server_task_queue.push(:stop)
88
82
  end
89
83
 
90
84
  HttpServer::Methods::ALL.map { |e| e.downcase.to_sym }.each do |name|
@@ -100,10 +94,6 @@ module Fluent
100
94
  @router.mount(name, path, app || block)
101
95
  end
102
96
  end
103
-
104
- private def async_v2?
105
- Gem::Version.new(Async::VERSION) >= Gem::Version.new('2.0')
106
- end
107
97
  end
108
98
  end
109
99
  end
@@ -14,16 +14,9 @@
14
14
  # limitations under the License.
15
15
  #
16
16
 
17
- begin
18
- require 'async'
19
- require 'fluent/plugin_helper/http_server/server'
20
- rescue LoadError => _
21
- require 'fluent/plugin_helper/http_server/compat/server'
22
- Fluent::PluginHelper::HttpServer::Server = Fluent::PluginHelper::HttpServer::Compat::Server
23
- end
24
-
25
17
  require 'fluent/plugin_helper/thread'
26
18
  require 'fluent/plugin_helper/server' # For Server::ServerTransportParams
19
+ require 'fluent/plugin_helper/http_server/server'
27
20
  require 'fluent/plugin_helper/http_server/ssl_context_builder'
28
21
 
29
22
  module Fluent
@@ -72,6 +72,13 @@ module Fluent
72
72
 
73
73
  @_metrics["#{@plugin_type_or_id}_#{namespace}_#{subsystem}_#{name}"] = metrics
74
74
 
75
+ # define the getter method for the calling instance.
76
+ singleton_class.module_eval do
77
+ unless method_defined?(name)
78
+ define_method(name) { metrics.get }
79
+ end
80
+ end
81
+
75
82
  metrics
76
83
  end
77
84
 
@@ -356,7 +356,10 @@ module Fluent
356
356
  end
357
357
 
358
358
  def shutdown
359
- @_server_connections.each do |conn|
359
+ # When it invokes conn.close, it reduces elements in @_server_connections by close_callback,
360
+ # and it reduces the number of loop iterations, so some connections would be skipped and never closed.
361
+ # So, it requires invoking #dup to avoid the problem.
362
+ @_server_connections.dup.each do |conn|
360
363
  conn.close rescue nil
361
364
  end
362
365
 
@@ -91,7 +91,7 @@ module Fluent
91
91
  end
92
92
 
93
93
  # @param title [Symbol] the thread name. this value should be unique.
94
- # @param configurations [Hash] hash which must has discivery_service type and its configuration like `{ type: :static, conf: <Fluent::Config::Element> }`
94
+ # @param configurations [Hash] hash which must have discovery_service type and its configuration like `{ type: :static, conf: <Fluent::Config::Element> }`
95
95
  # @param load_balancer [Object] object which has two methods #rebalance and #select_service
96
96
  # @param custom_build_method [Proc]
97
97
  def service_discovery_create_manager(title, configurations:, load_balancer: nil, custom_build_method: nil, interval: 3)
@@ -54,8 +54,8 @@ module Fluent
54
54
  if Fluent.windows?
55
55
  # To prevent closing socket forcibly on Windows,
56
56
  # this options shouldn't be set up when linger_timeout equals to 0 (including nil).
57
- # This unintended behavior always ocurrs on Windows when linger_timeout.to_i == 0.
58
- # This unintented behavior causes "Errno::ECONNRESET: An existing connection was forcibly
57
+ # This unintended behavior always occurs on Windows when linger_timeout.to_i == 0.
58
+ # This unintended behavior causes "Errno::ECONNRESET: An existing connection was forcibly
59
59
  # closed by the remote host." on Windows.
60
60
  if linger_timeout.to_i > 0
61
61
  if linger_timeout >= 2**16