fluentd 1.6.3-x64-mingw32 → 1.7.0-x64-mingw32
Potentially problematic release. This version of fluentd might be problematic.
- checksums.yaml +4 -4
- data/.drone.yml +35 -0
- data/.github/ISSUE_TEMPLATE/bug_report.md +2 -0
- data/CHANGELOG.md +58 -0
- data/README.md +5 -1
- data/fluentd.gemspec +1 -1
- data/lib/fluent/clock.rb +4 -0
- data/lib/fluent/compat/output.rb +3 -3
- data/lib/fluent/compat/socket_util.rb +1 -1
- data/lib/fluent/config/element.rb +3 -3
- data/lib/fluent/config/literal_parser.rb +1 -1
- data/lib/fluent/config/section.rb +4 -1
- data/lib/fluent/error.rb +4 -0
- data/lib/fluent/event.rb +28 -24
- data/lib/fluent/event_router.rb +2 -1
- data/lib/fluent/log.rb +1 -1
- data/lib/fluent/msgpack_factory.rb +8 -0
- data/lib/fluent/plugin/bare_output.rb +4 -4
- data/lib/fluent/plugin/buf_file_single.rb +211 -0
- data/lib/fluent/plugin/buffer.rb +62 -63
- data/lib/fluent/plugin/buffer/chunk.rb +21 -3
- data/lib/fluent/plugin/buffer/file_chunk.rb +37 -12
- data/lib/fluent/plugin/buffer/file_single_chunk.rb +314 -0
- data/lib/fluent/plugin/buffer/memory_chunk.rb +2 -1
- data/lib/fluent/plugin/compressable.rb +10 -6
- data/lib/fluent/plugin/filter_grep.rb +2 -2
- data/lib/fluent/plugin/formatter_csv.rb +10 -6
- data/lib/fluent/plugin/in_syslog.rb +10 -3
- data/lib/fluent/plugin/in_tail.rb +7 -2
- data/lib/fluent/plugin/in_tcp.rb +34 -7
- data/lib/fluent/plugin/multi_output.rb +4 -4
- data/lib/fluent/plugin/out_exec_filter.rb +1 -0
- data/lib/fluent/plugin/out_file.rb +13 -3
- data/lib/fluent/plugin/out_forward.rb +126 -588
- data/lib/fluent/plugin/out_forward/ack_handler.rb +161 -0
- data/lib/fluent/plugin/out_forward/connection_manager.rb +113 -0
- data/lib/fluent/plugin/out_forward/error.rb +28 -0
- data/lib/fluent/plugin/out_forward/failure_detector.rb +84 -0
- data/lib/fluent/plugin/out_forward/handshake_protocol.rb +121 -0
- data/lib/fluent/plugin/out_forward/load_balancer.rb +111 -0
- data/lib/fluent/plugin/out_forward/socket_cache.rb +138 -0
- data/lib/fluent/plugin/out_http.rb +231 -0
- data/lib/fluent/plugin/output.rb +29 -35
- data/lib/fluent/plugin/parser.rb +77 -0
- data/lib/fluent/plugin/parser_csv.rb +75 -0
- data/lib/fluent/plugin_helper/server.rb +1 -1
- data/lib/fluent/plugin_helper/thread.rb +1 -0
- data/lib/fluent/root_agent.rb +1 -1
- data/lib/fluent/time.rb +4 -2
- data/lib/fluent/timezone.rb +21 -7
- data/lib/fluent/version.rb +1 -1
- data/test/command/test_fluentd.rb +1 -1
- data/test/command/test_plugin_generator.rb +18 -2
- data/test/config/test_configurable.rb +78 -40
- data/test/counter/test_store.rb +1 -1
- data/test/helper.rb +1 -0
- data/test/helpers/process_extenstion.rb +33 -0
- data/test/plugin/out_forward/test_ack_handler.rb +101 -0
- data/test/plugin/out_forward/test_connection_manager.rb +145 -0
- data/test/plugin/out_forward/test_handshake_protocol.rb +103 -0
- data/test/plugin/out_forward/test_load_balancer.rb +60 -0
- data/test/plugin/out_forward/test_socket_cache.rb +139 -0
- data/test/plugin/test_buf_file.rb +118 -2
- data/test/plugin/test_buf_file_single.rb +734 -0
- data/test/plugin/test_buffer.rb +4 -48
- data/test/plugin/test_buffer_file_chunk.rb +19 -1
- data/test/plugin/test_buffer_file_single_chunk.rb +620 -0
- data/test/plugin/test_formatter_csv.rb +16 -0
- data/test/plugin/test_in_syslog.rb +56 -6
- data/test/plugin/test_in_tail.rb +1 -1
- data/test/plugin/test_in_tcp.rb +25 -0
- data/test/plugin/test_out_forward.rb +75 -201
- data/test/plugin/test_out_http.rb +352 -0
- data/test/plugin/test_output_as_buffered.rb +27 -24
- data/test/plugin/test_parser.rb +40 -0
- data/test/plugin/test_parser_csv.rb +83 -0
- data/test/plugin_helper/test_record_accessor.rb +1 -1
- data/test/test_time_formatter.rb +140 -121
- metadata +33 -4
data/lib/fluent/plugin/output.rb
CHANGED
@@ -18,6 +18,7 @@ require 'fluent/error'
 require 'fluent/plugin/base'
 require 'fluent/plugin/buffer'
 require 'fluent/plugin_helper/record_accessor'
+require 'fluent/msgpack_factory'
 require 'fluent/log'
 require 'fluent/plugin_id'
 require 'fluent/plugin_helper'
@@ -173,7 +174,7 @@ module Fluent
 
       def initialize
         super
-        @counters_monitor = Monitor.new
+        @counter_mutex = Mutex.new
         @buffering = false
         @delayed_commit = false
         @as_secondary = false
@@ -780,26 +781,26 @@ module Fluent
       end
 
       def emit_sync(tag, es)
-        @counters_monitor.synchronize{ @emit_count += 1 }
+        @counter_mutex.synchronize{ @emit_count += 1 }
         begin
           process(tag, es)
-          @counters_monitor.synchronize{ @emit_records += es.size }
+          @counter_mutex.synchronize{ @emit_records += es.size }
         rescue
-          @counters_monitor.synchronize{ @num_errors += 1 }
+          @counter_mutex.synchronize{ @num_errors += 1 }
           raise
         end
       end
 
       def emit_buffered(tag, es)
-        @counters_monitor.synchronize{ @emit_count += 1 }
+        @counter_mutex.synchronize{ @emit_count += 1 }
         begin
           execute_chunking(tag, es, enqueue: (@flush_mode == :immediate))
-          if !@retry && @buffer.queued?
+          if !@retry && @buffer.queued?(nil, optimistic: true)
             submit_flush_once
           end
         rescue
           # TODO: separate number of errors into emit errors and write/flush errors
-          @counters_monitor.synchronize{ @num_errors += 1 }
+          @counter_mutex.synchronize{ @num_errors += 1 }
           raise
         end
       end
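The recurring change in this file swaps the Monitor-based counter guard (@counters_monitor) for a plain Mutex. Monitor adds re-entrancy bookkeeping that these one-line counter bumps never need, so Mutex is the cheaper lock. A rough micro-benchmark sketch of the difference in plain Ruby (names and counts are illustrative, not fluentd code):

require 'monitor'
require 'benchmark'

mutex = Mutex.new
monitor = Monitor.new
count = 0
n = 1_000_000

Benchmark.bm(8) do |x|
  # Mutex#synchronize: a single lock/unlock per call.
  x.report('Mutex')   { n.times { mutex.synchronize { count += 1 } } }
  # Monitor#synchronize: also records the owning thread to allow re-entrant locking.
  x.report('Monitor') { n.times { monitor.synchronize { count += 1 } } }
end

The trade-off is that Mutex#synchronize raises ThreadError if the same thread locks it twice; that is safe here because each counter update is a single non-nested block.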
@@ -857,15 +858,8 @@ module Fluent
       def chunk_for_test(tag, time, record)
         require 'fluent/plugin/buffer/memory_chunk'
 
-        m = metadata_for_test(tag, time, record)
-        Fluent::Plugin::Buffer::MemoryChunk.new(m)
-      end
-
-      def metadata_for_test(tag, time, record)
-        raise "BUG: #metadata_for_test is available only when no actual metadata exists" unless @buffer.metadata_list.empty?
         m = metadata(tag, time, record)
-
-        m
+        Fluent::Plugin::Buffer::MemoryChunk.new(m)
       end
 
       def execute_chunking(tag, es, enqueue: false)
@@ -919,10 +913,10 @@ module Fluent
         end
       end
 
-      FORMAT_MSGPACK_STREAM = ->(e){ e.to_msgpack_stream }
-      FORMAT_COMPRESSED_MSGPACK_STREAM = ->(e){ e.to_compressed_msgpack_stream }
-      FORMAT_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_msgpack_stream(time_int: true) }
-      FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_compressed_msgpack_stream(time_int: true) }
+      FORMAT_MSGPACK_STREAM = ->(e){ e.to_msgpack_stream(packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
+      FORMAT_COMPRESSED_MSGPACK_STREAM = ->(e){ e.to_compressed_msgpack_stream(packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
+      FORMAT_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_msgpack_stream(time_int: true, packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
+      FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_compressed_msgpack_stream(time_int: true, packer: Fluent::MessagePackFactory.thread_local_msgpack_packer) }
 
       def generate_format_proc
         if @buffer && @buffer.compress == :gzip
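The new packer: keyword threads a reusable, per-thread msgpack packer into serialization instead of allocating a fresh packer (and its internal buffer) for every stream; the real factory is Fluent::MessagePackFactory (data/lib/fluent/msgpack_factory.rb in the file list above). A minimal sketch of the thread-local reuse pattern with the msgpack gem (the helper name below is illustrative, not fluentd API):

require 'msgpack'

# Hypothetical helper: one MessagePack::Packer per thread, created lazily.
# Thread-locality makes the cached packer safe to reuse without locking.
def thread_local_packer
  Thread.current[:msgpack_packer] ||= MessagePack::Packer.new
end

packer = thread_local_packer
packer.write("tag" => "app.access", "code" => 200)
bytes = packer.to_s # the serialized msgpack payload
packer.clear        # reset the internal buffer before the next reuse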
@@ -944,7 +938,7 @@ module Fluent
       def handle_stream_with_custom_format(tag, es, enqueue: false)
         meta_and_data = {}
         records = 0
-        es.each do |time, record|
+        es.each(unpacker: Fluent::MessagePackFactory.thread_local_msgpack_unpacker) do |time, record|
           meta = metadata(tag, time, record)
           meta_and_data[meta] ||= []
           res = format(tag, time, record)
@@ -956,7 +950,7 @@ module Fluent
         write_guard do
           @buffer.write(meta_and_data, enqueue: enqueue)
         end
-        @counters_monitor.synchronize{ @emit_records += records }
+        @counter_mutex.synchronize{ @emit_records += records }
         true
       end
 
@@ -964,7 +958,7 @@ module Fluent
         format_proc = generate_format_proc
         meta_and_data = {}
         records = 0
-        es.each do |time, record|
+        es.each(unpacker: Fluent::MessagePackFactory.thread_local_msgpack_unpacker) do |time, record|
           meta = metadata(tag, time, record)
           meta_and_data[meta] ||= MultiEventStream.new
           meta_and_data[meta].add(time, record)
@@ -973,7 +967,7 @@ module Fluent
         write_guard do
           @buffer.write(meta_and_data, format: format_proc, enqueue: enqueue)
         end
-        @counters_monitor.synchronize{ @emit_records += records }
+        @counter_mutex.synchronize{ @emit_records += records }
         true
       end
 
@@ -984,7 +978,7 @@ module Fluent
         if @custom_format
           records = 0
           data = []
-          es.each do |time, record|
+          es.each(unpacker: Fluent::MessagePackFactory.thread_local_msgpack_unpacker) do |time, record|
            res = format(tag, time, record)
            if res
              data << res
@@ -998,7 +992,7 @@ module Fluent
         write_guard do
           @buffer.write({meta => data}, format: format_proc, enqueue: enqueue)
         end
-        @counters_monitor.synchronize{ @emit_records += records }
+        @counter_mutex.synchronize{ @emit_records += records }
         true
       end
 
@@ -1036,7 +1030,7 @@ module Fluent
        # false if chunk was already flushed and couldn't be rollbacked unexpectedly
        # in many cases, false can be just ignored
        if @buffer.takeback_chunk(chunk_id)
-         @counters_monitor.synchronize{ @rollback_count += 1 }
+         @counter_mutex.synchronize{ @rollback_count += 1 }
          if update_retry
            primary = @as_secondary ? @primary_instance : self
            primary.update_retry_state(chunk_id, @as_secondary)
@@ -1052,7 +1046,7 @@ module Fluent
        while @dequeued_chunks.first && @dequeued_chunks.first.expired?
          info = @dequeued_chunks.shift
          if @buffer.takeback_chunk(info.chunk_id)
-           @counters_monitor.synchronize{ @rollback_count += 1 }
+           @counter_mutex.synchronize{ @rollback_count += 1 }
            log.warn "failed to flush the buffer chunk, timeout to commit.", chunk_id: dump_unique_id_hex(info.chunk_id), flushed_at: info.time
            primary = @as_secondary ? @primary_instance : self
            primary.update_retry_state(info.chunk_id, @as_secondary)
@@ -1067,7 +1061,7 @@ module Fluent
        until @dequeued_chunks.empty?
          info = @dequeued_chunks.shift
          if @buffer.takeback_chunk(info.chunk_id)
-           @counters_monitor.synchronize{ @rollback_count += 1 }
+           @counter_mutex.synchronize{ @rollback_count += 1 }
            log.info "delayed commit for buffer chunks was cancelled in shutdown", chunk_id: dump_unique_id_hex(info.chunk_id)
            primary = @as_secondary ? @primary_instance : self
            primary.update_retry_state(info.chunk_id, @as_secondary)
@@ -1110,7 +1104,7 @@ module Fluent
 
        if output.delayed_commit
          log.trace "executing delayed write and commit", chunk: dump_unique_id_hex(chunk.unique_id)
-         @counters_monitor.synchronize{ @write_count += 1 }
+         @counter_mutex.synchronize{ @write_count += 1 }
          @dequeued_chunks_mutex.synchronize do
            # delayed_commit_timeout for secondary is configured in <buffer> of primary (<secondary> don't get <buffer>)
            @dequeued_chunks << DequeuedChunkInfo.new(chunk.unique_id, Time.now, self.delayed_commit_timeout)
@@ -1122,7 +1116,7 @@ module Fluent
        chunk_id = chunk.unique_id
        dump_chunk_id = dump_unique_id_hex(chunk_id)
        log.trace "adding write count", instance: self.object_id
-       @counters_monitor.synchronize{ @write_count += 1 }
+       @counter_mutex.synchronize{ @write_count += 1 }
        log.trace "executing sync write", chunk: dump_chunk_id
 
        output.write(chunk)
@@ -1178,7 +1172,7 @@ module Fluent
        end
 
        if @buffer.takeback_chunk(chunk.unique_id)
-         @counters_monitor.synchronize { @rollback_count += 1 }
+         @counter_mutex.synchronize { @rollback_count += 1 }
        end
 
        update_retry_state(chunk.unique_id, using_secondary, e)
@@ -1209,9 +1203,9 @@ module Fluent
       def check_slow_flush(start)
         elapsed_time = Fluent::Clock.now - start
         elapsed_millsec = (elapsed_time * 1000).to_i
-        @counters_monitor.synchronize { @flush_time_count += elapsed_millsec }
+        @counter_mutex.synchronize { @flush_time_count += elapsed_millsec }
         if elapsed_time > @slow_flush_log_threshold
-          @counters_monitor.synchronize { @slow_flush_count += 1 }
+          @counter_mutex.synchronize { @slow_flush_count += 1 }
           log.warn "buffer flush took longer time than slow_flush_log_threshold:",
                    elapsed_time: elapsed_time, slow_flush_log_threshold: @slow_flush_log_threshold, plugin_id: self.plugin_id
         end
@@ -1219,7 +1213,7 @@ module Fluent
 
       def update_retry_state(chunk_id, using_secondary, error = nil)
         @retry_mutex.synchronize do
-          @counters_monitor.synchronize{ @num_errors += 1 }
+          @counter_mutex.synchronize{ @num_errors += 1 }
          chunk_id_hex = dump_unique_id_hex(chunk_id)
 
          unless @retry
@@ -1376,7 +1370,7 @@ module Fluent
        # This block should be done by integer values.
        # If both of flush_interval & flush_thread_interval are 1s, expected actual flush timing is 1.5s.
        # If we use integered values for this comparison, expected actual flush timing is 1.0s.
-       @buffer.enqueue_all{ |metadata, chunk| chunk.created_at.to_i + flush_interval <= now_int }
+       @buffer.enqueue_all{ |metadata, chunk| chunk.raw_create_at + flush_interval <= now_int }
       end
 
       if @chunk_key_time
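The last hunk replaces a Time-derived comparison with chunk.raw_create_at, which (judging from the buffer/chunk changes in the file list above) exposes the chunk's creation time as a plain integer so the periodic enqueue check allocates nothing. A sketch of the idea with illustrative classes, not fluentd API:

# Cache the epoch integer once at construction; hot loops that only need
# second precision can then compare Integers instead of Time objects.
class SketchChunk
  attr_reader :created_at, :raw_create_at

  def initialize
    @created_at = Time.now              # full Time, kept for inspection
    @raw_create_at = @created_at.to_i   # Integer epoch seconds, for hot paths
  end
end

chunk = SketchChunk.new
flush_interval = 60
now_int = Time.now.to_i
puts(chunk.raw_create_at + flush_interval <= now_int) # => false for a fresh chunk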
data/lib/fluent/plugin/parser.rb
CHANGED
@@ -17,13 +17,58 @@
 require 'fluent/plugin/base'
 require 'fluent/plugin/owned_by_mixin'
 
+require 'fluent/error'
 require 'fluent/mixin' # for TypeConverter
 require 'fluent/time'
 require 'fluent/plugin/string_util'
 
+require 'serverengine/blocking_flag'
+
 module Fluent
   module Plugin
     class Parser < Base
+      class TimeoutChecker
+        # This implementation now uses mutex because parser is typically used in input.
+        # If this has a performance issue under high concurreny, use concurrent-ruby's map instead.
+        def initialize(timeout)
+          @map = {}
+          @flag = ServerEngine::BlockingFlag.new
+          @mutex = Mutex.new
+          @timeout = timeout
+        end
+
+        def start
+          @thread = ::Thread.new {
+            until @flag.wait_for_set(0.5)
+              now = Time.now
+              @mutex.synchronize {
+                @map.keys.each { |th|
+                  time = @map[th]
+                  if now - time > @timeout
+                    th.raise UncatchableError, "parsing timed out"
+                    @map.delete(th)
+                  end
+                }
+              }
+            end
+          }
+        end
+
+        def stop
+          @flag.set!
+          @thread.join
+        end
+
+        def execute
+          th = Thread.current
+          @mutex.synchronize { @map[th] = Time.now }
+          yield
+        ensure
+          # Need clean up here because if next event is delayed, incorrect exception will be raised in normal flow.
+          @mutex.synchronize { @map.delete(th) }
+        end
+      end
+
       include OwnedByMixin
       include TimeMixin::Parser
 
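TimeoutChecker is a watchdog: parsing threads register themselves in @map, and one background thread wakes every 0.5s (via ServerEngine::BlockingFlag, which doubles as the shutdown signal) and raises into any thread past its deadline. A stripped-down sketch of the same Thread#raise watchdog pattern in plain Ruby, with no fluentd or serverengine dependencies (all names here are illustrative):

class ParseTimeout < StandardError; end

# Watchdog: a background thread scans registered worker threads twice a
# second and raises into any thread running longer than `timeout` seconds.
def run_with_watchdog(timeout)
  registry = {} # thread => start time
  mutex = Mutex.new
  watchdog = Thread.new do
    loop do
      sleep 0.5
      now = Time.now
      mutex.synchronize do
        registry.keys.each do |th|
          if now - registry[th] > timeout
            th.raise(ParseTimeout, "work timed out")
            registry.delete(th)
          end
        end
      end
    end
  end
  mutex.synchronize { registry[Thread.current] = Time.now }
  yield
ensure
  mutex.synchronize { registry.delete(Thread.current) }
  watchdog.kill
end

begin
  run_with_watchdog(1) { sleep 3 } # interrupted after roughly 1-1.5s
rescue ParseTimeout => e
  puts "caught: #{e.message}"
end

fluentd raises UncatchableError (from fluent/error.rb, which gains a few lines in this release) rather than a StandardError, presumably so that rescue clauses inside parser implementations cannot swallow the timeout before parse_with_timeout handles it.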
@@ -47,6 +92,7 @@ module Fluent
       config_param :null_empty_string, :bool, default: false
       config_param :estimate_current_event, :bool, default: true
       config_param :keep_time_key, :bool, default: false
+      config_param :timeout, :time, default: nil
 
       AVAILABLE_PARSER_VALUE_TYPES = ['string', 'integer', 'float', 'bool', 'time', 'array']
 
@@ -65,12 +111,43 @@ module Fluent
         @null_value_regexp = @null_value_pattern && Regexp.new(@null_value_pattern)
         @type_converters = build_type_converters(@types)
         @execute_convert_values = @type_converters || @null_value_regexp || @null_empty_string
+        @timeout_checker = if @timeout
+                             class << self
+                               alias_method :parse_orig, :parse
+                               alias_method :parse, :parse_with_timeout
+                             end
+                             TimeoutChecker.new(@timeout)
+                           else
+                             nil
+                           end
+      end
+
+      def start
+        super
+
+        @timeout_checker.start if @timeout_checker
+      end
+
+      def stop
+        super
+
+        @timeout_checker.stop if @timeout_checker
       end
 
       def parse(text, &block)
         raise NotImplementedError, "Implement this method in child class"
       end
 
+      def parse_with_timeout(text, &block)
+        @timeout_checker.execute {
+          parse_orig(text, &block)
+        }
+      rescue UncatchableError
+        log.warn "parsing timed out with #{self.class}: text = #{text}"
+        # Return nil instead of raising error. in_tail or other plugin can emit broken line.
+        yield nil, nil
+      end
+
       def call(*a, &b)
         # Keep backward compatibility for existing plugins
         # TODO: warn when deprecated
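Net effect of these parser.rb hunks: every parser gains an opt-in timeout parameter. When it is set, parse is aliased to parse_with_timeout at configure time, and a stuck parse yields nil, nil instead of wedging the input thread. A hypothetical configuration exercising it (path, tag, and expression are illustrative; the directive layout follows fluentd's usual config format):

<source>
  @type tail
  path /var/log/app.log
  tag app.access
  <parse>
    @type regexp
    expression /^(?<host>[^ ]*) (?<message>.*)$/
    timeout 2s   # new in 1.7.0: give up on a single line after 2 seconds
  </parse>
</source>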
data/lib/fluent/plugin/parser_csv.rb
CHANGED
@@ -27,6 +27,23 @@ module Fluent
       config_param :keys, :array, value_type: :string
       desc 'The delimiter character (or string) of CSV values'
       config_param :delimiter, :string, default: ','
+      desc 'The parser type used to parse CSV line'
+      config_param :parser_type, :enum, list: [:normal, :fast], default: :normal
+
+      def configure(conf)
+        super
+
+
+        if @parser_type == :fast
+          @quote_char = '"'
+          @escape_pattern = Regexp.compile(@quote_char * 2)
+
+          m = method(:parse_fast)
+          self.singleton_class.module_eval do
+            define_method(:parse, m)
+          end
+        end
+      end
 
       def parse(text, &block)
         values = CSV.parse_line(text, col_sep: @delimiter)
@@ -34,6 +51,64 @@ module Fluent
         time, record = convert_values(parse_time(r), r)
         yield time, record
       end
+
+      def parse_fast(text, &block)
+        r = parse_fast_internal(text)
+        time, record = convert_values(parse_time(r), r)
+        yield time, record
+      end
+
+      # CSV.parse_line is too slow due to initialize lots of object and
+      # CSV module doesn't provide the efficient method for parsing single line.
+      # This method avoids the overhead of CSV.parse_line for typical patterns
+      def parse_fast_internal(text)
+        record = {}
+        text.chomp!
+
+        return record if text.empty?
+
+        # use while because while is now faster than each_with_index
+        columns = text.split(@delimiter, -1)
+        num_columns = columns.size
+        i = 0
+        j = 0
+        while j < num_columns
+          column = columns[j]
+
+          case column.count(@quote_char)
+          when 0
+            if column.empty?
+              column = nil
+            end
+          when 1
+            if column.start_with?(@quote_char)
+              to_merge = [column]
+              j += 1
+              while j < num_columns
+                merged_col = columns[j]
+                to_merge << merged_col
+                break if merged_col.end_with?(@quote_char)
+                j += 1
+              end
+              column = to_merge.join(@delimiter)[1..-2]
+            end
+          when 2
+            if column.start_with?(@quote_char) && column.end_with?(@quote_char)
+              column = column[1..-2]
+            end
+          else
+            if column.start_with?(@quote_char) && column.end_with?(@quote_char)
+              column = column[1..-2]
+            end
+            column.gsub!(@escape_pattern, @quote_char)
+          end
+
+          record[@keys[i]] = column
+          j += 1
+          i += 1
+        end
+        record
+      end
     end
   end
 end
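The fast path trades CSV generality for speed: it splits on the raw delimiter, re-joins fields whose quote counts show the split was premature (the when 1 branch), and collapses doubled quotes. The dispatch is decided once in configure via define_method on the singleton class, so the per-line hot path pays no conditional. A hypothetical configuration selecting it (key names illustrative):

<parse>
  @type csv
  keys time,host,status,message
  parser_type fast   # new in 1.7.0; the default `normal` keeps using CSV.parse_line
</parse>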
data/lib/fluent/plugin_helper/server.rb
CHANGED
@@ -377,7 +377,7 @@ module Fluent
     end
 
     # Use string "?" for port, not integer or nil. "?" is clear than -1 or nil in the log.
-    PEERADDR_FAILED = ["?", "?", "name resolusion failed", "?"]
+    PEERADDR_FAILED = ["?", "?", "name resolution failed", "?"]
 
     class CallbackSocket
       def initialize(server_type, sock, enabled_events = [], close_socket: true)
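For reference, PEERADDR_FAILED mirrors the four-element array that Ruby's IPSocket#peeraddr returns, with string placeholders so the log fields keep a uniform type when the lookup fails. A standard-library-only illustration (the address shown is an example):

require 'socket'

TCPSocket.open("example.com", 80) do |sock|
  # => e.g. ["AF_INET", 80, "93.184.216.34", "93.184.216.34"]
  #    [address family, port, host name, numeric address]
  p sock.peeraddr
end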
data/lib/fluent/root_agent.rb
CHANGED
@@ -201,7 +201,7 @@ module Fluent
     def start
       lifecycle(desc: true) do |i| # instance
         i.start unless i.started?
-        # Input#start sometimes emits lots of ...
+        # Input#start sometimes emits lots of events with in_tail/`read_from_head true` case
         # and it causes deadlock for small buffer/queue output. To avoid such problem,
         # buffer related output threads should be run before `Input#start`.
         # This is why after_start should be called immediately after start call.
data/lib/fluent/time.rb
CHANGED
@@ -78,7 +78,7 @@ module Fluent
     end
   rescue
     def to_time
-      Time.at(@sec ...
+      Time.at(Rational(@sec * 1_000_000_000 + @nsec, 1_000_000_000))
     end
   end
 
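The Rational form keeps the full nanosecond value exact, whereas float arithmetic at epoch magnitudes can only resolve a few hundred nanoseconds. A quick check in plain Ruby:

sec, nsec = 1_500_000_000, 123_456_789

t_float = Time.at(sec + nsec / 1_000_000_000.0)                         # float math
t_exact = Time.at(Rational(sec * 1_000_000_000 + nsec, 1_000_000_000))  # exact

p t_float.nsec # likely off by up to a few hundred ns (a double resolves ~2**-22 s near 1.5e9)
p t_exact.nsec # => 123456789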
@@ -111,7 +111,9 @@ module Fluent
     end
 
     def self.now
-      from_time(Time.now)
+      # This method is called many time. so call Process.clock_gettime directly instead of Fluent::Clock.real_now
+      now = Process.clock_gettime(Process::CLOCK_REALTIME, :nanosecond)
+      Fluent::EventTime.new(now / 1_000_000_000, now % 1_000_000_000)
     end
 
     def self.parse(*args)
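Process.clock_gettime with an integer unit returns a single Integer, skipping the Time allocation and timezone handling that Time.now performs; since EventTime.now runs once per event, this is a hot-path win. A rough comparison sketch (plain Ruby; results vary by platform):

require 'benchmark'

n = 1_000_000
Benchmark.bm(13) do |x|
  # Allocates a Time object per call, then takes it apart.
  x.report('Time.now') do
    n.times { t = Time.now; [t.to_i, t.nsec] }
  end
  # One Integer in, two divmod results out; no Time object at all.
  x.report('clock_gettime') do
    n.times do
      ns = Process.clock_gettime(Process::CLOCK_REALTIME, :nanosecond)
      [ns / 1_000_000_000, ns % 1_000_000_000]
    end
  end
end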