logstash-output-sumologic 1.1.4 → 1.1.9
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.md +11 -6
- data/DEVELOPER.md +19 -7
- data/Gemfile +2 -1
- data/README.md +110 -38
- data/lib/logstash/outputs/sumologic.rb +79 -314
- data/lib/logstash/outputs/sumologic/common.rb +57 -0
- data/lib/logstash/outputs/sumologic/compressor.rb +39 -0
- data/lib/logstash/outputs/sumologic/header_builder.rb +79 -0
- data/lib/logstash/outputs/sumologic/message_queue.rb +38 -0
- data/lib/logstash/outputs/sumologic/monitor.rb +72 -0
- data/lib/logstash/outputs/sumologic/payload_builder.rb +155 -0
- data/lib/logstash/outputs/sumologic/piler.rb +87 -0
- data/lib/logstash/outputs/sumologic/sender.rb +167 -0
- data/lib/logstash/outputs/sumologic/statistics.rb +124 -0
- data/logstash-output-sumologic.gemspec +17 -15
- data/spec/outputs/sumologic/compressor_spec.rb +27 -0
- data/spec/outputs/sumologic/header_builder_spec.rb +197 -0
- data/spec/outputs/sumologic/message_queue_spec.rb +48 -0
- data/spec/outputs/sumologic/payload_builder_spec.rb +523 -0
- data/spec/outputs/sumologic/piler_spec.rb +189 -0
- data/spec/outputs/sumologic/sender_spec.rb +188 -0
- data/spec/outputs/sumologic_spec.rb +224 -400
- data/spec/test_server.rb +49 -0
- metadata +71 -37
- data/CONTRIBUTORS +0 -13
- data/spec/spec_helper.rb +0 -61
data/lib/logstash/outputs/sumologic/common.rb

```diff
@@ -0,0 +1,57 @@
+# encoding: utf-8
+require "date"
+
+module LogStash; module Outputs; class SumoLogic;
+  module Common
+
+    # global constants
+    DEFAULT_LOG_FORMAT = "%{@timestamp} %{host} %{message}"
+    METRICS_NAME_PLACEHOLDER = "*"
+    GRAPHITE = "graphite"
+    CARBON2 = "carbon2"
+    DEFLATE = "deflate"
+    GZIP = "gzip"
+    STATS_TAG = "STATS_TAG"
+
+    # for debugging test
+    LOG_TO_CONSOLE = false
+    @@logger = nil
+
+    def set_logger(logger)
+      @@logger = logger
+    end
+
+    def log_info(message, *opts)
+      if LOG_TO_CONSOLE
+        puts "[INFO:#{DateTime::now}]#{message} #{opts.to_s}"
+      else
+        @@logger && @@logger.info(message, opts)
+      end
+    end # def log_info
+
+    def log_warn(message, *opts)
+      if LOG_TO_CONSOLE
+        puts "\e[33m[WARN:#{DateTime::now}]#{message} #{opts.to_s}\e[0m"
+      else
+        @@logger && @@logger.warn(message, opts)
+      end
+    end # def log_warn
+
+    def log_err(message, *opts)
+      if LOG_TO_CONSOLE
+        puts "\e[31m[ERR :#{DateTime::now}]#{message} #{opts.to_s}\e[0m"
+      else
+        @@logger && @@logger.error(message, opts)
+      end
+    end # def log_err
+
+    def log_dbg(message, *opts)
+      if LOG_TO_CONSOLE
+        puts "\e[36m[DBG :#{DateTime::now}]#{message} #{opts.to_s}\e[0m"
+      else
+        @@logger && @@logger.debug(message, opts)
+      end
+    end # def log_dbg
+
+  end
+end; end; end
```
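For orientation, a minimal sketch (not part of the diff) of how a component consumes this mixin. `DemoComponent` and `StdoutLogger` are hypothetical; the logger only needs the two-argument `info`/`warn`/`error`/`debug` interface that Logstash's logger provides, and the `require` assumes the gem's `lib` directory is on the load path.

```ruby
require "logstash/outputs/sumologic/common"

# Hypothetical logger exposing the (message, opts) interface log_* expects.
class StdoutLogger
  def info(message, opts);  puts "INFO  #{message} #{opts.inspect}";  end
  def warn(message, opts);  puts "WARN  #{message} #{opts.inspect}";  end
  def error(message, opts); puts "ERROR #{message} #{opts.inspect}"; end
  def debug(message, opts); puts "DEBUG #{message} #{opts.inspect}"; end
end

class DemoComponent
  include LogStash::Outputs::SumoLogic::Common

  def run
    set_logger(StdoutLogger.new)        # shared via @@logger across all includers
    log_info("starting", :attempt => 1) # a no-op when no logger is set and LOG_TO_CONSOLE is false
  end
end

DemoComponent.new.run
```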
data/lib/logstash/outputs/sumologic/compressor.rb

```diff
@@ -0,0 +1,39 @@
+# encoding: utf-8
+require "stringio"
+require "zlib"
+require "logstash/outputs/sumologic/common"
+
+module LogStash; module Outputs; class SumoLogic;
+  class Compressor
+
+    include LogStash::Outputs::SumoLogic::Common
+
+    def initialize(config)
+      @compress = config["compress"]
+      @compress_encoding = (config["compress_encoding"] ||= DEFLATE).downcase
+    end # def initialize
+
+    def compress(content)
+      if @compress
+        if @compress_encoding == GZIP
+          result = gzip(content)
+          result.bytes.to_a.pack("c*")
+        else
+          Zlib::Deflate.deflate(content)
+        end
+      else
+        content
+      end
+    end # def compress
+
+    def gzip(content)
+      stream = StringIO.new("w")
+      stream.set_encoding("ASCII")
+      gz = Zlib::GzipWriter.new(stream)
+      gz.write(content)
+      gz.close
+      stream.string.bytes.to_a.pack("c*")
+    end # def gzip
+
+  end
+end; end; end
```
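A minimal usage sketch (not part of the diff), assuming the gem is on the load path: `Compressor` reads `compress` and `compress_encoding` from the plugin config hash (deflate is the default encoding), and a gzip payload round-trips through `Zlib::GzipReader`.

```ruby
require "stringio"
require "zlib"
require "logstash/outputs/sumologic/compressor"

compressor = LogStash::Outputs::SumoLogic::Compressor.new(
  "compress"          => true,
  "compress_encoding" => "gzip"
)
packed = compressor.compress("hello sumo")

# Round-trip to confirm the payload is valid gzip:
puts Zlib::GzipReader.new(StringIO.new(packed)).read  # => "hello sumo"
```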
data/lib/logstash/outputs/sumologic/header_builder.rb

```diff
@@ -0,0 +1,79 @@
+# encoding: utf-8
+require "socket"
+require "logstash/outputs/sumologic/common"
+
+module LogStash; module Outputs; class SumoLogic;
+  class HeaderBuilder
+
+    include LogStash::Outputs::SumoLogic::Common
+
+    CONTENT_TYPE = "Content-Type"
+    CONTENT_TYPE_LOG = "text/plain"
+    CONTENT_TYPE_GRAPHITE = "application/vnd.sumologic.graphite"
+    CONTENT_TYPE_CARBON2 = "application/vnd.sumologic.carbon2"
+    CONTENT_ENCODING = "Content-Encoding"
+
+    CATEGORY_HEADER = "X-Sumo-Category"
+    CATEGORY_HEADER_DEFAULT = "Logstash"
+    HOST_HEADER = "X-Sumo-Host"
+    NAME_HEADER = "X-Sumo-Name"
+    NAME_HEADER_DEFAULT = "logstash-output-sumologic"
+
+    CLIENT_HEADER = "X-Sumo-Client"
+    CLIENT_HEADER_VALUE = "logstash-output-sumologic"
+
+    def initialize(config)
+
+      @extra_headers = config["extra_headers"] ||= {}
+      @source_category = config["source_category"] ||= CATEGORY_HEADER_DEFAULT
+      @source_host = config["source_host"] ||= Socket.gethostname
+      @source_name = config["source_name"] ||= NAME_HEADER_DEFAULT
+      @metrics = config["metrics"]
+      @fields_as_metrics = config["fields_as_metrics"]
+      @metrics_format = (config["metrics_format"] ||= CARBON2).downcase
+      @compress = config["compress"]
+      @compress_encoding = config["compress_encoding"]
+
+    end # def initialize
+
+    def build()
+      headers = build_common()
+      headers[CATEGORY_HEADER] = @source_category unless @source_category.blank?
+      append_content_header(headers)
+      headers
+    end # def build
+
+    def build_stats()
+      headers = build_common()
+      headers[CATEGORY_HEADER] = "#{@source_category}.stats"
+      headers[CONTENT_TYPE] = CONTENT_TYPE_CARBON2
+      headers
+    end # def build_stats
+
+    private
+    def build_common()
+      headers = Hash.new()
+      headers.merge!(@extra_headers)
+      headers[CLIENT_HEADER] = CLIENT_HEADER_VALUE
+      headers[HOST_HEADER] = @source_host unless @source_host.blank?
+      headers[NAME_HEADER] = @source_name unless @source_name.blank?
+      append_compress_header(headers)
+      headers
+    end # def build_common
+
+    def append_content_header(headers)
+      content_type = CONTENT_TYPE_LOG
+      if @metrics || @fields_as_metrics
+        content_type = (@metrics_format == GRAPHITE) ? CONTENT_TYPE_GRAPHITE : CONTENT_TYPE_CARBON2
+      end
+      headers[CONTENT_TYPE] = content_type
+    end # def append_content_header
+
+    def append_compress_header(headers)
+      if @compress
+        headers[CONTENT_ENCODING] = (@compress_encoding == GZIP) ? GZIP : DEFLATE
+      end
+    end # def append_compress_header
+
+  end
+end; end; end
```
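A minimal sketch (not part of the diff) of the headers built for a log payload with compression on. It assumes a Logstash runtime, since `HeaderBuilder` relies on `String#blank?` from logstash-core; the category and name values are illustrative.

```ruby
require "logstash/outputs/sumologic/header_builder"

builder = LogStash::Outputs::SumoLogic::HeaderBuilder.new(
  "source_category"   => "prod/app",
  "source_name"       => "my-pipeline",
  "compress"          => true,
  "compress_encoding" => "deflate"
)
p builder.build
# Roughly: {"X-Sumo-Client"=>"logstash-output-sumologic",
#           "X-Sumo-Host"=><hostname>, "X-Sumo-Name"=>"my-pipeline",
#           "Content-Encoding"=>"deflate",
#           "X-Sumo-Category"=>"prod/app", "Content-Type"=>"text/plain"}
```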
data/lib/logstash/outputs/sumologic/message_queue.rb

```diff
@@ -0,0 +1,38 @@
+# encoding: utf-8
+require "logstash/outputs/sumologic/common"
+require "logstash/outputs/sumologic/statistics"
+
+module LogStash; module Outputs; class SumoLogic;
+  class MessageQueue
+
+    def initialize(stats, config)
+      @queue_max = (config["queue_max"] ||= 1) < 1 ? 1 : config["queue_max"]
+      @queue = SizedQueue::new(@queue_max)
+      @stats = stats
+    end
+
+    def enq(obj)
+      if (obj.bytesize > 0)
+        @queue.enq(obj)
+        @stats.record_enque(obj)
+      end
+    end # def enq
+
+    def deq()
+      obj = @queue.deq()
+      @stats.record_deque(obj)
+      obj
+    end # def deq
+
+    def drain()
+      @queue.size.times.map {
+        deq()
+      }
+    end # def drain
+
+    def size()
+      @queue.size()
+    end # def size
+
+  end
+end; end; end
```
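A minimal sketch (not part of the diff): `MessageQueue` wraps a `SizedQueue` and reports every enqueue/dequeue to a statistics object. `StatsStub` below is a hypothetical stand-in for the `Statistics` class added in this release.

```ruby
require "logstash/outputs/sumologic/message_queue"

# Hypothetical stand-in that records nothing; Statistics keeps atomic counters.
class StatsStub
  def record_enque(obj); end
  def record_deque(obj); end
end

queue = LogStash::Outputs::SumoLogic::MessageQueue.new(StatsStub.new, "queue_max" => 4)
queue.enq("payload-1")
queue.enq("payload-2")
queue.enq("")        # dropped: enq ignores empty payloads
p queue.size         # => 2
p queue.drain        # => ["payload-1", "payload-2"]
```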
data/lib/logstash/outputs/sumologic/monitor.rb

```diff
@@ -0,0 +1,72 @@
+# encoding: utf-8
+require "logstash/outputs/sumologic/common"
+require "logstash/outputs/sumologic/statistics"
+require "logstash/outputs/sumologic/message_queue"
+
+module LogStash; module Outputs; class SumoLogic;
+  class Monitor
+
+    include LogStash::Outputs::SumoLogic::Common
+
+    attr_reader :is_pile
+
+    def initialize(queue, stats, config)
+      @queue = queue
+      @stats = stats
+      @stopping = Concurrent::AtomicBoolean.new(false)
+
+      @enabled = config["stats_enabled"] ||= false
+      @interval = config["stats_interval"] ||= 60
+      @interval = @interval < 0 ? 0 : @interval
+    end # def initialize
+
+    def start()
+      @stopping.make_false()
+      if (@enabled)
+        @monitor_t = Thread.new {
+          while @stopping.false?
+            Stud.stoppable_sleep(@interval) { @stopping.true? }
+            if @stats.total_input_events.value > 0
+              @queue.enq(build_stats_payload())
+            end
+          end # while
+        }
+      end # if
+    end # def start
+
+    def stop()
+      @stopping.make_true()
+      if (@enabled)
+        log_info "shutting down monitor..."
+        @monitor_t.join
+        log_info "monitor is fully shut down"
+      end
+    end # def stop
+
+    def build_stats_payload()
+      timestamp = Time.now().to_i
+
+      counters = [
+        "total_input_events",
+        "total_input_bytes",
+        "total_metrics_datapoints",
+        "total_log_lines",
+        "total_output_requests",
+        "total_output_bytes",
+        "total_output_bytes_compressed",
+        "total_response_times",
+        "total_response_success"
+      ].map { |key|
+        value = @stats.send(key).value
+        build_metric_line(key, value, timestamp)
+      }.join($/)
+
+      "#{STATS_TAG}#{counters}"
+    end # def build_stats_payload
+
+    def build_metric_line(key, value, timestamp)
+      "metric=#{key} interval=#{@interval} category=monitor #{value} #{timestamp}"
+    end # def build_metric_line
+
+  end
+end; end; end
```
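The stats payload is a batch of carbon2-style lines, one per counter, joined with the record separator and prefixed with `STATS_TAG`. A sketch (not part of the diff) of a single line in the shape `build_metric_line` produces for the default 60-second interval; the counter value and timestamp are made up.

```ruby
key, value, timestamp = "total_input_events", 1024, 1_700_000_000
line = "metric=#{key} interval=60 category=monitor #{value} #{timestamp}"
puts line
# => metric=total_input_events interval=60 category=monitor 1024 1700000000
```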
data/lib/logstash/outputs/sumologic/payload_builder.rb

```diff
@@ -0,0 +1,155 @@
+# encoding: utf-8
+require "logstash/json"
+require "logstash/event"
+
+require "logstash/outputs/sumologic/common"
+
+module LogStash; module Outputs; class SumoLogic;
+  class PayloadBuilder
+
+    include LogStash::Outputs::SumoLogic::Common
+
+    TIMESTAMP_FIELD = "@timestamp"
+    METRICS_NAME_TAG = "metric"
+    JSON_PLACEHOLDER = "%{@json}"
+    ALWAYS_EXCLUDED = [ "@timestamp", "@version" ]
+
+    def initialize(stats, config)
+      @stats = stats
+
+      @format = config["format"] ||= DEFAULT_LOG_FORMAT
+      @json_mapping = config["json_mapping"]
+
+      @metrics = config["metrics"]
+      @metrics_name = config["metrics_name"]
+      @fields_as_metrics = config["fields_as_metrics"]
+      @metrics_format = (config["metrics_format"] ||= CARBON2).downcase
+      @intrinsic_tags = config["intrinsic_tags"] ||= {}
+      @meta_tags = config["meta_tags"] ||= {}
+      @fields_include = config["fields_include"] ||= []
+      @fields_exclude = config["fields_exclude"] ||= []
+
+    end # def initialize
+
+    def build(event)
+      payload = if @metrics || @fields_as_metrics
+        build_metrics_payload(event)
+      else
+        build_log_payload(event)
+      end
+      payload
+    end # def build
+
+    private
+
+    def build_log_payload(event)
+      @stats.record_log_process()
+      apply_template(@format, event)
+    end # def build_log_payload
+
+    def build_metrics_payload(event)
+      timestamp = event.get(TIMESTAMP_FIELD).to_i
+      source = if @fields_as_metrics
+        event_as_metrics(event)
+      else
+        expand_hash(@metrics, event)
+      end
+      lines = source.flat_map { |key, value|
+        get_single_line(event, key, value, timestamp)
+      }.reject(&:nil?)
+      @stats.record_metrics_process(lines.size)
+      lines.join($/)
+    end # def build_metrics_payload
+
+    def event_as_metrics(event)
+      hash = event2hash(event)
+      acc = {}
+      hash.keys.each do |field|
+        value = hash[field]
+        dotify(acc, field, value, nil)
+      end
+      acc
+    end # def event_as_metrics
+
+    def get_single_line(event, key, value, timestamp)
+      full = get_metrics_name(event, key)
+      if !ALWAYS_EXCLUDED.include?(full) && \
+        (@fields_include.empty? || @fields_include.any? { |regexp| full.match(regexp) }) && \
+        !(@fields_exclude.any? { |regexp| full.match(regexp) }) && \
+        is_number?(value)
+        if @metrics_format == GRAPHITE
+          "#{full} #{value} #{timestamp}"
+        else
+          @intrinsic_tags[METRICS_NAME_TAG] = full
+          "#{hash2line(@intrinsic_tags, event)} #{hash2line(@meta_tags, event)}#{value} #{timestamp}"
+        end
+      end
+    end # def get_single_line
+
+    def dotify(acc, key, value, prefix)
+      pk = prefix ? "#{prefix}.#{key}" : key.to_s
+      if value.is_a?(Hash)
+        value.each do |k, v|
+          dotify(acc, k, v, pk)
+        end
+      elsif value.is_a?(Array)
+        value.each_with_index.map { |v, i|
+          dotify(acc, i.to_s, v, pk)
+        }
+      else
+        acc[pk] = value
+      end
+    end # def dotify
+
+    def event2hash(event)
+      if @json_mapping
+        @json_mapping.reduce({}) do |acc, kv|
+          k, v = kv
+          acc[k] = event.sprintf(v)
+          acc
+        end
+      else
+        event.to_hash
+      end
+    end # def event2hash
+
+    def is_number?(me)
+      me.to_f.to_s == me.to_s || me.to_i.to_s == me.to_s
+    end # def is_number?
+
+    def expand_hash(hash, event)
+      hash.reduce({}) do |acc, kv|
+        k, v = kv
+        exp_k = apply_template(k, event)
+        exp_v = apply_template(v, event)
+        acc[exp_k] = exp_v
+        acc
+      end
+    end # def expand_hash
+
+    def apply_template(template, event)
+      if template.include? JSON_PLACEHOLDER
+        hash = event2hash(event)
+        dump = LogStash::Json.dump(hash)
+        template = template.gsub(JSON_PLACEHOLDER) { dump }
+      end
+      event.sprintf(template)
+    end # def apply_template
+
+    def get_metrics_name(event, name)
+      name = @metrics_name.gsub(METRICS_NAME_PLACEHOLDER) { name } if @metrics_name
+      event.sprintf(name)
+    end # def get_metrics_name
+
+    def hash2line(hash, event)
+      if (hash.is_a?(Hash) && !hash.empty?)
+        expand_hash(hash, event).flat_map { |k, v|
+          "#{k}=#{v} "
+        }.join()
+      else
+        ""
+      end
+    end # def hash2line
+
+  end
+end; end; end
```
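The `fields_as_metrics` path hinges on `dotify`, which flattens nested hashes and arrays into dotted metric names before the include/exclude filters run. A standalone replica (not part of the diff) of that flattening; the sample event hash is made up.

```ruby
def dotify(acc, key, value, prefix)
  pk = prefix ? "#{prefix}.#{key}" : key.to_s
  if value.is_a?(Hash)
    value.each { |k, v| dotify(acc, k, v, pk) }      # recurse into nested hashes
  elsif value.is_a?(Array)
    value.each_with_index { |v, i| dotify(acc, i.to_s, v, pk) }  # index becomes a name segment
  else
    acc[pk] = value                                  # leaf: record the dotted path
  end
end

acc = {}
dotify(acc, "cpu", { "usage" => { "user" => 3.2, "system" => 1.1 }, "cores" => [0.5, 0.7] }, nil)
p acc
# => {"cpu.usage.user"=>3.2, "cpu.usage.system"=>1.1,
#     "cpu.cores.0"=>0.5, "cpu.cores.1"=>0.7}
```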
data/lib/logstash/outputs/sumologic/piler.rb

```diff
@@ -0,0 +1,87 @@
+# encoding: utf-8
+require "logstash/outputs/sumologic/common"
+require "logstash/outputs/sumologic/statistics"
+require "logstash/outputs/sumologic/message_queue"
+
+module LogStash; module Outputs; class SumoLogic;
+  class Piler
+
+    include LogStash::Outputs::SumoLogic::Common
+
+    attr_reader :is_pile
+
+    def initialize(queue, stats, config)
+
+      @interval = config["interval"] ||= 0
+      @pile_max = config["pile_max"] ||= 0
+      @queue = queue
+      @stats = stats
+      @stopping = Concurrent::AtomicBoolean.new(false)
+      @is_pile = (@interval > 0 && @pile_max > 0)
+
+      if (@is_pile)
+        @pile = Array.new
+        @pile_size = 0
+        @semaphore = Mutex.new
+      end
+
+    end # def initialize
+
+    def start()
+      @stopping.make_false()
+      if (@is_pile)
+        @piler_t = Thread.new {
+          while @stopping.false?
+            Stud.stoppable_sleep(@interval) { @stopping.true? }
+            log_dbg("timeout, enqueue pile now")
+            enq_and_clear()
+          end # while
+        }
+      end # if
+    end # def start
+
+    def stop()
+      @stopping.make_true()
+      if (@is_pile)
+        log_info "shutting down piler..."
+        @piler_t.join
+        log_info "piler is fully shut down"
+      end
+    end # def stop
+
+    def input(entry)
+      if (@stopping.true?)
+        log_warn "piler is shutting down, message ignored", "message" => entry
+      elsif (@is_pile)
+        @semaphore.synchronize {
+          if @pile_size + entry.bytesize > @pile_max
+            @queue.enq(@pile.join($/))
+            @pile.clear
+            @pile_size = 0
+            @stats.record_clear_pile()
+          end
+          @pile << entry
+          @pile_size += entry.bytesize
+          @stats.record_input(entry)
+        }
+      else
+        @queue.enq(entry)
+      end # if
+    end # def input
+
+    private
+    def enq_and_clear()
+      if (@pile.size > 0)
+        @semaphore.synchronize {
+          if (@pile.size > 0)
+            @queue.enq(@pile.join($/))
+            @pile.clear
+            @pile_size = 0
+            @stats.record_clear_pile()
+          end
+        }
+      end
+    end # def enq_and_clear
+
+  end
+end; end; end
```
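The bookkeeping in `input` flushes the pile before appending whenever the next entry would push it past `pile_max`, so a flushed batch stays at or under `pile_max` bytes unless a single entry alone exceeds the limit. A standalone sketch (not part of the diff) of that decision with a hypothetical 10-byte limit:

```ruby
pile, pile_size, pile_max = [], 0, 10
flushed = []

["aaaa", "bbbb", "cccc"].each do |entry|
  if pile_size + entry.bytesize > pile_max
    flushed << pile.join("\n")  # in Piler this batch goes to the message queue
    pile.clear
    pile_size = 0
  end
  pile << entry
  pile_size += entry.bytesize
end

p flushed  # => ["aaaa\nbbbb"]  ("cccc" waits in the pile for the timer flush)
```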