logstash-core 5.0.2-java → 5.1.1.1-java
This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- checksums.yaml +4 -4
- data/gemspec_jars.rb +9 -0
- data/lib/logstash-core/logstash-core.jar +0 -0
- data/lib/logstash-core/logstash-core.rb +22 -0
- data/lib/logstash-core/version.rb +1 -1
- data/lib/logstash-core_jars.rb +20 -0
- data/lib/logstash/agent.rb +65 -14
- data/lib/logstash/api/commands/default_metadata.rb +2 -1
- data/lib/logstash/api/commands/stats.rb +3 -2
- data/lib/logstash/config/file.rb +0 -1
- data/lib/logstash/config/loader.rb +1 -0
- data/lib/logstash/config/mixin.rb +2 -6
- data/lib/logstash/environment.rb +25 -2
- data/lib/logstash/event_dispatcher.rb +40 -0
- data/lib/logstash/filter_delegator.rb +1 -1
- data/lib/logstash/filters/base.rb +10 -2
- data/lib/logstash/instrument/metric_store.rb +0 -1
- data/lib/logstash/instrument/metric_type/base.rb +0 -1
- data/lib/logstash/instrument/namespaced_null_metric.rb +54 -0
- data/lib/logstash/instrument/null_metric.rb +55 -46
- data/lib/logstash/instrument/periodic_poller/jvm.rb +26 -3
- data/lib/logstash/instrument/periodic_poller/load_average.rb +47 -0
- data/lib/logstash/instrument/snapshot.rb +0 -1
- data/lib/logstash/java_integration.rb +0 -1
- data/lib/logstash/logging/logger.rb +37 -4
- data/lib/logstash/outputs/base.rb +1 -1
- data/lib/logstash/patches.rb +1 -0
- data/lib/logstash/patches/exception_to_json.rb +5 -0
- data/lib/logstash/pipeline.rb +50 -17
- data/lib/logstash/plugin.rb +14 -48
- data/lib/logstash/plugins/hooks_registry.rb +57 -0
- data/lib/logstash/plugins/registry.rb +208 -45
- data/lib/logstash/runner.rb +10 -5
- data/lib/logstash/settings.rb +101 -9
- data/lib/logstash/universal_plugin.rb +13 -0
- data/lib/logstash/util/byte_value.rb +60 -0
- data/lib/logstash/util/loggable.rb +14 -2
- data/lib/logstash/util/safe_uri.rb +1 -0
- data/lib/logstash/util/time_value.rb +70 -0
- data/lib/logstash/util/wrapped_acked_queue.rb +347 -0
- data/lib/logstash/util/wrapped_synchronous_queue.rb +17 -33
- data/lib/logstash/version.rb +1 -1
- data/locales/en.yml +1 -1
- data/logstash-core.gemspec +13 -18
- data/spec/api/lib/api/node_stats_spec.rb +3 -1
- data/spec/api/lib/api/support/resource_dsl_methods.rb +14 -6
- data/spec/api/spec_helper.rb +1 -0
- data/spec/conditionals_spec.rb +3 -2
- data/spec/logstash/agent_spec.rb +142 -62
- data/spec/logstash/environment_spec.rb +38 -0
- data/spec/logstash/event_dispatcher_spec.rb +76 -0
- data/spec/logstash/filter_delegator_spec.rb +2 -1
- data/spec/logstash/instrument/namespaced_null_metric_spec.rb +33 -0
- data/spec/logstash/instrument/null_metric_spec.rb +9 -5
- data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +40 -0
- data/spec/logstash/instrument/periodic_poller/load_average_spec.rb +91 -0
- data/spec/logstash/output_delegator_spec.rb +2 -1
- data/spec/logstash/patches_spec.rb +15 -4
- data/spec/logstash/pipeline_pq_file_spec.rb +131 -0
- data/spec/logstash/pipeline_spec.rb +21 -17
- data/spec/logstash/plugin_spec.rb +4 -16
- data/spec/logstash/plugins/hooks_registry_spec.rb +60 -0
- data/spec/logstash/plugins/registry_spec.rb +22 -14
- data/spec/logstash/settings/bytes_spec.rb +53 -0
- data/spec/logstash/settings/time_value_spec.rb +31 -0
- data/spec/logstash/settings/writable_directory_spec.rb +125 -0
- data/spec/logstash/settings_spec.rb +39 -0
- data/spec/logstash/util/byte_value_spec.rb +33 -0
- data/spec/logstash/util/time_value_spec.rb +59 -0
- data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +2 -2
- data/spec/logstash/webserver_spec.rb +4 -7
- data/spec/support/helpers.rb +8 -0
- data/spec/support/mocks_classes.rb +61 -31
- metadata +73 -20
- data/lib/jars.rb +0 -7
- data/lib/logstash/config/registry.rb +0 -13
- data/lib/logstash/inputs/metrics.rb +0 -47
- data/spec/logstash/inputs/metrics_spec.rb +0 -51
- data/vendor/jars/com/fasterxml/jackson/core/jackson-core/2.7.4/jackson-core-2.7.4.jar +0 -0
- data/vendor/jars/com/fasterxml/jackson/core/jackson-databind/2.7.4/jackson-databind-2.7.4.jar +0 -0
- data/vendor/jars/org/apache/logging/log4j/log4j-1.2-api/2.6.2/log4j-1.2-api-2.6.2.jar +0 -0
- data/vendor/jars/org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar +0 -0
- data/vendor/jars/org/apache/logging/log4j/log4j-core/2.6.2/log4j-core-2.6.2.jar +0 -0
- data/vendor/jars/org/logstash/logstash-core/5.0.2/logstash-core-5.0.2.jar +0 -0
data/lib/logstash/settings.rb
CHANGED
@@ -1,10 +1,19 @@
 # encoding: utf-8
+require "logstash/util/loggable"
+require "fileutils"
+require "logstash/util/byte_value"
+require "logstash/util/time_value"
 
 module LogStash
   class Settings
 
     def initialize
       @settings = {}
+      # These settings were loaded from the yaml file
+      # but we didn't find any settings to validate them against,
+      # so let's keep them around until we do `validate_all`; by that
+      # time universal plugins could have added new settings.
+      @transient_settings = {}
     end
 
     def register(setting)
@@ -48,8 +57,14 @@ module LogStash
     end
     alias_method :get, :get_value
 
-    def set_value(setting_name, value)
+    def set_value(setting_name, value, graceful = false)
       get_setting(setting_name).set(value)
+    rescue ArgumentError => e
+      if graceful
+        @transient_settings[setting_name] = value
+      else
+        raise e
+      end
     end
     alias_method :set, :set_value
 
@@ -61,8 +76,8 @@ module LogStash
       hash
     end
 
-    def merge(hash)
-      hash.each {|key, value| set_value(key, value) }
+    def merge(hash, graceful = false)
+      hash.each {|key, value| set_value(key, value, graceful) }
       self
     end
 
@@ -92,10 +107,13 @@ module LogStash
 
     def from_yaml(yaml_path)
       settings = read_yaml(::File.join(yaml_path, "logstash.yml"))
-      self.merge(flatten_hash(settings))
+      self.merge(flatten_hash(settings), true)
     end
 
     def validate_all
+      # let's merge the transient_settings again to see if new settings were added.
+      self.merge(@transient_settings)
+
       @settings.each do |name, setting|
         setting.validate_value
       end
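The hunks above work together: logstash.yml is read before universal plugins have had a chance to register their settings, so `from_yaml` merges gracefully, parking unknown keys in `@transient_settings` instead of raising; `validate_all` then re-merges them strictly once registration is complete. A minimal sketch of that flow (the `String` setting class and key names are illustrative, not taken from this diff):

    settings = LogStash::Settings.new
    settings.register(LogStash::Setting::String.new("node.name", "node-1"))

    # "queue.type" is not registered yet, so a strict merge would raise
    # ArgumentError; the graceful merge parks it in @transient_settings.
    settings.merge({ "node.name" => "agent-1", "queue.type" => "persisted" }, true)

    # A universal plugin registers the setting later...
    settings.register(LogStash::Setting::String.new("queue.type", "memory"))

    # ...and validate_all re-merges the parked values, now successfully.
    settings.validate_all
    settings.get("queue.type") # => "persisted"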
@@ -118,6 +136,8 @@ module LogStash
   end
 
   class Setting
+    include LogStash::Util::Loggable
+
     attr_reader :name, :default
 
     def initialize(name, klass, default=nil, strict=true, &validator_proc)
@@ -389,17 +409,89 @@ module LogStash
 
     class WritableDirectory < Setting
       def initialize(name, default=nil, strict=false)
-        super(name, ::String, default, strict)
-
-
-
-
+        super(name, ::String, default, strict)
+      end
+
+      def validate(path)
+        super(path)
+
+        if ::File.directory?(path)
+          if !::File.writable?(path)
+            raise ::ArgumentError.new("Path \"#{path}\" must be a writable directory. It is not writable.")
+          end
+        elsif ::File.symlink?(path)
+          # TODO(sissel): I'm OK if we relax this restriction. My experience
+          # is that it's usually easier and safer to just reject symlinks.
+          raise ::ArgumentError.new("Path \"#{path}\" must be a writable directory. It cannot be a symlink.")
+        elsif ::File.exist?(path)
+          raise ::ArgumentError.new("Path \"#{path}\" must be a writable directory. It is not a directory.")
+        else
+          parent = ::File.dirname(path)
+          if !::File.writable?(parent)
+            raise ::ArgumentError.new("Path \"#{path}\" does not exist and I cannot create it because the parent path \"#{parent}\" is not writable.")
+          end
+        end
+
+        # If we get here, the path is a writable directory or can be created.
+        true
+      end
+
+      def value
+        super.tap do |path|
+          if !::File.directory?(path)
+            # Create the directory if it doesn't exist.
+            begin
+              logger.info("Creating directory", setting: name, path: path)
+              ::FileUtils.mkdir_p(path)
+            rescue => e
+              # TODO(sissel): Catch only specific exceptions?
+              raise ::ArgumentError.new("Path \"#{path}\" does not exist, and I failed trying to create it: #{e.class.name} - #{e}")
+            end
           end
         end
       end
     end
+
+    class Bytes < Coercible
+      def initialize(name, default=nil, strict=true)
+        super(name, ::Fixnum, default, strict=true) { |value| valid?(value) }
+      end
+
+      def valid?(value)
+        value.is_a?(Fixnum) && value >= 0
+      end
+
+      def coerce(value)
+        case value
+        when ::Numeric
+          value
+        when ::String
+          LogStash::Util::ByteValue.parse(value)
+        else
+          raise ArgumentError.new("Could not coerce '#{value}' into a bytes value")
+        end
+      end
+
+      def validate(value)
+        unless valid?(value)
+          raise ArgumentError.new("Invalid byte value \"#{value}\".")
+        end
+      end
+    end
+
+    class TimeValue < Coercible
+      def initialize(name, default, strict=true, &validator_proc)
+        super(name, ::Fixnum, default, strict, &validator_proc)
+      end
+
+      def coerce(value)
+        return value if value.is_a?(::Fixnum)
+        Util::TimeValue.from_value(value).to_nanos
+      end
+    end
   end
 
+
   SETTINGS = Settings.new
 end
 
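The new `Bytes` and `TimeValue` setting classes lean on the two utility modules added below (`LogStash::Util::ByteValue` and `LogStash::Util::TimeValue`) to accept human-friendly strings. A hypothetical usage sketch (the setting names are illustrative, and it assumes `Coercible#set` coerces before validating, as elsewhere in this file):

    max_bytes = LogStash::Setting::Bytes.new("queue.max_bytes", "1gb")
    max_bytes.set("250mb")   # coerced via LogStash::Util::ByteValue.parse
    max_bytes.value          # => 262144000

    interval = LogStash::Setting::TimeValue.new("config.reload.interval", "3s")
    interval.set("500ms")    # coerced to nanoseconds via Util::TimeValue
    interval.value           # => 500000000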
data/lib/logstash/util/byte_value.rb
ADDED
@@ -0,0 +1,60 @@
+# encoding: utf-8
+require "logstash/namespace"
+
+module LogStash; module Util; module ByteValue
+  module_function
+
+  B = 1
+  KB = B << 10
+  MB = B << 20
+  GB = B << 30
+  TB = B << 40
+  PB = B << 50
+
+  def parse(text)
+    if !text.is_a?(String)
+      raise ArgumentError, "ByteValue::parse takes a String, got a `#{text.class.name}`"
+    end
+    number = text.to_f
+    factor = multiplier(text)
+
+    (number * factor).to_i
+  end
+
+  def multiplier(text)
+    case text
+    when /(?:k|kb)$/
+      KB
+    when /(?:m|mb)$/
+      MB
+    when /(?:g|gb)$/
+      GB
+    when /(?:t|tb)$/
+      TB
+    when /(?:p|pb)$/
+      PB
+    when /(?:b)$/
+      B
+    else
+      raise ArgumentError, "Unknown bytes value '#{text}'"
+    end
+  end
+
+  def human_readable(number)
+    value, unit = if number > PB
+      [number / PB, "pb"]
+    elsif number > TB
+      [number / TB, "tb"]
+    elsif number > GB
+      [number / GB, "gb"]
+    elsif number > MB
+      [number / MB, "mb"]
+    elsif number > KB
+      [number / KB, "kb"]
+    else
+      [number, "b"]
+    end
+
+    format("%.2d%s", value, unit)
+  end
+end end end
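`ByteValue` parses strings with binary (power-of-two) unit suffixes; a suffix is required, fractional values are accepted, and the result is truncated to an integer byte count. For example:

    require "logstash/util/byte_value"

    LogStash::Util::ByteValue.parse("64b")    # => 64
    LogStash::Util::ByteValue.parse("10kb")   # => 10240 (10 * 1024)
    LogStash::Util::ByteValue.parse("1.5gb")  # => 1610612736
    LogStash::Util::ByteValue.parse("10")     # raises ArgumentError: no unit suffix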
data/lib/logstash/util/loggable.rb
CHANGED
@@ -5,15 +5,27 @@ require "logstash/namespace"
 module LogStash module Util
   module Loggable
     def self.included(klass)
-      def klass.logger
+
+      def klass.log4j_name
         ruby_name = self.name || self.class.name || self.class.to_s
-        log4j_name = ruby_name.gsub('::', '.').downcase
+        ruby_name.gsub('::', '.').downcase
+      end
+
+      def klass.logger
         @logger ||= LogStash::Logging::Logger.new(log4j_name)
       end
 
+      def klass.slow_logger(warn_threshold, info_threshold, debug_threshold, trace_threshold)
+        @slow_logger ||= LogStash::Logging::SlowLogger.new(log4j_name, warn_threshold, info_threshold, debug_threshold, trace_threshold)
+      end
+
       def logger
         self.class.logger
       end
+
+      def slow_logger(warn_threshold, info_threshold, debug_threshold, trace_threshold)
+        self.class.slow_logger(warn_threshold, info_threshold, debug_threshold, trace_threshold)
+      end
     end
   end
 end; end
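With this change, any class that includes `Loggable` gets a class-level log4j logger named after its Ruby namespace, plus the new `slow_logger` accessors; the instance methods simply delegate to the class. A sketch with a hypothetical class:

    require "logstash/util/loggable"

    module MyApp
      class Worker
        include LogStash::Util::Loggable
      end
    end

    MyApp::Worker.log4j_name   # => "myapp.worker"
    MyApp::Worker.logger.info("starting")
    MyApp::Worker.new.logger.debug("instances delegate to the class logger")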
data/lib/logstash/util/time_value.rb
ADDED
@@ -0,0 +1,70 @@
+module LogStash
+  module Util
+    class TimeValue
+      def initialize(duration, time_unit)
+        @duration = duration
+        @time_unit = time_unit
+      end
+
+      def self.from_value(value)
+        if value.is_a?(TimeValue)
+          TimeValue.new(value.duration, value.time_unit)
+        elsif value.is_a?(::String)
+          normalized = value.downcase.strip
+          if normalized.end_with?("nanos")
+            TimeValue.new(parse(normalized, 5), :nanosecond)
+          elsif normalized.end_with?("micros")
+            TimeValue.new(parse(normalized, 6), :microsecond)
+          elsif normalized.end_with?("ms")
+            TimeValue.new(parse(normalized, 2), :millisecond)
+          elsif normalized.end_with?("s")
+            TimeValue.new(parse(normalized, 1), :second)
+          elsif normalized.end_with?("m")
+            TimeValue.new(parse(normalized, 1), :minute)
+          elsif normalized.end_with?("h")
+            TimeValue.new(parse(normalized, 1), :hour)
+          elsif normalized.end_with?("d")
+            TimeValue.new(parse(normalized, 1), :day)
+          elsif normalized =~ /^-0*1/
+            TimeValue.new(-1, :nanosecond)
+          else
+            raise ArgumentError.new("invalid time unit: \"#{value}\"")
+          end
+        else
+          raise ArgumentError.new("value is not a string: #{value} [#{value.class}]")
+        end
+      end
+
+      def to_nanos
+        case @time_unit
+        when :day
+          86400000000000 * @duration
+        when :hour
+          3600000000000 * @duration
+        when :minute
+          60000000000 * @duration
+        when :second
+          1000000000 * @duration
+        when :millisecond
+          1000000 * @duration
+        when :microsecond
+          1000 * @duration
+        when :nanosecond
+          @duration
+        end
+      end
+
+      def ==(other)
+        self.duration == other.duration and self.time_unit == other.time_unit
+      end
+
+      def self.parse(value, suffix)
+        Integer(value[0..(value.size - suffix - 1)].strip)
+      end
+
+      private_class_method :parse
+      attr_reader :duration
+      attr_reader :time_unit
+    end
+  end
+end
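`from_value` dispatches on the unit suffix (longer suffixes such as "micros" are checked before shorter ones such as "s", so the ordering matters), and `to_nanos` normalizes everything to nanoseconds. For example:

    require "logstash/util/time_value"

    tv = LogStash::Util::TimeValue.from_value("5s")
    tv.duration   # => 5
    tv.time_unit  # => :second
    tv.to_nanos   # => 5000000000

    LogStash::Util::TimeValue.from_value("250 ms").to_nanos  # => 250000000
    LogStash::Util::TimeValue.from_value(42)                 # raises ArgumentError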
data/lib/logstash/util/wrapped_acked_queue.rb
ADDED
@@ -0,0 +1,347 @@
+# encoding: utf-8
+
+require "logstash-core-queue-jruby/logstash-core-queue-jruby"
+require "concurrent"
+# This is an adapted copy of the wrapped_synchronous_queue file;
+# ideally this should be moved to Java/JRuby
+
+module LogStash; module Util
+  # Some specialized constructors. The calling code *does* need to know what kind it creates but
+  # not the internal implementation e.g. LogStash::AckedMemoryQueue etc.
+  # Note the use of allocate - this is what new does before it calls initialize.
+  # Note that the new method has been made private; this is because there is no
+  # default queue implementation.
+  # It would be expensive to create a persistent queue in the new method
+  # to then throw it away in favor of a memory based one directly after.
+  # Especially in terms of (mmap) memory allocation and proper close sequencing.
+
+  class WrappedAckedQueue
+    class QueueClosedError < ::StandardError; end
+    class NotImplementedError < ::StandardError; end
+
+    def self.create_memory_based(path, capacity, max_events, max_bytes)
+      self.allocate.with_queue(
+        LogStash::AckedMemoryQueue.new(path, capacity, max_events, max_bytes)
+      )
+    end
+
+    def self.create_file_based(path, capacity, max_events, checkpoint_max_writes, checkpoint_max_acks, checkpoint_max_interval, max_bytes)
+      self.allocate.with_queue(
+        LogStash::AckedQueue.new(path, capacity, max_events, checkpoint_max_writes, checkpoint_max_acks, checkpoint_max_interval, max_bytes)
+      )
+    end
+
+    private_class_method :new
+
+    def with_queue(queue)
+      @queue = queue
+      @queue.open
+      @closed = Concurrent::AtomicBoolean.new(false)
+      self
+    end
+
+    def closed?
+      @closed.true?
+    end
+
+    # Push an object to the queue; if the queue is full
+    # it will block until the object can be added to the queue.
+    #
+    # @param [Object] Object to add to the queue
+    def push(obj)
+      check_closed("write")
+      @queue.write(obj)
+    end
+    alias_method(:<<, :push)
+
+    # TODO - fix doc for this noop method
+    # Offer an object to the queue, wait for the specified amount of time.
+    # If adding to the queue was successful it will return true, false otherwise.
+    #
+    # @param [Object] Object to add to the queue
+    # @param [Integer] Time in milliseconds to wait before giving up
+    # @return [Boolean] true if adding was successful, false otherwise
+    def offer(obj, timeout_ms)
+      raise NotImplementedError.new("The offer method is not implemented. There is no non blocking write operation yet.")
+    end
+
+    # Blocking
+    def take
+      check_closed("read a batch")
+      # TODO - determine better arbitrary timeout millis
+      @queue.read_batch(1, 200).get_elements.first
+    end
+
+    # Block for X millis
+    def poll(millis)
+      check_closed("read")
+      @queue.read_batch(1, millis).get_elements.first
+    end
+
+    def read_batch(size, wait)
+      check_closed("read a batch")
+      @queue.read_batch(size, wait)
+    end
+
+    def write_client
+      WriteClient.new(self)
+    end
+
+    def read_client()
+      ReadClient.new(self)
+    end
+
+    def check_closed(action)
+      if closed?
+        raise QueueClosedError.new("Attempted to #{action} on a closed AckedQueue")
+      end
+    end
+
+    def close
+      @queue.close
+      @closed.make_true
+    end
+
+    class ReadClient
+      # We generally only want one thread at a time able to access pop/take/poll operations
+      # from this queue. We also depend on this to be able to block consumers while we snapshot
+      # in-flight buffers
+
+      def initialize(queue, batch_size = 125, wait_for = 250)
+        @queue = queue
+        @mutex = Mutex.new
+        # Note that @inflight_batches as a central mechanism for tracking inflight
+        # batches will fail if we have multiple read clients in the pipeline.
+        @inflight_batches = {}
+        # allow the worker thread to report the execution time of the filter + output
+        @inflight_clocks = {}
+        @batch_size = batch_size
+        @wait_for = wait_for
+      end
+
+      def close
+        @queue.close
+      end
+
+      def set_batch_dimensions(batch_size, wait_for)
+        @batch_size = batch_size
+        @wait_for = wait_for
+      end
+
+      def set_events_metric(metric)
+        @event_metric = metric
+      end
+
+      def set_pipeline_metric(metric)
+        @pipeline_metric = metric
+      end
+
+      def inflight_batches
+        @mutex.synchronize do
+          yield(@inflight_batches)
+        end
+      end
+
+      def current_inflight_batch
+        @inflight_batches.fetch(Thread.current, [])
+      end
+
+      def take_batch
+        if @queue.closed?
+          raise QueueClosedError.new("Attempt to take a batch from a closed AckedQueue")
+        end
+        @mutex.synchronize do
+          batch = ReadBatch.new(@queue, @batch_size, @wait_for)
+          add_starting_metrics(batch)
+          set_current_thread_inflight_batch(batch)
+          start_clock
+          batch
+        end
+      end
+
+      def set_current_thread_inflight_batch(batch)
+        @inflight_batches[Thread.current] = batch
+      end
+
+      def close_batch(batch)
+        @mutex.synchronize do
+          batch.close
+          @inflight_batches.delete(Thread.current)
+          stop_clock
+        end
+      end
+
+      def start_clock
+        @inflight_clocks[Thread.current] = [
+          @event_metric.time(:duration_in_millis),
+          @pipeline_metric.time(:duration_in_millis)
+        ]
+      end
+
+      def stop_clock
+        @inflight_clocks[Thread.current].each(&:stop)
+        @inflight_clocks.delete(Thread.current)
+      end
+
+      def add_starting_metrics(batch)
+        return if @event_metric.nil? || @pipeline_metric.nil?
+        @event_metric.increment(:in, batch.starting_size)
+        @pipeline_metric.increment(:in, batch.starting_size)
+      end
+
+      def add_filtered_metrics(batch)
+        @event_metric.increment(:filtered, batch.filtered_size)
+        @pipeline_metric.increment(:filtered, batch.filtered_size)
+      end
+
+      def add_output_metrics(batch)
+        @event_metric.increment(:out, batch.filtered_size)
+        @pipeline_metric.increment(:out, batch.filtered_size)
+      end
+    end
+
+    class ReadBatch
+      def initialize(queue, size, wait)
+        @originals = Hash.new
+
+        # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
+        # @cancelled = Hash.new
+
+        @generated = Hash.new
+        @iterating_temp = Hash.new
+        @iterating = false # Atomic Boolean maybe? Although batches are not shared across threads
+        take_originals_from_queue(queue, size, wait) # this sets a reference to @acked_batch
+      end
+
+      def close
+        # this will ack the whole batch, regardless of whether some
+        # events were cancelled or failed
+        return if @acked_batch.nil?
+        @acked_batch.close
+      end
+
+      def merge(event)
+        return if event.nil? || @originals.key?(event)
+        # take care not to cause @generated to change during iteration
+        # @iterating_temp is merged after the iteration
+        if iterating?
+          @iterating_temp[event] = true
+        else
+          # the periodic flush could generate events outside of an each iteration
+          @generated[event] = true
+        end
+      end
+
+      def cancel(event)
+        # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
+        raise("cancel is unsupported")
+        # @cancelled[event] = true
+      end
+
+      def each(&blk)
+        # take care not to cause @originals or @generated to change during iteration
+
+        # below the checks for @cancelled.include?(e) have been replaced by e.cancelled?
+        # TODO: for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
+        @iterating = true
+        @originals.each do |e, _|
+          blk.call(e) unless e.cancelled?
+        end
+        @generated.each do |e, _|
+          blk.call(e) unless e.cancelled?
+        end
+        @iterating = false
+        update_generated
+      end
+
+      def size
+        filtered_size
+      end
+
+      def starting_size
+        @originals.size
+      end
+
+      def filtered_size
+        @originals.size + @generated.size
+      end
+
+      def cancelled_size
+        # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
+        raise("cancelled_size is unsupported")
+        # @cancelled.size
+      end
+
+      def shutdown_signal_received?
+        false
+      end
+
+      def flush_signal_received?
+        false
+      end
+
+      private
+
+      def iterating?
+        @iterating
+      end
+
+      def update_generated
+        @generated.update(@iterating_temp)
+        @iterating_temp.clear
+      end
+
+      def take_originals_from_queue(queue, size, wait)
+        @acked_batch = queue.read_batch(size, wait)
+        return if @acked_batch.nil?
+        @acked_batch.get_elements.each do |e|
+          @originals[e] = true
+        end
+      end
+    end
+
+    class WriteClient
+      def initialize(queue)
+        @queue = queue
+      end
+
+      def get_new_batch
+        WriteBatch.new
+      end
+
+      def push(event)
+        if @queue.closed?
+          raise QueueClosedError.new("Attempted to write an event to a closed AckedQueue")
+        end
+        @queue.push(event)
+      end
+      alias_method(:<<, :push)
+
+      def push_batch(batch)
+        if @queue.closed?
+          raise QueueClosedError.new("Attempted to write a batch to a closed AckedQueue")
+        end
+        batch.each do |event|
+          push(event)
+        end
+      end
+    end
+
+    class WriteBatch
+      def initialize
+        @events = []
+      end
+
+      def push(event)
+        @events.push(event)
+      end
+      alias_method(:<<, :push)
+
+      def each(&blk)
+        @events.each do |e|
+          blk.call(e)
+        end
+      end
+    end
+  end
+end end
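A rough sketch of the intended call pattern, mirroring how the pipeline uses the synchronous wrapper (the helper names are placeholders, and the read client's metrics must be wired up before `take_batch`, since `start_clock` assumes they are set):

    queue = LogStash::Util::WrappedAckedQueue.create_memory_based(
      "path/to/queue",   # a path is required even for the memory-based variant
      1024,              # page capacity
      250,               # max events
      1_073_741_824      # max bytes
    )

    writer = queue.write_client
    reader = queue.read_client
    reader.set_events_metric(events_metric)       # assumed namespaced metric objects,
    reader.set_pipeline_metric(pipeline_metric)   # as wired up by the pipeline

    writer << LogStash::Event.new("message" => "hello")

    batch = reader.take_batch
    batch.each { |event| process(event) }   # process is a placeholder
    reader.close_batch(batch)               # closing the batch acks it against the queue

    queue.close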