logstash-core 5.0.0.alpha5.snapshot1-java → 5.0.0.alpha6.snapshot1-java
Potentially problematic release: this version of logstash-core might be problematic.
- checksums.yaml +4 -4
- data/lib/logstash-core/version.rb +1 -1
- data/lib/logstash/agent.rb +1 -1
- data/lib/logstash/api/commands/default_metadata.rb +1 -1
- data/lib/logstash/api/commands/hot_threads_reporter.rb +4 -7
- data/lib/logstash/api/commands/node.rb +5 -4
- data/lib/logstash/api/commands/stats.rb +8 -3
- data/lib/logstash/api/modules/base.rb +5 -0
- data/lib/logstash/api/modules/node.rb +1 -2
- data/lib/logstash/api/modules/node_stats.rb +1 -2
- data/lib/logstash/codecs/base.rb +29 -1
- data/lib/logstash/config/mixin.rb +1 -1
- data/lib/logstash/environment.rb +5 -5
- data/lib/logstash/filter_delegator.rb +4 -5
- data/lib/logstash/instrument/periodic_poller/jvm.rb +43 -10
- data/lib/logstash/output_delegator.rb +33 -168
- data/lib/logstash/output_delegator_strategies/legacy.rb +29 -0
- data/lib/logstash/output_delegator_strategies/shared.rb +20 -0
- data/lib/logstash/output_delegator_strategies/single.rb +23 -0
- data/lib/logstash/output_delegator_strategy_registry.rb +36 -0
- data/lib/logstash/outputs/base.rb +39 -26
- data/lib/logstash/patches/clamp.rb +6 -0
- data/lib/logstash/pipeline.rb +42 -14
- data/lib/logstash/pipeline_reporter.rb +2 -8
- data/lib/logstash/plugin.rb +6 -10
- data/lib/logstash/runner.rb +12 -9
- data/lib/logstash/settings.rb +124 -21
- data/lib/logstash/util/wrapped_synchronous_queue.rb +17 -1
- data/lib/logstash/version.rb +1 -1
- data/lib/logstash/webserver.rb +44 -33
- data/locales/en.yml +5 -1
- data/logstash-core.gemspec +2 -2
- data/spec/api/lib/api/node_spec.rb +62 -10
- data/spec/api/lib/api/node_stats_spec.rb +16 -3
- data/spec/api/lib/api/support/resource_dsl_methods.rb +11 -1
- data/spec/api/spec_helper.rb +1 -1
- data/spec/conditionals_spec.rb +12 -1
- data/spec/logstash/agent_spec.rb +3 -0
- data/spec/logstash/codecs/base_spec.rb +74 -0
- data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +37 -10
- data/spec/logstash/output_delegator_spec.rb +64 -89
- data/spec/logstash/outputs/base_spec.rb +91 -15
- data/spec/logstash/pipeline_reporter_spec.rb +1 -6
- data/spec/logstash/pipeline_spec.rb +20 -22
- data/spec/logstash/plugin_spec.rb +3 -3
- data/spec/logstash/runner_spec.rb +86 -3
- data/spec/logstash/settings/integer_spec.rb +20 -0
- data/spec/logstash/settings/numeric_spec.rb +28 -0
- data/spec/logstash/settings/port_range_spec.rb +93 -0
- data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +6 -0
- data/spec/logstash/webserver_spec.rb +95 -0
- metadata +20 -6
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: e89e8c25486877ae001285428c6e6d9fbe3dfc6b
+  data.tar.gz: 5e6318f6ae9f263864588fa2361f5e9ebdc94fb4
 SHA512:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: af57be94f22a74d3ffbbde98eca0eb92fa503c15254b666c5fdfe518c401af10260203b520b9ce26f2b9043c93fdd2289f2af5ecb1387deaf7f65bdfa6ad1a19
+  data.tar.gz: 9d6a1a9a69f77fb49cb0ec3f5d2be1f8a0f2f3a460590a4722ae071cd711fb5a792baa663c233a0f06b94dbea6e7a09a3fd9c0f60ee0ac7d488097b5a96f2a52
data/lib/logstash/agent.rb
CHANGED
@@ -135,7 +135,7 @@ class LogStash::Agent
 
   private
   def start_webserver
-    options = {:http_host => @http_host, :…
+    options = {:http_host => @http_host, :http_ports => @http_port, :http_environment => @http_environment }
     @webserver = LogStash::WebServer.new(@logger, self, options)
     Thread.new(@webserver) do |webserver|
       LogStash::Util.set_thread_name("Api Webserver")
data/lib/logstash/api/commands/hot_threads_reporter.rb
CHANGED
@@ -1,6 +1,7 @@
 # encoding: utf-8
 
 class HotThreadsReport
+  STRING_SEPARATOR_LENGTH = 80.freeze
   HOT_THREADS_STACK_TRACES_SIZE_DEFAULT = 10.freeze
 
   def initialize(cmd, options)
@@ -13,19 +14,16 @@ class HotThreadsReport
   def to_s
     hash = to_hash[:hot_threads]
     report = "#{I18n.t("logstash.web_api.hot_threads.title", :hostname => hash[:hostname], :time => hash[:time], :top_count => @thread_dump.top_count )} \n"
-    report << '=' * …
+    report << '=' * STRING_SEPARATOR_LENGTH
     report << "\n"
     hash[:threads].each do |thread|
-      thread_report = ""
-      thread_report = "#{I18n.t("logstash.web_api.…
-      hot_threads.thread_title", :percent_of_cpu_time => thread[:percent_of_cpu_time], :thread_state => thread[:state], :thread_name => thread[:name])} \n"
-      thread_report = "#{thread[:percent_of_cpu_time]} % of of cpu usage by #{thread[:state]} thread named '#{thread[:name]}'\n"
+      thread_report = "#{I18n.t("logstash.web_api.hot_threads.thread_title", :percent_of_cpu_time => thread[:percent_of_cpu_time], :thread_state => thread[:state], :thread_name => thread[:name])} \n"
       thread_report << "#{thread[:path]}\n" if thread[:path]
       thread[:traces].each do |trace|
         thread_report << "\t#{trace}\n"
       end
       report << thread_report
-      report << '-' * …
+      report << '-' * STRING_SEPARATOR_LENGTH
       report << "\n"
     end
     report
@@ -57,5 +55,4 @@ class HotThreadsReport
   def cpu_time(hash)
     hash["cpu.time"] / 1000000.0
   end
-
 end
data/lib/logstash/api/commands/node.rb
CHANGED
@@ -20,7 +20,7 @@ module LogStash
     def pipeline
       extract_metrics(
         [:stats, :pipelines, :main, :config],
-        :workers, :batch_size, :batch_delay
+        :workers, :batch_size, :batch_delay, :config_reload_automatic, :config_reload_interval
       )
     end
 
@@ -35,27 +35,28 @@ module LogStash
 
     def jvm
       memory_bean = ManagementFactory.getMemoryMXBean()
+
       {
         :pid => ManagementFactory.getRuntimeMXBean().getName().split("@").first.to_i,
         :version => java.lang.System.getProperty("java.version"),
         :vm_name => java.lang.System.getProperty("java.vm.name"),
         :vm_version => java.lang.System.getProperty("java.version"),
         :vm_vendor => java.lang.System.getProperty("java.vendor"),
-        :vm_name => java.lang.System.getProperty("java.vm.name"),
+        :vm_name => java.lang.System.getProperty("java.vm.name"),
         :start_time_in_millis => started_at,
         :mem => {
           :heap_init_in_bytes => (memory_bean.getHeapMemoryUsage().getInit() < 0 ? 0 : memory_bean.getHeapMemoryUsage().getInit()),
           :heap_max_in_bytes => (memory_bean.getHeapMemoryUsage().getMax() < 0 ? 0 : memory_bean.getHeapMemoryUsage().getMax()),
           :non_heap_init_in_bytes => (memory_bean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memory_bean.getNonHeapMemoryUsage().getInit()),
           :non_heap_max_in_bytes => (memory_bean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memory_bean.getNonHeapMemoryUsage().getMax())
-        }
+        },
+        :gc_collectors => ManagementFactory.getGarbageCollectorMXBeans().collect(&:getName)
       }
     end
 
     def hot_threads(options={})
       HotThreadsReport.new(self, options)
     end
-
   end
  end
 end
data/lib/logstash/api/commands/stats.rb
CHANGED
@@ -14,7 +14,8 @@ module LogStash
         :count,
         :peak_count
       ),
-      :mem => memory
+      :mem => memory,
+      :gc => gc
     }
   end
 
@@ -32,7 +33,7 @@ module LogStash
   def events
     extract_metrics(
       [:stats, :events],
-      :in, :filtered, :out
+      :in, :filtered, :out, :duration_in_millis
     )
   end
 
@@ -59,6 +60,10 @@ module LogStash
     }
   end
 
+  def gc
+    service.get_shallow(:jvm, :gc)
+  end
+
   def hot_threads(options={})
     HotThreadsReport.new(self, options)
   end
@@ -70,7 +75,7 @@ module LogStash
   # Turn the `plugins` stats hash into an array of [ {}, {}, ... ]
   # This is to produce an array of data points, one point for each
   # plugin instance.
-    return [] unless stats[:plugins].include?(plugin_type)
+    return [] unless stats[:plugins] && stats[:plugins].include?(plugin_type)
     stats[:plugins][plugin_type].collect do |id, data|
       { :id => id }.merge(data)
     end
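Together with the JVM poller changes further down, the new gc key means the node stats API can now report per-generation collector activity. A rough sketch of the structure that service.get_shallow(:jvm, :gc) should yield, inferred only from the gauge paths the poller records (the exact nesting of the HTTP response may differ):

  # Illustration only -- the numbers are invented; the keys mirror the
  # [:jvm, :gc, :collectors, <young|old>] gauges written by the JVM poller.
  {
    :collectors => {
      :young => { :collection_count => 42, :collection_time_in_millis => 310 },
      :old   => { :collection_count => 3,  :collection_time_in_millis => 95 }
    }
  }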
data/lib/logstash/api/modules/node.rb
CHANGED
@@ -14,7 +14,7 @@ module LogStash
 
       options = {
         :ignore_idle_threads => as_boolean(ignore_idle_threads),
-        :human => …
+        :human => human?
       }
       options[:threads] = params["threads"].to_i if params.has_key?("threads")
 
@@ -26,7 +26,6 @@ module LogStash
       selected_fields = extract_fields(params["filter"].to_s.strip)
       respond_with node.all(selected_fields)
     end
-
   end
  end
 end
data/lib/logstash/codecs/base.rb
CHANGED
@@ -18,6 +18,7 @@ module LogStash::Codecs; class Base < LogStash::Plugin
     super
     config_init(@params)
     register if respond_to?(:register)
+    setup_multi_encode!
   end
 
   public
@@ -28,10 +29,37 @@ module LogStash::Codecs; class Base < LogStash::Plugin
   alias_method :<<, :decode
 
   public
+  # DEPRECATED: Prefer defining encode_sync or multi_encode
   def encode(event)
-    …
+    encoded = multi_encode([event])
+    encoded.each {|event,data| @on_event.call(event,data) }
   end # def encode
 
+  public
+  # Relies on the codec being synchronous (which they all are!)
+  # We need a better long term design here, but this is an improvement
+  # over the current API for shared plugins
+  # It is best if the codec implements this directly
+  def multi_encode(events)
+    if @has_encode_sync
+      events.map {|event| [event, self.encode_sync(event)]}
+    else
+      batch = Thread.current[:logstash_output_codec_batch] ||= []
+      batch.clear
+
+      events.each {|event| self.encode(event) }
+      batch
+    end
+  end
+
+  def setup_multi_encode!
+    @has_encode_sync = self.methods.include?(:encode_sync)
+
+    on_event do |event, data|
+      Thread.current[:logstash_output_codec_batch] << [event, data]
+    end
+  end
+
   public
   def close; end;
 
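The interesting part of this change is the encode_sync fast path: a codec that can return its serialized payload directly skips the @on_event / thread-local batching fallback inside multi_encode. A minimal sketch of such a codec; the class name and config_name are invented for illustration, this is not a plugin shipped with Logstash:

  # Hypothetical codec showing the new synchronous encode contract.
  class LogStash::Codecs::JoinSketch < LogStash::Codecs::Base
    config_name "join_sketch"

    # Return the payload instead of pushing it through @on_event;
    # Base#setup_multi_encode! detects this method and takes the fast path.
    def encode_sync(event)
      "#{event.to_s}\n"
    end
  end

  # codec.multi_encode(events) then returns [[event, "...\n"], ...] pairs
  # that an output can hand to its workers as a single batch.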
data/lib/logstash/config/mixin.rb
CHANGED
@@ -209,7 +209,7 @@ module LogStash::Config::Mixin
 
     name = name.to_s if name.is_a?(Symbol)
     @config[name] = opts # ok if this is empty
-
+
     if name.is_a?(String)
       define_method(name) { instance_variable_get("@#{name}") }
       define_method("#{name}=") { |v| instance_variable_set("@#{name}", v) }
data/lib/logstash/environment.rb
CHANGED
@@ -21,12 +21,12 @@ module LogStash
     Setting::String.new("config.string", nil, false),
     Setting::Boolean.new("config.test_and_exit", false),
     Setting::Boolean.new("config.reload.automatic", false),
-    Setting::Numeric.new("config.reload.interval", 3),
+    Setting::Numeric.new("config.reload.interval", 3), # in seconds
     Setting::Boolean.new("metric.collect", true) {|v| v == true }, # metric collection cannot be disabled
     Setting::String.new("pipeline.id", "main"),
-    …
-    …
-    …
+    Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum),
+    Setting::PositiveInteger.new("pipeline.output.workers", 1),
+    Setting::PositiveInteger.new("pipeline.batch.size", 125),
     Setting::Numeric.new("pipeline.batch.delay", 5), # in milliseconds
     Setting::Boolean.new("pipeline.unsafe_shutdown", false),
     Setting.new("path.plugins", Array, []),
@@ -38,7 +38,7 @@ module LogStash
     Setting::String.new("path.log", nil, false),
     Setting::String.new("log.format", "plain", true, ["json", "plain"]),
     Setting::String.new("http.host", "127.0.0.1"),
-    …
+    Setting::PortRange.new("http.port", 9600..9700),
     Setting::String.new("http.environment", "production"),
   ].each {|setting| SETTINGS.register(setting) }
 
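http.port is now a PortRange rather than a single port, which is what lets the agent (see the start_webserver hunk above) hand the whole range to the webserver and fall back to the next port when 9600 is busy. A standalone sketch of that probing idea in plain Ruby; the method name and behaviour are assumptions for illustration, not the actual LogStash::WebServer code:

  require "socket"

  # Return the first port in the range we can actually bind on the host.
  def first_free_port(range = 9600..9700, host = "127.0.0.1")
    range.each do |port|
      begin
        TCPServer.new(host, port).close
        return port
      rescue Errno::EADDRINUSE, Errno::EACCES
        next
      end
    end
    raise "no free port in #{range}"
  end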
data/lib/logstash/filter_delegator.rb
CHANGED
@@ -13,15 +13,14 @@ module LogStash
     ]
     def_delegators :@filter, *DELEGATED_METHODS
 
-    def initialize(logger, klass, metric, …
-      options = args.reduce({}, :merge)
-
+    def initialize(logger, klass, metric, plugin_args)
       @logger = logger
       @klass = klass
-      @…
+      @id = plugin_args["id"]
+      @filter = klass.new(plugin_args)
 
       # Scope the metrics to the plugin
-      namespaced_metric = metric.namespace(@…
+      namespaced_metric = metric.namespace("#{@klass.config_name}_#{@id}".to_sym)
       @filter.metric = namespaced_metric
 
       @metric_events = namespaced_metric.namespace(:events)
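The delegator now receives the raw plugin args hash and derives its metric namespace from the plugin's config name plus the user-supplied id, instead of merging option hashes. With hypothetical values:

  # Illustration of the namespacing above; config_name and args are made up.
  config_name = "grok"                                   # what klass.config_name would return
  plugin_args = { "id" => "apache", "match" => { "message" => "%{COMBINEDAPACHELOG}" } }
  namespace_key = "#{config_name}_#{plugin_args["id"]}".to_sym
  # => :grok_apache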
data/lib/logstash/instrument/periodic_poller/jvm.rb
CHANGED
@@ -1,18 +1,36 @@
 
 # encoding: utf-8
 require "logstash/instrument/periodic_poller/base"
-require …
+require "jrmonitor"
+require "set"
 
 java_import 'java.lang.management.ManagementFactory'
 java_import 'java.lang.management.OperatingSystemMXBean'
+java_import 'java.lang.management.GarbageCollectorMXBean'
 java_import 'com.sun.management.UnixOperatingSystemMXBean'
 java_import 'javax.management.MBeanServer'
 java_import 'javax.management.ObjectName'
 java_import 'javax.management.AttributeList'
 java_import 'javax.naming.directory.Attribute'
 
+
 module LogStash module Instrument module PeriodicPoller
   class JVM < Base
+    class GarbageCollectorName
+      YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation"])
+      OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation"])
+
+      YOUNG = :young
+      OLD = :old
+
+      def self.get(gc_name)
+        if YOUNG_GC_NAMES.include?(gc_name)
+          YOUNG
+        elsif(OLD_GC_NAMES.include?(gc_name))
+          OLD
+        end
+      end
+    end
 
     attr_reader :metric
 
@@ -22,31 +40,46 @@ module LogStash module Instrument module PeriodicPoller
     end
 
     def collect
-      raw = JRMonitor.memory.generate
+      raw = JRMonitor.memory.generate
       collect_heap_metrics(raw)
       collect_non_heap_metrics(raw)
       collect_pools_metrics(raw)
       collect_threads_metrics
       collect_process_metrics
+      collect_gc_stats
     end
 
     private
 
-    def …
+    def collect_gc_stats
+      garbage_collectors = ManagementFactory.getGarbageCollectorMXBeans()
+
+      garbage_collectors.each do |collector|
+        name = GarbageCollectorName.get(collector.getName())
+        if name.nil?
+          logger.error("Unknown garbage collector name", :name => name)
+        else
+          metric.gauge([:jvm, :gc, :collectors, name], :collection_count, collector.getCollectionCount())
+          metric.gauge([:jvm, :gc, :collectors, name], :collection_time_in_millis, collector.getCollectionTime())
+        end
+      end
+    end
+
+    def collect_threads_metrics
       threads = JRMonitor.threads.generate
-      …
+
       current = threads.count
       if @peak_threads.nil? || @peak_threads < current
         @peak_threads = current
-      end
-
-      metric.gauge([:jvm, :threads], :count, threads.count)
+      end
+
+      metric.gauge([:jvm, :threads], :count, threads.count)
       metric.gauge([:jvm, :threads], :peak_count, @peak_threads)
     end
 
     def collect_process_metrics
       process_metrics = JRMonitor.process.generate
-      …
+
       path = [:jvm, :process]
 
 
@@ -91,6 +124,7 @@ module LogStash module Instrument module PeriodicPoller
       end
     end
 
+
     def build_pools_metrics(data)
       heap = data["heap"]
       old = {}
@@ -129,9 +163,8 @@ module LogStash module Instrument module PeriodicPoller
         :committed_in_bytes => 0,
         :max_in_bytes => 0,
         :peak_used_in_bytes => 0,
-        :peak_max_in_bytes …
+        :peak_max_in_bytes => 0
       }
     end
-
   end
 end; end; end
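A quick way to see what GarbageCollectorName.get decides on a given JVM is to walk the collector MXBeans by hand (JRuby only, after requiring logstash/instrument/periodic_poller/jvm; an illustration, not part of the gem):

  java_import 'java.lang.management.ManagementFactory'

  ManagementFactory.getGarbageCollectorMXBeans().each do |bean|
    kind = LogStash::Instrument::PeriodicPoller::JVM::GarbageCollectorName.get(bean.getName())
    # prints e.g. "PS Scavenge -> :young (12 collections, 340 ms)"
    puts "#{bean.getName()} -> #{kind.inspect} (#{bean.getCollectionCount()} collections, #{bean.getCollectionTime()} ms)"
  end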
data/lib/logstash/output_delegator.rb
CHANGED
@@ -1,192 +1,57 @@
-
-
-
+require "logstash/output_delegator_strategy_registry"
+
+require "logstash/output_delegator_strategies/shared"
+require "logstash/output_delegator_strategies/single"
+require "logstash/output_delegator_strategies/legacy"
 
-# This class goes hand in hand with the pipeline to provide a pool of
-# free workers to be used by pipeline worker threads. The pool is
-# internally represented with a SizedQueue set the the size of the number
-# of 'workers' the output plugin is configured with.
-#
-# This plugin also records some basic statistics
 module LogStash class OutputDelegator
-  attr_reader :…
+  attr_reader :metric, :metric_events, :strategy, :namespaced_metric, :metric_events , :plugin_args, :strategy_registry
 
-
-  # Internally these just get merged together into a single hash
-  def initialize(logger, klass, default_worker_count, metric, *plugin_args)
+  def initialize(logger, output_class, metric, strategy_registry, plugin_args)
     @logger = logger
-    @…
-    @…
-    @…
-    @…
-
-
-
-
-    output = @klass.new(@config)
-
-    # Scope the metrics to the plugin
-    namespaced_metric = metric.namespace(output.plugin_unique_name.to_sym)
-    output.metric = namespaced_metric
-
-    @metric_events = namespaced_metric.namespace(:events)
-    namespaced_metric.gauge(:name, config_name)
+    @output_class = output_class
+    @metric = metric
+    @plugin_args = plugin_args
+    @strategy_registry = strategy_registry
+    raise ArgumentError, "No strategy registry specified" unless strategy_registry
+    raise ArgumentError, "No ID specified! Got args #{plugin_args}" unless id
+
+    build_strategy!
 
-    @…
+    @namespaced_metric = metric.namespace(id.to_sym)
+    @metric_events = @namespaced_metric.namespace(:events)
+    @namespaced_metric.gauge(:name, id)
   end
 
-  def …
-
+  def config_name
+    @output_class.config_name
   end
 
-  def …
-
-    if worker_limits_overriden?
-      message = @klass.workers_not_supported_message
-      warning_meta = {:plugin => @klass.config_name, :worker_count => @config["workers"]}
-      if message
-        warning_meta[:message] = message
-        @logger.warn(I18n.t("logstash.pipeline.output-worker-unsupported-with-message", warning_meta))
-      else
-        @logger.warn(I18n.t("logstash.pipeline.output-worker-unsupported", warning_meta))
-      end
-    end
+  def concurrency
+    @output_class.concurrency
   end
 
-  def …
-    @…
+  def build_strategy!
+    @strategy = strategy_registry.
+      class_for(self.concurrency).
+      new(@logger, @output_class, @metric, @plugin_args)
   end
 
-  def …
-
-    raise ArgumentError, "Attempted to detect target worker count before instantiating a worker to test for legacy workers_not_supported!" if @workers.size == 0
-
-    if @threadsafe || @klass.workers_not_supported?
-      1
-    else
-      @config["workers"] || @default_worker_count
-    end
-  end
-
-  def config_name
-    @klass.config_name
+  def id
+    @plugin_args["id"]
   end
 
   def register
-
-    @registered = true
-    # We define this as an array regardless of threadsafety
-    # to make reporting simpler, even though a threadsafe plugin will just have
-    # a single instance
-    #
-    # Older plugins invoke the instance method Outputs::Base#workers_not_supported
-    # To detect these we need an instance to be created first :()
-    # TODO: In the next major version after 2.x remove support for this
-    @workers << @klass.new(@config)
-    @workers.first.register # Needed in case register calls `workers_not_supported`
-
-    @logger.debug("Will start workers for output", :worker_count => target_worker_count, :class => @klass.name)
-
-    # Threadsafe versions don't need additional workers
-    setup_additional_workers!(target_worker_count) unless @threadsafe
-    # We skip the first worker because that's pre-registered to deal with legacy workers_not_supported
-    @workers.subList(1,@workers.size).each(&:register)
-    setup_multi_receive!
-  end
-
-  def setup_additional_workers!(target_worker_count)
-    warn_on_worker_override!
-
-    (target_worker_count - 1).times do
-      inst = @klass.new(@config)
-      inst.metric = @metric
-      @workers << inst
-    end
-
-    # This queue is used to manage sharing across threads
-    @worker_queue = SizedQueue.new(target_worker_count)
-    @workers.each {|w| @worker_queue << w }
+    @strategy.register
   end
 
-  def …
-    # One might wonder why we don't use something like
-    # define_singleton_method(:multi_receive, method(:threadsafe_multi_receive)
-    # and the answer is this is buggy on Jruby 1.7.x . It works 98% of the time!
-    # The other 2% you get weird errors about rebinding to the same object
-    # Until we switch to Jruby 9.x keep the define_singleton_method parts
-    # the way they are, with a block
-    # See https://github.com/jruby/jruby/issues/3582
-    if threadsafe?
-      @threadsafe_worker = @workers.first
-      define_singleton_method(:multi_receive) do |events|
-        threadsafe_multi_receive(events)
-      end
-    else
-      define_singleton_method(:multi_receive) do |events|
-        worker_multi_receive(events)
-      end
-    end
-  end
-
-  def threadsafe_multi_receive(events)
-    @events_received.increment(events.length)
+  def multi_receive(events)
     @metric_events.increment(:in, events.length)
-
-    clock = @metric_events.time(:duration_in_millis)
-    @threadsafe_worker.multi_receive(events)
-    clock.stop
+    @strategy.multi_receive(events)
     @metric_events.increment(:out, events.length)
   end
 
-  def worker_multi_receive(events)
-    @events_received.increment(events.length)
-    @metric_events.increment(:in, events.length)
-
-    worker = @worker_queue.pop
-    begin
-      clock = @metric_events.time(:duration_in_millis)
-      worker.multi_receive(events)
-      clock.stop
-      @metric_events.increment(:out, events.length)
-    ensure
-      @worker_queue.push(worker)
-    end
-  end
-
   def do_close
-    @…
-
-    if @threadsafe
-      @workers.each(&:do_close)
-    else
-      worker_count.times do
-        worker = @worker_queue.pop
-        worker.do_close
-      end
-    end
+    @strategy.do_close
   end
-
-  def events_received
-    @events_received.value
-  end
-
-  # There's no concept of 'busy' workers for a threadsafe plugin!
-  def busy_workers
-    if @threadsafe
-      0
-    else
-      # The pipeline reporter can run before the outputs are registered trying to pull a value here
-      # In that case @worker_queue is empty, we just return 0
-      return 0 unless @worker_queue
-      @workers.size - @worker_queue.size
-    end
-  end
-
-  def worker_count
-    @workers.size
-  end
-
-  private
-  # Needed for testing, so private
-  attr_reader :threadsafe_worker, :worker_queue
-end end
+end; end
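The strategy classes themselves (output_delegator_strategies/shared.rb, single.rb, legacy.rb and the registry) are new files whose bodies are not part of this hunk, but the contract the delegator expects is visible above: the registry maps the plugin's concurrency value to a class, and that class is constructed with (logger, output_class, metric, plugin_args) and must respond to register, multi_receive, and do_close. A guess at the general shape of the simplest strategy, written only to illustrate that contract, not the shipped implementation:

  module LogStash module OutputDelegatorStrategies
    # Sketch only: one output instance shared by all pipeline workers.
    class SharedSketch
      def initialize(logger, klass, metric, plugin_args)
        @output = klass.new(plugin_args)
        @output.metric = metric
      end

      def register
        @output.register
      end

      def multi_receive(events)
        @output.multi_receive(events)
      end

      def do_close
        @output.do_close
      end
    end
  end; end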