logstash-core 5.6.16-java → 6.0.0.alpha1-java
This diff compares the contents of two package versions that were publicly released to their registry. It is provided for informational purposes only and reflects the packages as they appear in the public registry.
- checksums.yaml +4 -4
- data/gemspec_jars.rb +4 -7
- data/lib/logstash-core/logstash-core.jar +0 -0
- data/lib/logstash-core/version.rb +4 -8
- data/lib/logstash-core_jars.rb +12 -26
- data/lib/logstash/agent.rb +261 -246
- data/lib/logstash/api/commands/default_metadata.rb +1 -1
- data/lib/logstash/api/commands/hot_threads_reporter.rb +5 -11
- data/lib/logstash/api/commands/node.rb +3 -2
- data/lib/logstash/api/commands/stats.rb +3 -2
- data/lib/logstash/bootstrap_check/bad_java.rb +16 -0
- data/lib/logstash/bootstrap_check/bad_ruby.rb +12 -0
- data/lib/logstash/bootstrap_check/default_config.rb +17 -0
- data/lib/logstash/compiler.rb +38 -0
- data/lib/logstash/compiler/lscl.rb +566 -0
- data/lib/logstash/compiler/lscl/lscl_grammar.rb +3503 -0
- data/lib/logstash/compiler/treetop_monkeypatches.rb +92 -0
- data/lib/logstash/config/config_ast.rb +4 -82
- data/lib/logstash/config/mixin.rb +73 -41
- data/lib/logstash/config/pipeline_config.rb +48 -0
- data/lib/logstash/config/source/base.rb +16 -0
- data/lib/logstash/config/source/local.rb +215 -0
- data/lib/logstash/config/source_loader.rb +125 -0
- data/lib/logstash/converge_result.rb +103 -0
- data/lib/logstash/environment.rb +6 -19
- data/lib/logstash/errors.rb +2 -0
- data/lib/logstash/execution_context.rb +4 -7
- data/lib/logstash/filter_delegator.rb +6 -9
- data/lib/logstash/inputs/base.rb +0 -2
- data/lib/logstash/instrument/collector.rb +5 -7
- data/lib/logstash/instrument/metric_store.rb +12 -12
- data/lib/logstash/instrument/metric_type/mean.rb +0 -5
- data/lib/logstash/instrument/namespaced_metric.rb +0 -4
- data/lib/logstash/instrument/namespaced_null_metric.rb +0 -4
- data/lib/logstash/instrument/null_metric.rb +0 -10
- data/lib/logstash/instrument/periodic_poller/cgroup.rb +85 -168
- data/lib/logstash/instrument/periodic_poller/jvm.rb +5 -5
- data/lib/logstash/instrument/periodic_poller/pq.rb +3 -7
- data/lib/logstash/instrument/periodic_pollers.rb +1 -3
- data/lib/logstash/instrument/wrapped_write_client.rb +24 -33
- data/lib/logstash/logging/logger.rb +15 -47
- data/lib/logstash/namespace.rb +0 -1
- data/lib/logstash/output_delegator.rb +5 -7
- data/lib/logstash/outputs/base.rb +0 -2
- data/lib/logstash/pipeline.rb +159 -87
- data/lib/logstash/pipeline_action.rb +13 -0
- data/lib/logstash/pipeline_action/base.rb +29 -0
- data/lib/logstash/pipeline_action/create.rb +47 -0
- data/lib/logstash/pipeline_action/reload.rb +48 -0
- data/lib/logstash/pipeline_action/stop.rb +23 -0
- data/lib/logstash/plugin.rb +0 -1
- data/lib/logstash/plugins/hooks_registry.rb +6 -0
- data/lib/logstash/plugins/registry.rb +0 -1
- data/lib/logstash/program.rb +14 -0
- data/lib/logstash/queue_factory.rb +5 -1
- data/lib/logstash/runner.rb +58 -80
- data/lib/logstash/settings.rb +3 -27
- data/lib/logstash/state_resolver.rb +41 -0
- data/lib/logstash/util/java_version.rb +6 -0
- data/lib/logstash/util/safe_uri.rb +12 -148
- data/lib/logstash/util/thread_dump.rb +4 -7
- data/lib/logstash/util/wrapped_acked_queue.rb +36 -39
- data/lib/logstash/util/wrapped_synchronous_queue.rb +29 -39
- data/lib/logstash/version.rb +10 -8
- data/locales/en.yml +3 -54
- data/logstash-core.gemspec +8 -35
- data/spec/{logstash/api/modules → api/lib/api}/logging_spec.rb +10 -1
- data/spec/{logstash/api/modules → api/lib/api}/node_plugins_spec.rb +2 -1
- data/spec/{logstash/api/modules → api/lib/api}/node_spec.rb +3 -3
- data/spec/{logstash/api/modules → api/lib/api}/node_stats_spec.rb +3 -7
- data/spec/{logstash/api/modules → api/lib/api}/plugins_spec.rb +3 -4
- data/spec/{logstash/api/modules → api/lib/api}/root_spec.rb +2 -2
- data/spec/api/lib/api/support/resource_dsl_methods.rb +87 -0
- data/spec/{logstash/api/commands/stats_spec.rb → api/lib/commands/stats.rb} +2 -7
- data/spec/{logstash/api → api/lib}/errors_spec.rb +1 -1
- data/spec/{logstash/api → api/lib}/rack_app_spec.rb +0 -0
- data/spec/api/spec_helper.rb +106 -0
- data/spec/logstash/agent/converge_spec.rb +286 -0
- data/spec/logstash/agent/metrics_spec.rb +244 -0
- data/spec/logstash/agent_spec.rb +213 -225
- data/spec/logstash/compiler/compiler_spec.rb +584 -0
- data/spec/logstash/config/config_ast_spec.rb +8 -47
- data/spec/logstash/config/mixin_spec.rb +2 -42
- data/spec/logstash/config/pipeline_config_spec.rb +75 -0
- data/spec/logstash/config/source/local_spec.rb +395 -0
- data/spec/logstash/config/source_loader_spec.rb +122 -0
- data/spec/logstash/converge_result_spec.rb +179 -0
- data/spec/logstash/event_spec.rb +0 -66
- data/spec/logstash/execution_context_spec.rb +8 -12
- data/spec/logstash/filter_delegator_spec.rb +12 -24
- data/spec/logstash/inputs/base_spec.rb +7 -5
- data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +92 -225
- data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +1 -1
- data/spec/logstash/instrument/periodic_poller/os_spec.rb +32 -29
- data/spec/logstash/instrument/wrapped_write_client_spec.rb +33 -33
- data/spec/logstash/legacy_ruby_event_spec.rb +13 -4
- data/spec/logstash/output_delegator_spec.rb +11 -20
- data/spec/logstash/outputs/base_spec.rb +7 -5
- data/spec/logstash/pipeline_action/create_spec.rb +83 -0
- data/spec/logstash/pipeline_action/reload_spec.rb +83 -0
- data/spec/logstash/pipeline_action/stop_spec.rb +37 -0
- data/spec/logstash/pipeline_pq_file_spec.rb +1 -1
- data/spec/logstash/pipeline_spec.rb +81 -137
- data/spec/logstash/plugin_spec.rb +2 -1
- data/spec/logstash/plugins/hooks_registry_spec.rb +6 -0
- data/spec/logstash/queue_factory_spec.rb +13 -1
- data/spec/logstash/runner_spec.rb +29 -140
- data/spec/logstash/settings/writable_directory_spec.rb +10 -13
- data/spec/logstash/settings_spec.rb +0 -91
- data/spec/logstash/state_resolver_spec.rb +156 -0
- data/spec/logstash/timestamp_spec.rb +2 -6
- data/spec/logstash/util/java_version_spec.rb +22 -0
- data/spec/logstash/util/safe_uri_spec.rb +0 -56
- data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +22 -0
- data/spec/support/helpers.rb +9 -11
- data/spec/support/matchers.rb +96 -6
- data/spec/support/mocks_classes.rb +80 -0
- data/spec/support/shared_contexts.rb +2 -27
- metadata +100 -149
- data/lib/logstash/config/loader.rb +0 -107
- data/lib/logstash/config/modules_common.rb +0 -103
- data/lib/logstash/config/source/modules.rb +0 -55
- data/lib/logstash/config/string_escape.rb +0 -27
- data/lib/logstash/dependency_report.rb +0 -131
- data/lib/logstash/dependency_report_runner.rb +0 -17
- data/lib/logstash/elasticsearch_client.rb +0 -142
- data/lib/logstash/instrument/global_metrics.rb +0 -13
- data/lib/logstash/instrument/periodic_poller/dlq.rb +0 -24
- data/lib/logstash/modules/cli_parser.rb +0 -74
- data/lib/logstash/modules/elasticsearch_config.rb +0 -22
- data/lib/logstash/modules/elasticsearch_importer.rb +0 -37
- data/lib/logstash/modules/elasticsearch_resource.rb +0 -10
- data/lib/logstash/modules/file_reader.rb +0 -36
- data/lib/logstash/modules/kibana_base.rb +0 -24
- data/lib/logstash/modules/kibana_client.rb +0 -124
- data/lib/logstash/modules/kibana_config.rb +0 -105
- data/lib/logstash/modules/kibana_dashboards.rb +0 -36
- data/lib/logstash/modules/kibana_importer.rb +0 -17
- data/lib/logstash/modules/kibana_resource.rb +0 -10
- data/lib/logstash/modules/kibana_settings.rb +0 -40
- data/lib/logstash/modules/logstash_config.rb +0 -120
- data/lib/logstash/modules/resource_base.rb +0 -38
- data/lib/logstash/modules/scaffold.rb +0 -52
- data/lib/logstash/modules/settings_merger.rb +0 -23
- data/lib/logstash/modules/util.rb +0 -17
- data/lib/logstash/util/dead_letter_queue_manager.rb +0 -61
- data/lib/logstash/util/environment_variables.rb +0 -43
- data/spec/logstash/config/loader_spec.rb +0 -38
- data/spec/logstash/config/string_escape_spec.rb +0 -24
- data/spec/logstash/instrument/periodic_poller/dlq_spec.rb +0 -17
- data/spec/logstash/modules/logstash_config_spec.rb +0 -56
- data/spec/logstash/modules/scaffold_spec.rb +0 -234
- data/spec/logstash/pipeline_dlq_commit_spec.rb +0 -109
- data/spec/logstash/settings/splittable_string_array_spec.rb +0 -51
- data/spec/logstash/util/wrapped_acked_queue_spec.rb +0 -49
- data/versions-gem-copy.yml +0 -12
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 81ae0f7af1f442de24af8f661aaaeda947a52536
+  data.tar.gz: 87404b518150d21bd9f20605b44ee0008a54efd0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f7091e24dce522fe665b44ec723751298635cb4dface96907ac0dd0c840284ae0fcd96cd837eb0aa4193cf5a0a3630b419f81165aa45dd5b5d83abdf58921cba
+  data.tar.gz: 05d429649b6302583ac6017d49e614ad75c98b62191e145ba1e71deb97d8168513fb787b52ea4f6bfdcbbaab6fc0ed528704e94661c2c454244bd9d022ff1636
data/gemspec_jars.rb
CHANGED
@@ -2,12 +2,9 @@
 # runtime dependencies to generate this gemspec dependencies file to be eval'ed by the gemspec
 # for the jar-dependencies requirements.

-gem.requirements << "jar org.apache.logging.log4j:log4j-slf4j-impl, 2.6.2"
 gem.requirements << "jar org.apache.logging.log4j:log4j-api, 2.6.2"
 gem.requirements << "jar org.apache.logging.log4j:log4j-core, 2.6.2"
-gem.requirements << "jar com.fasterxml.jackson.core:jackson-core, 2.
-gem.requirements << "jar com.fasterxml.jackson.core:jackson-databind, 2.
-gem.requirements << "jar com.fasterxml.jackson.
-gem.requirements << "jar com.fasterxml.jackson.
-gem.requirements << "jar com.fasterxml.jackson.dataformat:jackson-dataformat-cbor, 2.9.5"
-gem.requirements << "jar com.google.guava:guava, 22.0"
+gem.requirements << "jar com.fasterxml.jackson.core:jackson-core, 2.7.4"
+gem.requirements << "jar com.fasterxml.jackson.core:jackson-databind, 2.7.4"
+gem.requirements << "jar com.fasterxml.jackson.module:jackson-module-afterburner, 2.7.4"
+gem.requirements << "jar com.fasterxml.jackson.dataformat:jackson-dataformat-cbor, 2.7.4"
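For context on the hunk above: jar-dependencies consumes requirement strings of the form "jar <group>:<artifact>, <version>", and the file's own header says the fragment is meant to be eval'ed by the gemspec. A minimal, hypothetical sketch of how a gemspec could pull such a fragment in; the gem metadata and file path here are illustrative only, not taken from the package:

# Hypothetical sketch: eval a generated jar-requirements fragment (like the
# file above) inside a Gem::Specification, so gem.requirements receives the
# "jar group:artifact, version" strings that jar-dependencies understands.
Gem::Specification.new do |gem|
  gem.name    = "example-core"   # placeholder metadata
  gem.version = "0.0.1"

  # Evaluate the fragment in this spec's scope; the fragment appends to
  # gem.requirements exactly as shown in the diff above.
  eval(File.read(File.expand_path("gemspec_jars.rb", __dir__)), binding)
end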
data/lib/logstash-core/logstash-core.jar
CHANGED
Binary file
data/lib/logstash-core/version.rb
CHANGED
@@ -2,11 +2,7 @@

 # The version of logstash core gem.
 #
-#
-
-
-
-end
-if !defined?(LOGSTASH_CORE_VERSION)
-  LOGSTASH_CORE_VERSION = ALL_VERSIONS.fetch("logstash-core")
-end
+# Note to authors: this should not include dashes because 'gem' barfs if
+# you include a dash in the version string.
+
+LOGSTASH_CORE_VERSION = "6.0.0-alpha1"
data/lib/logstash-core_jars.rb
CHANGED
@@ -2,35 +2,21 @@
 begin
   require 'jar_dependencies'
 rescue LoadError
-  require 'com/fasterxml/jackson/core/jackson-annotations/2.9.5/jackson-annotations-2.9.5.jar'
   require 'org/apache/logging/log4j/log4j-core/2.6.2/log4j-core-2.6.2.jar'
-  require 'com/
-  require 'com/fasterxml/jackson/core/jackson-core/2.9.5/jackson-core-2.9.5.jar'
-  require 'org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar'
-  require 'com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar'
-  require 'com/fasterxml/jackson/dataformat/jackson-dataformat-cbor/2.9.5/jackson-dataformat-cbor-2.9.5.jar'
-  require 'com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar'
-  require 'org/codehaus/mojo/animal-sniffer-annotations/1.14/animal-sniffer-annotations-1.14.jar'
+  require 'com/fasterxml/jackson/module/jackson-module-afterburner/2.7.4/jackson-module-afterburner-2.7.4.jar'
   require 'org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar'
-  require '
-  require 'com/
-  require 'com/fasterxml/jackson/
-  require 'com/fasterxml/jackson/
+  require 'com/fasterxml/jackson/core/jackson-core/2.7.4/jackson-core-2.7.4.jar'
+  require 'com/fasterxml/jackson/core/jackson-annotations/2.7.0/jackson-annotations-2.7.0.jar'
+  require 'com/fasterxml/jackson/dataformat/jackson-dataformat-cbor/2.7.4/jackson-dataformat-cbor-2.7.4.jar'
+  require 'com/fasterxml/jackson/core/jackson-databind/2.7.4/jackson-databind-2.7.4.jar'
 end

 if defined? Jars
-  require_jar '
-  require_jar '
-  require_jar '
-  require_jar 'com.fasterxml.jackson.core', 'jackson-core', '2.
-  require_jar '
-  require_jar 'com.
-  require_jar 'com.fasterxml.jackson.
-  require_jar 'com.google.j2objc', 'j2objc-annotations', '1.1'
-  require_jar 'org.codehaus.mojo', 'animal-sniffer-annotations', '1.14'
-  require_jar 'org.apache.logging.log4j', 'log4j-api', '2.6.2'
-  require_jar 'org.apache.logging.log4j', 'log4j-slf4j-impl', '2.6.2'
-  require_jar 'com.google.errorprone', 'error_prone_annotations', '2.0.18'
-  require_jar 'com.fasterxml.jackson.core', 'jackson-databind', '2.9.5'
-  require_jar 'com.fasterxml.jackson.module', 'jackson-module-afterburner', '2.9.5'
+  require_jar( 'org.apache.logging.log4j', 'log4j-core', '2.6.2' )
+  require_jar( 'com.fasterxml.jackson.module', 'jackson-module-afterburner', '2.7.4' )
+  require_jar( 'org.apache.logging.log4j', 'log4j-api', '2.6.2' )
+  require_jar( 'com.fasterxml.jackson.core', 'jackson-core', '2.7.4' )
+  require_jar( 'com.fasterxml.jackson.core', 'jackson-annotations', '2.7.0' )
+  require_jar( 'com.fasterxml.jackson.dataformat', 'jackson-dataformat-cbor', '2.7.4' )
+  require_jar( 'com.fasterxml.jackson.core', 'jackson-databind', '2.7.4' )
 end
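The regenerated logstash-core_jars.rb above follows the usual jar-dependencies loading pattern: try to load jar_dependencies, fall back to requiring the vendored jar paths directly, and register each coordinate with require_jar when the Jars API is available. A minimal sketch of that pattern reduced to a single hypothetical jar (the coordinates below are placeholders, not dependencies of logstash-core):

# Minimal sketch of the loading pattern shown above, for one placeholder jar.
begin
  require 'jar_dependencies'
rescue LoadError
  # jar_dependencies is unavailable: fall back to the vendored jar path
  # that ships inside the gem.
  require 'com/example/example-lib/1.0.0/example-lib-1.0.0.jar'
end

if defined? Jars
  # Resolve the jar through jar-dependencies instead of a hard-coded path.
  require_jar('com.example', 'example-lib', '1.0.0')
end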
data/lib/logstash/agent.rb
CHANGED
@@ -10,8 +10,11 @@ require "logstash/instrument/metric"
 require "logstash/pipeline"
 require "logstash/webserver"
 require "logstash/event_dispatcher"
+require "logstash/config/source_loader"
+require "logstash/pipeline_action"
+require "logstash/converge_result"
+require "logstash/state_resolver"
 require "stud/trap"
-require "logstash/config/loader"
 require "uri"
 require "socket"
 require "securerandom"
@@ -30,7 +33,7 @@ class LogStash::Agent
   # :name [String] - identifier for the agent
   # :auto_reload [Boolean] - enable reloading of pipelines
   # :reload_interval [Integer] - reload pipelines every X seconds
-  def initialize(settings = LogStash::SETTINGS)
+  def initialize(settings = LogStash::SETTINGS, source_loader = nil)
     @logger = self.class.logger
     @settings = settings
     @auto_reload = setting("config.reload.automatic")
@@ -43,37 +46,63 @@ class LogStash::Agent
     # Generate / load the persistent uuid
     id

-
+    # This is for backward compatibility in the tests
+    if source_loader.nil?
+      @source_loader = LogStash::Config::SOURCE_LOADER
+      @source_loader.add_source(LogStash::Config::Source::Local.new(@settings))
+    else
+      @source_loader = source_loader
+    end
+
     @reload_interval = setting("config.reload.interval")
-    @
+    @pipelines_mutex = Mutex.new

     @collect_metric = setting("metric.collect")

     # Create the collectors and configured it with the library
     configure_metrics_collectors

+    @state_resolver = LogStash::StateResolver.new(metric)
+
     @pipeline_reload_metric = metric.namespace([:stats, :pipelines])
     @instance_reload_metric = metric.namespace([:stats, :reloads])
+    initialize_agent_metrics

     @dispatcher = LogStash::EventDispatcher.new(self)
     LogStash::PLUGIN_REGISTRY.hooks.register_emitter(self.class, dispatcher)
     dispatcher.fire(:after_initialize)
+
+    @running = Concurrent::AtomicBoolean.new(false)
   end

   def execute
     @thread = Thread.current # this var is implicitly used by Stud.stop?
-
+    logger.debug("starting agent")

-    start_pipelines
     start_webserver

-
-
-
-
-    if
-
+    transition_to_running
+
+    converge_state_and_update
+
+    if auto_reload?
+      # `sleep_then_run` instead of firing the interval right away
+      Stud.interval(@reload_interval, :sleep_then_run => true) do
+        # TODO(ph) OK, in reality, we should get out of the loop, but I am
+        # worried about the implication of that change so instead when we are stopped
+        # we don't converge.
+        #
+        # Logstash currently expect to be block here, the signal will force a kill on the agent making
+        # the agent thread unblock
+        #
+        # Actually what we really need is one more state:
+        #
+        # init => running => stopping => stopped
+        converge_state_and_update unless stopped?
+      end
     else
+      return 1 if clean_state?
+
       while !Stud.stop?
         if clean_state? || running_user_defined_pipelines?
           sleep(0.5)
@@ -82,44 +111,52 @@ class LogStash::Agent
         end
       end
     end
+
+    return 0
+  ensure
+    transition_to_stopped
   end

-
-
-
-
-  def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      @pipelines.each do |pipeline_id, pipeline|
-        next if pipeline.settings.get("config.reload.automatic") == false && force == false
-        begin
-          reload_pipeline!(pipeline_id, force)
-        rescue => e
-          @instance_reload_metric.increment(:failures)
-          @pipeline_reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
-            n.increment(:failures)
-            n.gauge(:last_error, { :message => e.message, :backtrace => e.backtrace})
-            n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
-          end
-          @logger.error(I18n.t("oops"), :message => e.message, :class => e.class.name, :backtrace => e.backtrace)
-        end
+  def auto_reload?
+    @auto_reload
+  end
+
+  def running?
+    @running.value
+  end
+
+  def stopped?
+    !@running.value
+  end
+
+  def converge_state_and_update
+    results = @source_loader.fetch
+
+    unless results.success?
+      if auto_reload?
+        logger.debug("Could not fetch the configuration to converge, will retry", :message => results.error, :retrying_in => @reload_interval)
+        return
+      else
+        raise "Could not fetch the configuration, message: #{results.error}"
       end
     end
+
+    # We Lock any access on the pipelines, since the actions will modify the
+    # content of it.
+    converge_result = nil
+
+    @pipelines_mutex.synchronize do
+      pipeline_actions = resolve_actions(results.response)
+      converge_result = converge_state(pipeline_actions)
+    end
+
+    report_currently_running_pipelines(converge_result)
+    update_metrics(converge_result)
+    dispatch_events(converge_result)
+
+    converge_result
+  rescue => e
+    logger.error("An exception happened when converging configuration", :exception => e.class, :message => e.message, :backtrace => e.backtrace)
   end

   # Calculate the Logstash uptime in milliseconds
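The hunk above replaces the old per-pipeline reload loop with a convergence cycle: the agent asks its source loader for the desired pipeline configurations, resolves them into pipeline actions under @pipelines_mutex, applies them, then reports metrics and events. A hedged usage sketch, not taken from the package: my_source_loader stands in for any object with the same contract as LogStash::Config::SOURCE_LOADER (its fetch must return a result responding to success?, error and response, as used above).

# Hedged sketch: drive a single convergence cycle by hand.
agent = LogStash::Agent.new(LogStash::SETTINGS, my_source_loader)

# Fetch the desired pipeline configs, resolve them into Create/Reload/Stop
# actions against the current pipelines, and apply them.
result = agent.converge_state_and_update

# The returned ConvergeResult reports whether every action succeeded.
puts result.success?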
@@ -129,14 +166,19 @@ class LogStash::Agent
     ((Time.now.to_f - STARTED_AT.to_f) * 1000.0).to_i
   end

-  def
-
+  def shutdown
+    stop_collecting_metrics
+    stop_webserver
+    transition_to_stopped
+    converge_result = shutdown_pipelines
+    converge_result
   end

-  def
+  def force_shutdown!
     stop_collecting_metrics
     stop_webserver
-
+    transition_to_stopped
+    force_shutdown_pipelines!
   end

   def id
@@ -177,35 +219,40 @@ class LogStash::Agent
     @id_path ||= ::File.join(settings.get("path.data"), "uuid")
   end

+  def get_pipeline(pipeline_id)
+    @pipelines_mutex.synchronize do
+      @pipelines[pipeline_id]
+    end
+  end
+
+  def pipelines_count
+    @pipelines_mutex.synchronize do
+      pipelines.size
+    end
+  end
+
   def running_pipelines
-    @
+    @pipelines_mutex.synchronize do
       @pipelines.select {|pipeline_id, _| running_pipeline?(pipeline_id) }
     end
   end

   def running_pipelines?
-    @
+    @pipelines_mutex.synchronize do
       @pipelines.select {|pipeline_id, _| running_pipeline?(pipeline_id) }.any?
     end
   end

   def running_user_defined_pipelines?
-
-    @pipelines.select do |pipeline_id, _|
-      pipeline = @pipelines[pipeline_id]
-      pipeline.running? && !pipeline.system?
-    end.any?
-  end
+    running_user_defined_pipelines.any?
   end

-  def
-
-    @pipelines.select do |
-      pipeline = @pipelines[pipeline_id]
+  def running_user_defined_pipelines
+    @pipelines_mutex.synchronize do
+      @pipelines.select do |_, pipeline|
         pipeline.running? && !pipeline.system?
       end
     end
-    found
   end

   def close_pipeline(id)
@@ -223,6 +270,93 @@ class LogStash::Agent
   end

   private
+  def transition_to_stopped
+    @running.make_false
+  end
+
+  def transition_to_running
+    @running.make_true
+  end
+
+  # We depends on a series of task derived from the internal state and what
+  # need to be run, theses actions are applied to the current pipelines to converge to
+  # the desired state.
+  #
+  # The current actions are simple and favor composition, allowing us to experiment with different
+  # way to making them and also test them in isolation with the current running agent.
+  #
+  # Currently only action related to pipeline exist, but nothing prevent us to use the same logic
+  # for other tasks.
+  #
+  def converge_state(pipeline_actions)
+    logger.debug("Converging pipelines")
+
+    converge_result = LogStash::ConvergeResult.new(pipeline_actions.size)
+
+    logger.debug("Needed actions to converge", :actions_count => pipeline_actions.size) unless pipeline_actions.empty?
+
+    pipeline_actions.each do |action|
+      # We execute every task we need to converge the current state of pipelines
+      # for every task we will record the action result, that will help us
+      # the results of all the task will determine if the converge was successful or not
+      #
+      # The ConvergeResult#add, will accept the following values
+      # - boolean
+      # - FailedAction
+      # - SuccessfulAction
+      # - Exception
+      #
+      # This give us a bit more extensibility with the current startup/validation model
+      # that we currently have.
+      begin
+        logger.debug("Executing action", :action => action)
+        action_result = action.execute(self, @pipelines)
+        converge_result.add(action, action_result)
+
+        unless action_result.successful?
+          logger.error("Failed to execute action", :id => action.pipeline_id,
+                       :action_type => action_result.class, :message => action_result.message)
+        end
+      rescue SystemExit => e
+        converge_result.add(action, e)
+      rescue Exception => e
+        logger.error("Failed to execute action", :action => action, :exception => e.class.name, :message => e.message)
+        converge_result.add(action, e)
+      end
+    end
+
+    if logger.trace?
+      logger.trace("Converge results", :success => converge_result.success?,
+                   :failed_actions => converge_result.failed_actions.collect { |a, r| "id: #{a.pipeline_id}, action_type: #{a.class}, message: #{r.message}" },
+                   :successful_actions => converge_result.successful_actions.collect { |a, r| "id: #{a.pipeline_id}, action_type: #{a.class}" })
+    end
+
+    converge_result
+  end
+
+  def resolve_actions(pipeline_configs)
+    @state_resolver.resolve(@pipelines, pipeline_configs)
+  end
+
+  def report_currently_running_pipelines(converge_result)
+    if converge_result.success? && converge_result.total > 0
+      number_of_running_pipeline = running_pipelines.size
+      logger.info("Pipelines running", :count => number_of_running_pipeline, :pipelines => running_pipelines.values.collect(&:pipeline_id) )
+    end
+  end
+
+  def dispatch_events(converge_results)
+    converge_results.successful_actions.each do |action, _|
+      case action
+      when LogStash::PipelineAction::Create
+        dispatcher.fire(:pipeline_started, get_pipeline(action.pipeline_id))
+      when LogStash::PipelineAction::Reload
+        dispatcher.fire(:pipeline_stopped, get_pipeline(action.pipeline_id))
+      when LogStash::PipelineAction::Stop
+        dispatcher.fire(:pipeline_started, get_pipeline(action.pipeline_id))
+      end
+    end
+  end

   def start_webserver
     options = {:http_host => @http_host, :http_ports => @http_port, :http_environment => @http_environment }
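In the hunk above, converge_state treats each action as an object with a pipeline_id and an execute(agent, pipelines) method, and records whatever execute produces via ConvergeResult#add (a boolean, FailedAction, SuccessfulAction or Exception, per the comment in the code). The shipped actions live under LogStash::PipelineAction (Create, Reload, Stop); the following hypothetical no-op action is only meant to illustrate that contract and is not part of the package:

# Hypothetical illustration of the action contract used by converge_state.
class NoopAction
  attr_reader :pipeline_id

  def initialize(pipeline_id)
    @pipeline_id = pipeline_id
  end

  # `agent` is the running LogStash::Agent, `pipelines` the shared
  # pipeline_id => pipeline hash the agent is converging.
  # Returning a boolean is one of the values ConvergeResult#add accepts
  # according to the comment in the diff above.
  def execute(agent, pipelines)
    pipelines.key?(@pipeline_id)
  end
end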
@@ -251,222 +385,94 @@ class LogStash::Agent
     @periodic_pollers.start
   end

-  def
-
-    # these include metrics about the plugins and number of processed events
-    # we want to keep other metrics like reload counts and error messages
-    @collector.clear("stats/pipelines/#{id}/plugins")
-    @collector.clear("stats/pipelines/#{id}/events")
+  def stop_collecting_metrics
+    @periodic_pollers.stop
   end

   def collect_metrics?
     @collect_metric
   end

-  def
-    @
-
-
-      n.gauge(:last_error, { :message => message, :backtrace =>backtrace})
-      n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
-    end
-    if @logger.debug?
-      @logger.error("Cannot create pipeline", :reason => message, :backtrace => backtrace)
-    else
-      @logger.error("Cannot create pipeline", :reason => message)
+  def force_shutdown_pipelines!
+    @pipelines.each do |_, pipeline|
+      # TODO(ph): should it be his own action?
+      pipeline.force_shutdown!
     end
   end

-
-
-
-
-
-
-
-
-
-    rescue => e
-      @logger.error("failed to fetch pipeline configuration", :message => e.message)
-      return nil
-    end
-  end
-
-    begin
-      LogStash::Pipeline.new(config, settings, metric)
-    rescue => e
-      increment_reload_failures_metrics(settings.get("pipeline.id"), e.message, e.backtrace)
-      return nil
+  def shutdown_pipelines
+    logger.debug("Shutting down all pipelines", :pipelines_count => pipelines_count)
+
+    # In this context I could just call shutdown, but I've decided to
+    # use the stop action implementation for that so we have the same code.
+    # This also give us some context into why a shutdown is failing
+    @pipelines_mutex.synchronize do
+      pipeline_actions = resolve_actions([]) # We stop all the pipeline, so we converge to a empty state
+      converge_state(pipeline_actions)
     end
   end

-  def
-    @
+  def running_pipeline?(pipeline_id)
+    thread = @pipelines[pipeline_id].thread
+    thread.is_a?(Thread) && thread.alive?
   end

-
-
-  # @param id [String] the pipeline id to reload
-  def reload_pipeline!(id, force=false)
-    old_pipeline = @pipelines[id]
-    new_config = fetch_config(old_pipeline.settings)
-
-    if old_pipeline.config_str == new_config && force == false
-      @logger.debug("no configuration change for pipeline", :pipeline => id)
-      return
-    end
-
-    # check if this pipeline is not reloadable. it should not happen as per the check below
-    # but keep it here as a safety net if a reloadable pipeline was reloaded with a non reloadable pipeline
-    if !old_pipeline.reloadable?
-      @logger.error("pipeline is not reloadable", :pipeline => id)
-      return
-    end
-
-    # BasePipeline#initialize will compile the config, and load all plugins and raise an exception
-    # on an invalid configuration
-    begin
-      pipeline_validator = LogStash::BasePipeline.new(new_config, old_pipeline.settings)
-    rescue => e
-      increment_reload_failures_metrics(id, e.message, e.backtrace)
-      return
-    end
-
-    # check if the new pipeline will be reloadable in which case we want to log that as an error and abort
-    if !pipeline_validator.reloadable?
-      @logger.error(I18n.t("logstash.agent.non_reloadable_config_reload"), :pipeline_id => id, :plugins => pipeline_validator.non_reloadable_plugins.map(&:class))
-      increment_reload_failures_metrics(id, "non reloadable pipeline")
-      return
-    end
-
-    # we know configis valid so we are fairly comfortable to first stop old pipeline and then start new one
-    upgrade_pipeline(id, old_pipeline.settings, new_config)
+  def clean_state?
+    @pipelines.empty?
   end

-
-
-
-  # @params settings [Settings] the settings for the new pipeline
-  # @params new_config [String] the new pipeline config
-  def upgrade_pipeline(pipeline_id, settings, new_config)
-    @logger.warn("fetched new config for pipeline. upgrading..", :pipeline => pipeline_id, :config => new_config)
-
-    # first step: stop the old pipeline.
-    # IMPORTANT: a new pipeline with same settings should not be instantiated before the previous one is shutdown
-
-    stop_pipeline(pipeline_id)
-    reset_pipeline_metrics(pipeline_id)
-
-    # second step create and start a new pipeline now that the old one is shutdown
+  def setting(key)
+    @settings.get(key)
+  end

-
-
-
-
-
-
-
+  # Methods related to the creation of all metrics
+  # related to states changes and failures
+  #
+  # I think we could use an observer here to decouple the metrics, but moving the code
+  # into separate function is the first step we take.
+  def update_metrics(converge_result)
+    converge_result.failed_actions.each do |action, action_result|
+      update_failures_metrics(action, action_result)
     end

-
-
-    # check if the new pipeline will be reloadable in which case we want to log that as an error and abort. this should normally not
-    # happen since the check should be done in reload_pipeline! prior to get here.
-    if !new_pipeline.reloadable?
-      @logger.error(I18n.t("logstash.agent.non_reloadable_config_reload"), :pipeline_id => pipeline_id, :plugins => new_pipeline.non_reloadable_plugins.map(&:class))
-      increment_reload_failures_metrics(pipeline_id, "non reloadable pipeline")
-      new_pipeline.close
-      return
+    converge_result.successful_actions.each do |action, action_result|
+      update_success_metrics(action, action_result)
     end
+  end

-
-
-
-
-
-
-
-
+  def update_success_metrics(action, action_result)
+    case action
+    when LogStash::PipelineAction::Create
+      # When a pipeline is successfully created we create the metric
+      # place holder related to the lifecycle of the pipeline
+      initialize_pipeline_metrics(action)
+    when LogStash::PipelineAction::Reload
+      update_successful_reload_metrics(action, action_result)
     end
+  end

-
-
-
-
-      n.gauge(:last_success_timestamp, LogStash::Timestamp.now)
+  def update_failures_metrics(action, action_result)
+    if action.is_a?(LogStash::PipelineAction::Create)
+      # force to create the metric fields
+      initialize_pipeline_metrics(action)
     end
-  end

-
-    pipeline = @pipelines[id]
-    return unless pipeline.is_a?(LogStash::Pipeline)
-    return if pipeline.ready?
-    @logger.debug("starting pipeline", :id => id)
-    t = Thread.new do
-      LogStash::Util.set_thread_name("pipeline.#{id}")
-      begin
-        pipeline.run
-      rescue => e
-        @instance_reload_metric.increment(:failures)
-        @pipeline_reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
-          n.increment(:failures)
-          n.gauge(:last_error, { :message => e.message, :backtrace => e.backtrace})
-          n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
-        end
-        @logger.error("Pipeline aborted due to error", :exception => e, :backtrace => e.backtrace)
+    @instance_reload_metric.increment(:failures)

-
-
-
-
-      if !t.alive?
-        return false
-      elsif pipeline.running?
-        return true
-      else
-        sleep 0.01
-      end
+    @pipeline_reload_metric.namespace([action.pipeline_id, :reloads]).tap do |n|
+      n.increment(:failures)
+      n.gauge(:last_error, { :message => action_result.message, :backtrace => action_result.backtrace})
+      n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
    end
  end

-  def
-    pipeline = @pipelines[id]
-    return unless pipeline
-    @logger.warn("stopping pipeline", :id => id)
-    pipeline.shutdown { LogStash::ShutdownWatcher.start(pipeline) }
-    @pipelines[id].thread.join
-  end
-
-  def start_pipelines
+  def initialize_agent_metrics
     @instance_reload_metric.increment(:successes, 0)
     @instance_reload_metric.increment(:failures, 0)
-    @pipelines.each do |id, pipeline|
-      start_pipeline(id)
-      pipeline.collect_stats
-      # no reloads yet, initialize all the reload metrics
-      init_pipeline_reload_metrics(id)
-    end
   end

-  def
-    @
-  end
-
-  def running_pipeline?(pipeline_id)
-    thread = @pipelines[pipeline_id].thread
-    thread.is_a?(Thread) && thread.alive?
-  end
-
-  def clean_state?
-    @pipelines.empty?
-  end
-
-  def setting(key)
-    @settings.get(key)
-  end
-
-  def init_pipeline_reload_metrics(id)
-    @pipeline_reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
+  def initialize_pipeline_metrics(action)
+    @pipeline_reload_metric.namespace([action.pipeline_id, :reloads]).tap do |n|
      n.increment(:successes, 0)
      n.increment(:failures, 0)
      n.gauge(:last_error, nil)
@@ -474,4 +480,13 @@ class LogStash::Agent
       n.gauge(:last_failure_timestamp, nil)
     end
   end
+
+  def update_successful_reload_metrics(action, action_result)
+    @instance_reload_metric.increment(:successes)
+
+    @pipeline_reload_metric.namespace([action.pipeline_id, :reloads]).tap do |n|
+      n.increment(:successes)
+      n.gauge(:last_success_timestamp, action_result.executed_at)
+    end
+  end
 end # class LogStash::Agent
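The last hunks show the metric bookkeeping that replaces the old per-pipeline reload counters: the agent pre-creates a reloads namespace under [:stats, :pipelines] with zeroed counters and nil gauges, then updates it from the converge results. A small sketch of that namespace/tap pattern; `metric` is assumed to be the agent's metric object exactly as referenced in the diff, and :example_pipeline is a made-up pipeline id:

# Sketch of the reload-metrics initialization pattern used above.
pipeline_reload_metric = metric.namespace([:stats, :pipelines])

pipeline_reload_metric.namespace([:example_pipeline, :reloads]).tap do |n|
  n.increment(:successes, 0)             # create counters at zero
  n.increment(:failures, 0)
  n.gauge(:last_error, nil)              # gauges stay nil until a reload happens
  n.gauge(:last_success_timestamp, nil)
  n.gauge(:last_failure_timestamp, nil)
end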