logstash-core 5.0.0.alpha6.snapshot5-java → 5.0.0-java

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/lib/jars.rb +1 -1
  3. data/lib/logstash-core/version.rb +1 -1
  4. data/lib/logstash/agent.rb +45 -11
  5. data/lib/logstash/api/app_helpers.rb +43 -7
  6. data/lib/logstash/api/commands/stats.rb +2 -1
  7. data/lib/logstash/api/errors.rb +28 -0
  8. data/lib/logstash/api/modules/base.rb +9 -7
  9. data/lib/logstash/api/modules/logging.rb +52 -0
  10. data/lib/logstash/api/modules/node.rb +13 -9
  11. data/lib/logstash/api/modules/root.rb +0 -2
  12. data/lib/logstash/api/modules/stats.rb +0 -2
  13. data/lib/logstash/api/rack_app.rb +5 -3
  14. data/lib/logstash/environment.rb +4 -5
  15. data/lib/logstash/instrument/collector.rb +4 -0
  16. data/lib/logstash/instrument/metric_store.rb +27 -2
  17. data/lib/logstash/logging/logger.rb +15 -4
  18. data/lib/logstash/patches/puma.rb +44 -0
  19. data/lib/logstash/pipeline.rb +8 -15
  20. data/lib/logstash/runner.rb +31 -17
  21. data/lib/logstash/settings.rb +34 -9
  22. data/lib/logstash/util/wrapped_synchronous_queue.rb +26 -9
  23. data/lib/logstash/version.rb +1 -1
  24. data/lib/logstash/webserver.rb +13 -2
  25. data/locales/en.yml +7 -2
  26. data/logstash-core.gemspec +1 -1
  27. data/spec/api/lib/api/logging_spec.rb +41 -0
  28. data/spec/api/lib/api/node_plugins_spec.rb +4 -3
  29. data/spec/api/lib/api/node_spec.rb +2 -0
  30. data/spec/api/lib/api/node_stats_spec.rb +2 -0
  31. data/spec/api/lib/api/plugins_spec.rb +3 -1
  32. data/spec/api/lib/api/root_spec.rb +3 -0
  33. data/spec/api/lib/errors_spec.rb +27 -0
  34. data/spec/api/lib/rack_app_spec.rb +4 -4
  35. data/spec/logstash/agent_spec.rb +112 -26
  36. data/spec/logstash/instrument/metric_store_spec.rb +37 -0
  37. data/spec/logstash/pipeline_spec.rb +54 -0
  38. data/spec/logstash/runner_spec.rb +2 -1
  39. data/spec/logstash/setting_spec.rb +23 -1
  40. data/spec/logstash/settings/string_spec.rb +1 -1
  41. data/spec/logstash/settings_spec.rb +27 -0
  42. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +49 -11
  43. data/spec/logstash/webserver_spec.rb +76 -18
  44. data/spec/support/helpers.rb +8 -0
  45. data/spec/support/mocks_classes.rb +22 -0
  46. data/spec/support/shared_examples.rb +10 -0
  47. data/vendor/jars/org/logstash/logstash-core/{5.0.0-alpha6/logstash-core-5.0.0-alpha6.jar → 5.0.0/logstash-core-5.0.0.jar} +0 -0
  48. metadata +16 -7
@@ -11,4 +11,4 @@
  # eventually this file should be in the root logstash lib fir and dependencies in logstash-core should be
  # fixed.
 
- LOGSTASH_VERSION = "5.0.0.alpha6.snapshot5"
+ LOGSTASH_VERSION = "5.0.0"
@@ -2,7 +2,9 @@
  require "logstash/api/rack_app"
  require "puma"
  require "puma/server"
+ require "logstash/patches/puma"
  require "concurrent"
+ require "thread"
 
  module LogStash
  class WebServer
@@ -71,12 +73,21 @@ module LogStash
  end
 
  def start_webserver(port)
+ # wrap any output that puma could generate into a wrapped logger
+ # use the puma namespace to override STDERR, STDOUT in that scope.
+ Puma::STDERR.logger = logger
+ Puma::STDOUT.logger = logger
+
+ io_wrapped_logger = LogStash::IOWrappedLogger.new(logger)
+
  app = LogStash::Api::RackApp.app(logger, agent, http_environment)
 
- @server = ::Puma::Server.new(app)
+ events = ::Puma::Events.new(io_wrapped_logger, io_wrapped_logger)
+
+ @server = ::Puma::Server.new(app, events)
  @server.add_tcp_listener(http_host, port)
 
- logger.info("Succesfully started Logstash API", :port => @port)
+ logger.info("Successfully started Logstash API endpoint", :port => @port)
 
  @server.run.join
  end
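
For readers following the webserver change above: Puma is handed an IO-like object so anything it would print to stdout/stderr lands in the Logstash logger instead. Below is a minimal sketch of that wrapper pattern; the class name and method set are illustrative only, not the actual LogStash::IOWrappedLogger from logstash/patches/puma.

    require "logger"

    # Illustrative sketch: an IO-like shim that forwards writes to a logger,
    # in the spirit of the Puma::Events wiring shown in the diff above.
    class IoWrappedLogger
      def initialize(logger)
        @logger = logger
      end

      # Puma treats its log sinks as IO objects, so expose the usual write methods.
      def write(message)
        @logger.debug(message.to_s.chomp)
      end
      alias_method :<<, :write
      alias_method :puts, :write

      # Puma may toggle sync on its sinks; accept and ignore it.
      def sync=(_value); end
      def sync; true; end
    end

    logger  = Logger.new($stdout)
    wrapped = IoWrappedLogger.new(logger)
    wrapped.puts("Puma booted")   # ends up in the logger at debug level, not on raw stdout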
@@ -74,15 +74,18 @@ en:
  Logstash is not able to start since configuration auto reloading was enabled but the configuration contains plugins that don't support it. Quitting...
  web_api:
  cant_bind_to_port: |-
- Logstash tried to bind to port %{port}, but the port is already in use. You can specify a new port by launching logtash with the --http-port option."
+ Logstash tried to bind to port %{port}, but the port is already in use. You can specify a new port by launching logtash with the --http.port option."
  cant_bind_to_port_in_range: |-
- Logstash tried to bind to port range %{http_ports}, but all the ports are already in use. You can specify a new port by launching logtash with the --http-port option."
+ Logstash tried to bind to port range %{http_ports}, but all the ports are already in use. You can specify a new port by launching logtash with the --http.port option."
  hot_threads:
  title: |-
  ::: {%{hostname}}
  Hot threads at %{time}, busiestThreads=%{top_count}:
  thread_title: |-
  %{percent_of_cpu_time} % of cpu usage, state: %{thread_state}, thread name: '%{thread_name}'
+ logging:
+ unrecognized_option: |-
+ unrecognized option [%{option}]
  runner:
  short-help: |-
  usage:
@@ -194,6 +197,8 @@ en:
  path_settings: |+
  Directory containing logstash.yml file. This can also be
  set through the LS_SETTINGS_DIR environment variable.
+ path_logs: |+
+ Directory to Write Logstash internal logs to.
  auto_reload: |+
  Monitor configuration changes and reload
  whenever it is changed.
@@ -17,7 +17,7 @@ Gem::Specification.new do |gem|
  gem.require_paths = ["lib", "vendor/jars"]
  gem.version = LOGSTASH_CORE_VERSION
 
- gem.add_runtime_dependency "logstash-core-event-java", "5.0.0.alpha6.snapshot5"
+ gem.add_runtime_dependency "logstash-core-event-java", "5.0.0"
 
  gem.add_runtime_dependency "pry", "~> 0.10.1" #(Ruby license)
  gem.add_runtime_dependency "stud", "~> 0.0.19" #(Apache 2.0 license)
@@ -0,0 +1,41 @@
+ # encoding: utf-8
+ require_relative "../../spec_helper"
+ require "sinatra"
+ require "logstash/api/modules/logging"
+ require "logstash/json"
+
+ describe LogStash::Api::Modules::Logging do
+ include_context "api setup"
+
+ describe "#logging" do
+
+ context "when setting a logger's log level" do
+ before(:all) do
+ @runner = LogStashRunner.new
+ @runner.start
+ end
+
+ after(:all) do
+ @runner.stop
+ end
+
+ it "should return a positive acknowledgement on success" do
+ put '/', '{"logger.logstash": "ERROR"}'
+ payload = LogStash::Json.load(last_response.body)
+ expect(payload['acknowledged']).to eq(true)
+ end
+
+ it "should throw error when level is invalid" do
+ put '/', '{"logger.logstash": "invalid"}'
+ payload = LogStash::Json.load(last_response.body)
+ expect(payload['error']).to eq("invalid level[invalid] for logger[logstash]")
+ end
+
+ it "should throw error when key logger is invalid" do
+ put '/', '{"invalid" : "ERROR"}'
+ payload = LogStash::Json.load(last_response.body)
+ expect(payload['error']).to eq("unrecognized option [invalid]")
+ end
+ end
+ end
+ end
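
The new logging_spec.rb above drives the logging API by PUTting a JSON body that maps a logger name to a level. A hedged sketch of calling such an endpoint from plain Ruby follows; the host, the port 9600, and the /_node/logging path are assumptions made for illustration and are not confirmed by this diff.

    require "net/http"
    require "json"
    require "uri"

    # Assumed endpoint for illustration only: the logging module reachable on localhost:9600.
    uri = URI("http://localhost:9600/_node/logging")

    request = Net::HTTP::Put.new(uri, "Content-Type" => "application/json")
    request.body = JSON.dump("logger.logstash" => "ERROR")

    response = Net::HTTP.start(uri.host, uri.port) { |http| http.request(request) }
    # Per the spec above, a success returns {"acknowledged"=>true}; bad input returns an "error" key.
    puts JSON.parse(response.body)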
@@ -1,4 +1,5 @@
  # encoding: utf-8
+ require_relative "../../../support/shared_examples"
  require_relative "../../spec_helper"
  require "sinatra"
  require "logstash/api/modules/plugins"
@@ -6,10 +7,11 @@ require "logstash/json"
 
  describe LogStash::Api::Modules::Plugins do
  include_context "api setup"
+ include_examples "not found"
 
  extend ResourceDSLMethods
 
- before(:all) do
+ before(:each) do
  do_request { get "/" }
  end
 
@@ -20,13 +22,12 @@ describe LogStash::Api::Modules::Plugins do
  expect(last_response).to be_ok
  end
 
- it "should return a list of plugins" do
+ it "should return a list of plugins" do
  expect(payload["plugins"]).to be_a(Array)
  end
 
  it "should return the total number of plugins" do
  expect(payload["total"]).to be_a(Numeric)
  end
-
  end
  end
@@ -1,11 +1,13 @@
  # encoding: utf-8
  require_relative "../../spec_helper"
+ require_relative "../../../support/shared_examples"
  require "sinatra"
  require "logstash/api/modules/node"
  require "logstash/json"
 
  describe LogStash::Api::Modules::Node do
  include_context "api setup"
+ include_examples "not found"
 
  describe "#hot threads" do
 
@@ -1,11 +1,13 @@
  # encoding: utf-8
  require_relative "../../spec_helper"
+ require_relative "../../../support/shared_examples"
  require "sinatra"
  require "logstash/api/modules/node_stats"
  require "logstash/json"
 
  describe LogStash::Api::Modules::NodeStats do
  include_context "api setup"
+ include_examples "not found"
 
  extend ResourceDSLMethods
 
@@ -1,13 +1,15 @@
  # encoding: utf-8
  require_relative "../../spec_helper"
+ require_relative "../../../support/shared_examples"
  require "sinatra"
  require "logstash/api/modules/plugins"
  require "logstash/json"
 
  describe LogStash::Api::Modules::Plugins do
  include_context "api setup"
+ include_examples "not found"
 
- before(:all) do
+ before(:each) do
  get "/"
  end
 
@@ -1,5 +1,6 @@
  # encoding: utf-8
  require_relative "../../spec_helper"
+ require_relative "../../../support/shared_examples"
  require "sinatra"
  require "logstash/api/modules/root"
  require "logstash/json"
@@ -11,5 +12,7 @@ describe LogStash::Api::Modules::Root do
  do_request { get "/" }
  expect(last_response).to be_ok
  end
+
+ include_examples "not found"
  end
 
@@ -0,0 +1,27 @@
+ # encoding: utf-8
+ require_relative "../spec_helper"
+ require "logstash/api/errors"
+
+ describe LogStash::Api::ApiError do
+ subject { described_class.new }
+
+ it "#status_code returns 500" do
+ expect(subject.status_code).to eq(500)
+ end
+
+ it "#to_hash return the message of the exception" do
+ expect(subject.to_hash).to include(:message => "Api Error")
+ end
+ end
+
+ describe LogStash::Api::NotFoundError do
+ subject { described_class.new }
+
+ it "#status_code returns 404" do
+ expect(subject.status_code).to eq(404)
+ end
+
+ it "#to_hash return the message of the exception" do
+ expect(subject.to_hash).to include(:message => "Not Found")
+ end
+ end
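
The errors_spec.rb above pins down the behaviour the API error classes must have: a status_code and a to_hash carrying the exception message. A minimal sketch consistent with those expectations is shown below; it is illustrative only (the real classes live in logstash/api/errors and may differ), so it uses a stand-in module name.

    # Illustrative sketch of error classes matching the expectations in errors_spec.rb.
    module LogStashSketch
      class ApiError < StandardError
        def initialize(message = "Api Error")
          super
        end

        def status_code
          500
        end

        def to_hash
          { :message => message }
        end
      end

      # Specializes the generic error with a 404 status and a fixed message.
      class NotFoundError < ApiError
        def initialize
          super("Not Found")
        end

        def status_code
          404
        end
      end
    end

    LogStashSketch::ApiError.new.to_hash          # => {:message=>"Api Error"}
    LogStashSketch::NotFoundError.new.status_code # => 404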
@@ -76,14 +76,14 @@ describe LogStash::Api::RackApp do
  end
 
  it "should log good requests as info" do
- expect(logger).to receive(:info?).and_return(true)
- expect(logger).to receive(:info).with(LogStash::Api::RackApp::ApiLogger::LOG_MESSAGE, anything).once
+ expect(logger).to receive(:debug?).and_return(true)
+ expect(logger).to receive(:debug).with(LogStash::Api::RackApp::ApiLogger::LOG_MESSAGE, anything).once
  get "/good-page"
  end
 
  it "should log 5xx requests as warnings" do
- expect(logger).to receive(:warn?).and_return(true)
- expect(logger).to receive(:warn).with(LogStash::Api::RackApp::ApiLogger::LOG_MESSAGE, anything).once
+ expect(logger).to receive(:error?).and_return(true)
+ expect(logger).to receive(:error).with(LogStash::Api::RackApp::ApiLogger::LOG_MESSAGE, anything).once
  get "/service-unavailable"
  end
  end
@@ -47,11 +47,11 @@ describe LogStash::Agent do
  let(:pipeline_id) { "main" }
  let(:config_string) { "input { } filter { } output { }" }
  let(:agent_args) do
- {
+ {
  "config.string" => config_string,
  "config.reload.automatic" => true,
  "config.reload.interval" => 0.01,
- "pipeline.workers" => 4,
+ "pipeline.workers" => 4,
  }
  end
 
@@ -331,8 +331,6 @@
 
  context "metrics after config reloading" do
  let(:config) { "input { generator { } } output { dummyoutput { } }" }
- let(:new_config_generator_counter) { 500 }
- let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { dummyoutput2 {} }" }
  let(:config_path) do
  f = Stud::Temporary.file
  f.write(config)
@@ -351,7 +349,7 @@
  super.merge({ "config.reload.automatic" => true,
  "config.reload.interval" => interval,
  "metric.collect" => true })
- end
+ end
 
  # We need to create theses dummy classes to know how many
  # events where actually generated by the pipeline and successfully send to the output.
@@ -390,33 +388,121 @@
  end
  end
 
- it "resets the metric collector" do
- # We know that the store has more events coming in.
- i = 0
- while dummy_output.events.size <= new_config_generator_counter
- i += 1
- raise "Waiting too long!" if i > 20
- sleep(0.1)
+ context "when reloading a good config" do
+ let(:new_config_generator_counter) { 500 }
+ let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { dummyoutput2 {} }" }
+ before :each do
+ # We know that the store has more events coming in.
+ i = 0
+ while dummy_output.events.size <= new_config_generator_counter
+ i += 1
+ raise "Waiting too long!" if i > 20
+ sleep(0.1)
+ end
+
+
+ # Also force a flush to disk to make sure ruby reload it.
+ File.open(config_path, "w") do |f|
+ f.write(new_config)
+ f.fsync
+ end
+
+ sleep(interval * 3) # Give time to reload the config
+
+ # be eventually consistent.
+ sleep(0.01) while dummy_output2.events.size < new_config_generator_counter
  end
 
- snapshot = subject.metric.collector.snapshot_metric
- expect(snapshot.metric_store.get_with_path("/stats/events")[:stats][:events][:in].value).to be > new_config_generator_counter
+ it "resets the pipeline metric collector" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:events][:in].value
+ expect(value).to eq(new_config_generator_counter)
+ end
 
- # update the configuration and give some time to logstash to pick it up and do the work
- # Also force a flush to disk to make sure ruby reload it.
- File.open(config_path, "w") do |f|
- f.write(new_config)
- f.fsync
+ it "does not reset the global event count" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/events")[:stats][:events][:in].value
+ expect(value).to be > new_config_generator_counter
  end
 
- sleep(interval * 3) # Give time to reload the config
-
- # be eventually consistent.
- sleep(0.01) while dummy_output2.events.size < new_config_generator_counter
+ it "increases the successful reload count" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
+ expect(value).to be(1)
+ end
 
- snapshot = subject.metric.collector.snapshot_metric
- value = snapshot.metric_store.get_with_path("/stats/events")[:stats][:events][:in].value
- expect(value).to eq(new_config_generator_counter)
+ it "does not set the failure reload timestamp" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_failure_timestamp].value
+ expect(value).to be(nil)
+ end
+
+ it "sets the success reload timestamp" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_success_timestamp].value
+ expect(value).to be_a(LogStash::Timestamp)
+ end
+
+ it "does not set the last reload error" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_error].value
+ expect(value).to be(nil)
+ end
+
+ end
+
+ context "when reloading a bad config" do
+ let(:new_config) { "input { generator { count => " }
+ let(:new_config_generator_counter) { 500 }
+ before :each do
+ # We know that the store has more events coming in.
+ i = 0
+ while dummy_output.events.size <= new_config_generator_counter
+ i += 1
+ raise "Waiting too long!" if i > 20
+ sleep(0.1)
+ end
+
+
+ # Also force a flush to disk to make sure ruby reload it.
+ File.open(config_path, "w") do |f|
+ f.write(new_config)
+ f.fsync
+ end
+
+ sleep(interval * 3) # Give time to reload the config
+ end
+
+ it "does not increase the successful reload count" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
+ expect(value).to be(0)
+ end
+
+ it "does not set the successful reload timestamp" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_success_timestamp].value
+ expect(value).to be(nil)
+ end
+
+ it "sets the failure reload timestamp" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_failure_timestamp].value
+ expect(value).to be_a(LogStash::Timestamp)
+ end
+
+ it "sets the last reload error" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_error].value
+ expect(value).to be_a(Hash)
+ expect(value).to include(:message, :backtrace)
+ end
+
+ it "increases the failed reload count" do
+ snapshot = subject.metric.collector.snapshot_metric
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:failures].value
+ expect(value).to be > 0
+ end
  end
  end
  end
@@ -203,6 +203,12 @@ describe LogStash::Instrument::MetricStore do
  end
  end
 
+ describe "#size" do
+ it "returns the number of unique metrics" do
+ expect(subject.size).to eq(metric_events.size)
+ end
+ end
+
  describe "#each" do
  it "retrieves all the metric" do
  expect(subject.each.size).to eq(metric_events.size)
@@ -221,4 +227,35 @@
  end
  end
  end
+
+ describe "#prune" do
+ let(:metric_events) {
+ [
+ [[:node, :sashimi, :pipelines, :pipeline01, :plugins, :"logstash-output-elasticsearch"], :event_in, :increment],
+ [[:node, :sashimi, :pipelines, :pipeline01], :processed_events_in, :increment],
+ [[:node, :sashimi, :pipelines, :pipeline01], :processed_events_out, :increment],
+ [[:node, :sashimi, :pipelines, :pipeline02], :processed_events_out, :increment],
+ ]
+ }
+
+ before :each do
+ # Lets add a few metrics in the store before trying to find them
+ metric_events.each do |namespaces, metric_key, action|
+ metric = subject.fetch_or_store(namespaces, metric_key, LogStash::Instrument::MetricType::Counter.new(namespaces, metric_key))
+ metric.execute(action)
+ end
+ end
+
+ it "should remove all keys with the same starting path as the argument" do
+ expect(subject.get(:node, :sashimi, :pipelines, :pipeline01)).to be_a(Hash)
+ subject.prune("/node/sashimi/pipelines/pipeline01")
+ expect { subject.get(:node, :sashimi, :pipelines, :pipeline01) }.to raise_error LogStash::Instrument::MetricStore::MetricNotFound
+ end
+
+ it "should keep other metrics on different path branches" do
+ expect(subject.get(:node, :sashimi, :pipelines, :pipeline02)).to be_a(Hash)
+ subject.prune("/node/sashimi/pipelines/pipeline01")
+ expect { subject.get(:node, :sashimi, :pipelines, :pipeline02) }.to_not raise_error
+ end
+ end
  end
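
The #prune spec above expects that pruning "/node/sashimi/pipelines/pipeline01" removes that whole subtree while the pipeline02 branch survives. As a rough illustration of prefix-based pruning on a plain nested Hash (independent of the real MetricStore internals, its ConcurrentHashMap-style storage, and its locking; all names below are made up):

    # Rough illustration of path-prefix pruning on a nested Hash.
    def prune(store, path)
      keys = path.split("/").reject(&:empty?).map(&:to_sym)
      # Walk down to the parent of the last path segment, then drop that subtree.
      parent = keys[0..-2].reduce(store) { |node, key| node.fetch(key, {}) }
      parent.delete(keys.last)
      store
    end

    metrics = {
      :node => { :sashimi => { :pipelines => {
        :pipeline01 => { :processed_events_in => 1 },
        :pipeline02 => { :processed_events_out => 1 }
      } } }
    }

    prune(metrics, "/node/sashimi/pipelines/pipeline01")
    metrics[:node][:sashimi][:pipelines].keys # => [:pipeline02]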