logstash-core 5.1.2-java → 5.2.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +0 -1
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/version.rb +1 -1
  5. data/lib/logstash-core_jars.rb +0 -2
  6. data/lib/logstash/agent.rb +26 -10
  7. data/lib/logstash/api/commands/default_metadata.rb +3 -1
  8. data/lib/logstash/api/commands/stats.rb +17 -1
  9. data/lib/logstash/api/modules/node_stats.rb +9 -0
  10. data/lib/logstash/api/modules/stats.rb +3 -2
  11. data/lib/logstash/config/mixin.rb +5 -8
  12. data/lib/logstash/instrument/collector.rb +1 -46
  13. data/lib/logstash/instrument/periodic_poller/base.rb +2 -0
  14. data/lib/logstash/instrument/periodic_poller/cgroup.rb +137 -0
  15. data/lib/logstash/instrument/periodic_poller/jvm.rb +1 -2
  16. data/lib/logstash/instrument/periodic_poller/os.rb +21 -0
  17. data/lib/logstash/instrument/periodic_poller/pq.rb +20 -0
  18. data/lib/logstash/instrument/periodic_pollers.rb +4 -2
  19. data/lib/logstash/output_delegator.rb +2 -0
  20. data/lib/logstash/pipeline.rb +31 -2
  21. data/lib/logstash/runner.rb +6 -1
  22. data/lib/logstash/util/wrapped_acked_queue.rb +11 -0
  23. data/lib/logstash/util/wrapped_synchronous_queue.rb +9 -0
  24. data/lib/logstash/version.rb +1 -1
  25. data/lib/logstash/webserver.rb +9 -1
  26. data/locales/en.yml +0 -3
  27. data/spec/api/lib/api/node_stats_spec.rb +5 -1
  28. data/spec/api/spec_helper.rb +3 -1
  29. data/spec/logstash/agent_spec.rb +2 -0
  30. data/spec/logstash/instrument/collector_spec.rb +4 -0
  31. data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +148 -0
  32. data/spec/logstash/instrument/periodic_poller/os_spec.rb +85 -0
  33. data/spec/logstash/output_delegator_spec.rb +12 -4
  34. data/spec/logstash/pipeline_reporter_spec.rb +2 -26
  35. data/spec/logstash/pipeline_spec.rb +102 -40
  36. data/spec/logstash/plugin_spec.rb +2 -6
  37. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +34 -4
  38. data/spec/support/mocks_classes.rb +2 -2
  39. metadata +12 -7
@@ -0,0 +1,85 @@
+ # encoding: utf-8
+ require "logstash/instrument/periodic_poller/os"
+ require "logstash/instrument/metric"
+ require "logstash/instrument/collector"
+
+ describe LogStash::Instrument::PeriodicPoller::Os do
+   let(:metric) { LogStash::Instrument::Metric.new(LogStash::Instrument::Collector.new) }
+
+   context "recorded cgroup metrics (mocked cgroup env)" do
+     subject { described_class.new(metric, {})}
+
+     let(:snapshot_store) { metric.collector.snapshot_metric.metric_store }
+     let(:os_metrics) { snapshot_store.get_shallow(:os) }
+
+     let(:cpuacct_control_group) { "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61" }
+     let(:cpuacct_usage) { 1982 }
+     let(:cpu_control_group) { "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61" }
+     let(:cpu_period_micros) { 500 }
+     let(:cpu_quota_micros) { 98 }
+     let(:cpu_stats_number_of_periods) { 1 }
+     let(:cpu_stats_number_of_time_throttled) { 2 }
+     let(:cpu_stats_time_throttled_nanos) { 3 }
+     let(:proc_self_cgroup_content) {
+       %W(14:name=systemd,holaunlimited:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          13:pids:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          12:hugetlb:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          11:net_prio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          10:perf_event:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          9:net_cls:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          8:freezer:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          7:devices:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          6:memory:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          5:blkio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          4:cpuacct:#{cpuacct_control_group}
+          3:cpu:#{cpu_control_group}
+          2:cpuset:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+          1:name=openrc:/docker) }
+     let(:cpu_stat_file_content) {
+       [
+         "nr_periods #{cpu_stats_number_of_periods}",
+         "nr_throttled #{cpu_stats_number_of_time_throttled}",
+         "throttled_time #{cpu_stats_time_throttled_nanos}"
+       ]
+     }
+
+     before do
+       allow(LogStash::Instrument::PeriodicPoller::Cgroup).to receive(:are_cgroup_available?).and_return(true)
+
+       allow(LogStash::Instrument::PeriodicPoller::Cgroup).to receive(:read_proc_self_cgroup_lines).and_return(proc_self_cgroup_content)
+       allow(LogStash::Instrument::PeriodicPoller::Cgroup).to receive(:read_sys_fs_cgroup_cpuacct_cpu_stat).and_return(cpu_stat_file_content)
+
+       allow(LogStash::Instrument::PeriodicPoller::Cgroup).to receive(:cgroup_cpuacct_usage_nanos).with(cpuacct_control_group).and_return(cpuacct_usage)
+       allow(LogStash::Instrument::PeriodicPoller::Cgroup).to receive(:cgroup_cpu_fs_period_micros).with(cpu_control_group).and_return(cpu_period_micros)
+       allow(LogStash::Instrument::PeriodicPoller::Cgroup).to receive(:cgroup_cpu_fs_quota_micros).with(cpu_control_group).and_return(cpu_quota_micros)
+
+       subject.collect
+     end
+
+     def mval(*metric_path)
+       metric_path.reduce(os_metrics) {|acc,k| acc[k]}.value
+     end
+
+     it "should have a value for #{[:cgroup, :cpuacc, :control_group]} that is a String" do
+       expect(mval(:cgroup, :cpuacct, :control_group)).to be_a(String)
+     end
+
+     it "should have a value for #{[:cgroup, :cpu, :control_group]} that is a String" do
+       expect(mval(:cgroup, :cpu, :control_group)).to be_a(String)
+     end
+
+     [
+       [:cgroup, :cpuacct, :usage_nanos],
+       [:cgroup, :cpu, :cfs_period_micros],
+       [:cgroup, :cpu, :cfs_quota_micros],
+       [:cgroup, :cpu, :stat, :number_of_elapsed_periods],
+       [:cgroup, :cpu, :stat, :number_of_times_throttled],
+       [:cgroup, :cpu, :stat, :time_throttled_nanos]
+     ].each do |path|
+       path = Array(path)
+       it "should have a value for #{path} that is Numeric" do
+         expect(mval(*path)).to be_a(Numeric)
+       end
+     end
+   end
+ end
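The spec above never touches the filesystem: it stubs the class-level helpers on LogStash::Instrument::PeriodicPoller::Cgroup (are_cgroup_available?, read_proc_self_cgroup_lines, read_sys_fs_cgroup_cpuacct_cpu_stat, cgroup_cpuacct_usage_nanos, cgroup_cpu_fs_period_micros, cgroup_cpu_fs_quota_micros) and only checks the shape of the metrics the Os poller records. As a hedged illustration of what such helpers read, not the shipped cgroup.rb, parsing /proc/self/cgroup and the cpu controller files could look roughly like this:

# Illustrative sketch only -- the method names mirror the helpers stubbed in the
# spec above; the real data/lib/logstash/instrument/periodic_poller/cgroup.rb
# may differ in structure and error handling.
module CgroupSketch
  PROC_SELF_CGROUP = "/proc/self/cgroup"
  CPU_DIR          = "/sys/fs/cgroup/cpu"
  CPUACCT_DIR      = "/sys/fs/cgroup/cpuacct"

  # /proc/self/cgroup lines look like "4:cpuacct:/docker/<id>"; map each
  # controller name to its control group path.
  def self.control_groups
    File.readlines(PROC_SELF_CGROUP).each_with_object({}) do |line, acc|
      _hierarchy, controllers, path = line.chomp.split(":", 3)
      controllers.split(",").each { |name| acc[name] = path }
    end
  end

  def self.cgroup_cpu_fs_period_micros(control_group)
    File.read(File.join(CPU_DIR, control_group, "cpu.cfs_period_us")).to_i
  end

  def self.cgroup_cpu_fs_quota_micros(control_group)
    File.read(File.join(CPU_DIR, control_group, "cpu.cfs_quota_us")).to_i
  end

  def self.cgroup_cpuacct_usage_nanos(control_group)
    File.read(File.join(CPUACCT_DIR, control_group, "cpuacct.usage")).to_i
  end
end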
@@ -45,16 +45,24 @@ describe LogStash::OutputDelegator do
  context "after having received a batch of events" do
    before do
      subject.register
-     subject.multi_receive(events)
    end

    it "should pass the events through" do
+     expect(out_inst).to receive(:multi_receive).with(events)
+     subject.multi_receive(events)
    end

    it "should increment the number of events received" do
-     expect(subject.metric_events).to have_received(:increment).with(:in, events.length)
-     expect(subject.metric_events).to have_received(:increment).with(:out, events.length)
+     expect(subject.metric_events).to receive(:increment).with(:in, events.length)
+     expect(subject.metric_events).to receive(:increment).with(:out, events.length)
+     subject.multi_receive(events)
+   end
+
+   it "should record the `duration_in_millis`" do
+     clock = spy("clock")
+     expect(subject.metric_events).to receive(:time).with(:duration_in_millis).and_return(clock)
+     expect(clock).to receive(:stop)
+     subject.multi_receive(events)
    end
  end

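The new example spells out the timing contract the delegator is expected to honour: metric_events.time(:duration_in_millis) hands back a clock, and stop is called once the wrapped output has processed the batch. A minimal sketch of that call pattern follows, assuming the instance variable names used here (@metric_events, @output) rather than whatever data/lib/logstash/output_delegator.rb actually uses:

# Sketch of the behaviour the spec asserts, not the shipped implementation.
# @metric_events and @output are assumed names for the delegator's namespaced
# metric and the wrapped output instance.
def multi_receive(events)
  @metric_events.increment(:in, events.length)
  clock = @metric_events.time(:duration_in_millis) # returns a running clock
  @output.multi_receive(events)                    # hand the batch to the real output
  clock.stop                                       # record the elapsed milliseconds
  @metric_events.increment(:out, events.length)
end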
@@ -2,31 +2,7 @@
  require "spec_helper"
  require "logstash/pipeline"
  require "logstash/pipeline_reporter"
-
- class DummyOutput < LogStash::Outputs::Base
-
-   config_name "dummyoutput"
-   milestone 2
-
-   attr_reader :num_closes, :events
-
-   def initialize(params={})
-     super
-     @num_closes = 0
-     @events = []
-   end
-
-   def register
-   end
-
-   def receive(event)
-     @events << event
-   end
-
-   def close
-     @num_closes += 1
-   end
- end
+ require_relative "../support/mocks_classes"

  #TODO: Figure out how to add more tests that actually cover inflight events
  #This will require some janky multithreading stuff
@@ -39,7 +15,7 @@ describe LogStash::PipelineReporter do
  let(:reporter) { pipeline.reporter }

  before do
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
    allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_call_original
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_call_original

@@ -49,7 +49,7 @@ class DummyCodec < LogStash::Codecs::Base
    end
  end

- class DummyOutputMore < DummyOutput
+ class DummyOutputMore < ::LogStash::Outputs::DummyOutput
    config_name "dummyoutputmore"
  end

@@ -158,7 +158,7 @@ describe LogStash::Pipeline do
  before(:each) do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "dummyinput").and_return(DummyInput)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "dummyfilter").and_return(DummyFilter)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "dummysafefilter").and_return(DummySafeFilter)
  end
@@ -258,7 +258,7 @@ describe LogStash::Pipeline do
  before(:each) do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "dummyinput").and_return(DummyInput)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
  end


@@ -313,7 +313,7 @@ describe LogStash::Pipeline do
  before(:each) do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "dummyinput").and_return(DummyInput)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
  end

  let(:config) { "input { dummyinput {} } output { dummyoutput {} }"}
@@ -378,12 +378,12 @@ describe LogStash::Pipeline do
  let(:pipeline_settings) { { "pipeline.batch.size" => batch_size, "pipeline.workers" => 1 } }
  let(:pipeline) { LogStash::Pipeline.new(config, pipeline_settings_obj) }
  let(:logger) { pipeline.logger }
- let(:warning_prefix) { /CAUTION: Recommended inflight events max exceeded!/ }
+ let(:warning_prefix) { Regexp.new("CAUTION: Recommended inflight events max exceeded!") }

  before(:each) do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "dummyinput").and_return(DummyInput)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
    allow(logger).to receive(:warn)
    thread = Thread.new { pipeline.run }
    pipeline.shutdown
@@ -435,28 +435,87 @@ describe LogStash::Pipeline do
    end

    context "metrics" do
-     config <<-CONFIG
-       input { }
-       filter { }
-       output { }
-     CONFIG
-
-     it "uses a `NullMetric` object if `metric.collect` is set to false" do
-       settings = double("LogStash::SETTINGS")
-
-       allow(settings).to receive(:get_value).with("pipeline.id").and_return("main")
-       allow(settings).to receive(:get_value).with("metric.collect").and_return(false)
-       allow(settings).to receive(:get_value).with("config.debug").and_return(false)
-       allow(settings).to receive(:get).with("queue.type").and_return("memory")
-       allow(settings).to receive(:get).with("queue.page_capacity").and_return(1024 * 1024)
-       allow(settings).to receive(:get).with("queue.max_events").and_return(250)
-       allow(settings).to receive(:get).with("queue.max_bytes").and_return(1024 * 1024 * 1024)
-       allow(settings).to receive(:get).with("queue.checkpoint.acks").and_return(1024)
-       allow(settings).to receive(:get).with("queue.checkpoint.writes").and_return(1024)
-       allow(settings).to receive(:get).with("queue.checkpoint.interval").and_return(1000)
-
-       pipeline = LogStash::Pipeline.new(config, settings)
-       expect(pipeline.metric).to be_kind_of(LogStash::Instrument::NullMetric)
+     config = "input { } filter { } output { }"
+
+     let(:settings) { LogStash::SETTINGS.clone }
+     subject { LogStash::Pipeline.new(config, settings, metric) }
+
+     context "when metric.collect is disabled" do
+       before :each do
+         settings.set("metric.collect", false)
+       end
+
+       context "if namespaced_metric is nil" do
+         let(:metric) { nil }
+         it "uses a `NullMetric` object" do
+           expect(subject.metric).to be_a(LogStash::Instrument::NullMetric)
+         end
+       end
+
+       context "if namespaced_metric is a Metric object" do
+         let(:collector) { ::LogStash::Instrument::Collector.new }
+         let(:metric) { ::LogStash::Instrument::Metric.new(collector) }
+
+         it "uses a `NullMetric` object" do
+           expect(subject.metric).to be_a(LogStash::Instrument::NullMetric)
+         end
+
+         it "uses the same collector" do
+           expect(subject.metric.collector).to be(collector)
+         end
+       end
+
+       context "if namespaced_metric is a NullMetric object" do
+         let(:collector) { ::LogStash::Instrument::Collector.new }
+         let(:metric) { ::LogStash::Instrument::NullMetric.new(collector) }
+
+         it "uses a `NullMetric` object" do
+           expect(subject.metric).to be_a(::LogStash::Instrument::NullMetric)
+         end
+
+         it "uses the same collector" do
+           expect(subject.metric.collector).to be(collector)
+         end
+       end
+     end
+
+     context "when metric.collect is enabled" do
+       before :each do
+         settings.set("metric.collect", true)
+       end
+
+       context "if namespaced_metric is nil" do
+         let(:metric) { nil }
+         it "uses a `NullMetric` object" do
+           expect(subject.metric).to be_a(LogStash::Instrument::NullMetric)
+         end
+       end
+
+       context "if namespaced_metric is a Metric object" do
+         let(:collector) { ::LogStash::Instrument::Collector.new }
+         let(:metric) { ::LogStash::Instrument::Metric.new(collector) }
+
+         it "uses a `Metric` object" do
+           expect(subject.metric).to be_a(LogStash::Instrument::Metric)
+         end
+
+         it "uses the same collector" do
+           expect(subject.metric.collector).to be(collector)
+         end
+       end
+
+       context "if namespaced_metric is a NullMetric object" do
+         let(:collector) { ::LogStash::Instrument::Collector.new }
+         let(:metric) { ::LogStash::Instrument::NullMetric.new(collector) }
+
+         it "uses a `NullMetric` object" do
+           expect(subject.metric).to be_a(LogStash::Instrument::NullMetric)
+         end
+
+         it "uses the same collector" do
+           expect(subject.metric.collector).to be(collector)
+         end
+       end
      end
    end

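The rewritten examples build on a clone of the real LogStash::SETTINGS instead of a hand-rolled double, which is why the long list of queue.* stubs disappears, and they exercise the constructor's third argument (a namespaced metric). A short usage sketch of the pattern the spec relies on:

# Sketch of the setup used by the spec above: clone the global settings so
# "metric.collect" can be flipped without affecting other examples, then hand
# a namespaced metric to the pipeline constructor.
settings = LogStash::SETTINGS.clone
settings.set("metric.collect", false)

collector = LogStash::Instrument::Collector.new
metric    = LogStash::Instrument::Metric.new(collector)

pipeline = LogStash::Pipeline.new("input { } filter { } output { }", settings, metric)
pipeline.metric           # => a NullMetric when collection is disabled
pipeline.metric.collector # => the very collector that was passed in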
@@ -465,7 +524,7 @@ describe LogStash::Pipeline do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "dummyinputgenerator").and_return(DummyInputGenerator)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "dummyfilter").and_return(DummyFilter)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
    allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutputmore").and_return(DummyOutputMore)
  end

@@ -501,14 +560,14 @@ describe LogStash::Pipeline do
      }
    EOS
  end
- let(:output) { DummyOutput.new }
+ let(:output) { ::LogStash::Outputs::DummyOutput.new }

  before do
-   allow(DummyOutput).to receive(:new).with(any_args).and_return(output)
+   allow(::LogStash::Outputs::DummyOutput).to receive(:new).with(any_args).and_return(output)
    allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(LogStash::Inputs::Generator)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(LogStash::Codecs::Plain)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "multiline").and_return(LogStash::Filters::Multiline)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
  end

  it "flushes the buffered contents of the filter" do
@@ -531,7 +590,7 @@ describe LogStash::Pipeline do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(LogStash::Inputs::Generator)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "dummyfilter").and_return(DummyFilter)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
  end

  let(:pipeline1) { LogStash::Pipeline.new("input { generator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
@@ -643,22 +702,22 @@ describe LogStash::Pipeline do
      }
    EOS
  end
- let(:dummyoutput) { DummyOutput.new({ "id" => dummy_output_id }) }
+ let(:dummyoutput) { ::LogStash::Outputs::DummyOutput.new({ "id" => dummy_output_id }) }
  let(:metric_store) { subject.metric.collector.snapshot_metric.metric_store }

  before :each do
-   allow(DummyOutput).to receive(:new).with(any_args).and_return(dummyoutput)
+   allow(::LogStash::Outputs::DummyOutput).to receive(:new).with(any_args).and_return(dummyoutput)
    allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(LogStash::Inputs::Generator)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(LogStash::Codecs::Plain)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "multiline").and_return(LogStash::Filters::Multiline)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)

    Thread.new { subject.run }
    # make sure we have received all the generated events
    wait(3).for do
      # give us a bit of time to flush the events
-     dummyoutput.events.size < number_of_events
-   end.to be_falsey
+     dummyoutput.events.size >= number_of_events
+   end.to be_truthy
  end

  after :each do
@@ -697,12 +756,15 @@ describe LogStash::Pipeline do

  it "populates the output metrics" do
    plugin_name = dummy_output_id.to_sym
+
+   expect(collected_metric[:stats][:pipelines][:main][:plugins][:outputs][plugin_name][:events][:in].value).to eq(number_of_events)
    expect(collected_metric[:stats][:pipelines][:main][:plugins][:outputs][plugin_name][:events][:out].value).to eq(number_of_events)
+   expect(collected_metric[:stats][:pipelines][:main][:plugins][:outputs][plugin_name][:events][:duration_in_millis].value).not_to be_nil
  end

  it "populates the name of the output plugin" do
    plugin_name = dummy_output_id.to_sym
-   expect(collected_metric[:stats][:pipelines][:main][:plugins][:outputs][plugin_name][:name].value).to eq(DummyOutput.config_name)
+   expect(collected_metric[:stats][:pipelines][:main][:plugins][:outputs][plugin_name][:name].value).to eq(::LogStash::Outputs::DummyOutput.config_name)
  end

  it "populates the name of the filter plugin" do
@@ -719,7 +781,7 @@ describe LogStash::Pipeline do
    allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(LogStash::Inputs::Generator)
    allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
    allow(LogStash::Plugin).to receive(:lookup).with("filter", "dummyfilter").and_return(DummyFilter)
-   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(DummyOutput)
+   allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
  end

  let(:pipeline1) { LogStash::Pipeline.new("input { generator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
@@ -106,12 +106,8 @@ describe LogStash::Plugin do
    one_notice.validate({})
  end

- it "warns the user if we can't find a defined version" do
-   expect_any_instance_of(LogStash::Logging::Logger).to receive(:warn)
-     .once
-     .with(/plugin doesn't have a version/)
-
-   subject.validate({})
+ it "doesn't raise an exception if no version is found" do
+   expect { subject.validate({}) }.not_to raise_error
  end


@@ -63,7 +63,30 @@ describe LogStash::Util::WrappedSynchronousQueue do
      batch = read_client.take_batch
      read_client.close_batch(batch)
      store = collector.snapshot_metric.metric_store
-     expect(store.size).to eq(0)
+
+     expect(store.get_shallow(:events, :in).value).to eq(0)
+     expect(store.get_shallow(:events, :in)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:events, :out).value).to eq(0)
+     expect(store.get_shallow(:events, :out)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:events, :filtered).value).to eq(0)
+     expect(store.get_shallow(:events, :filtered)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:events, :duration_in_millis).value).to eq(0)
+     expect(store.get_shallow(:events, :duration_in_millis)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:pipeline, :in).value).to eq(0)
+     expect(store.get_shallow(:pipeline, :in)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:pipeline, :duration_in_millis).value).to eq(0)
+     expect(store.get_shallow(:pipeline, :duration_in_millis)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:pipeline, :out).value).to eq(0)
+     expect(store.get_shallow(:pipeline, :out)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
+
+     expect(store.get_shallow(:pipeline, :filtered).value).to eq(0)
+     expect(store.get_shallow(:pipeline, :filtered)).to be_kind_of(LogStash::Instrument::MetricType::Counter)
    end
  end

@@ -73,15 +96,22 @@ describe LogStash::Util::WrappedSynchronousQueue do
        5.times {|i| batch.push("value-#{i}")}
        write_client.push_batch(batch)
        read_batch = read_client.take_batch
-       sleep(0.1) # simulate some work?
-       read_client.close_batch(batch)
+       sleep(0.1) # simulate some work for the `duration_in_millis`
+       # TODO: this interaction should be cleaned in an upcoming PR,
+       # This is what the current pipeline does.
+       read_client.add_filtered_metrics(read_batch)
+       read_client.add_output_metrics(read_batch)
+       read_client.close_batch(read_batch)
        store = collector.snapshot_metric.metric_store

-       expect(store.size).to eq(4)
        expect(store.get_shallow(:events, :in).value).to eq(5)
+       expect(store.get_shallow(:events, :out).value).to eq(5)
+       expect(store.get_shallow(:events, :filtered).value).to eq(5)
        expect(store.get_shallow(:events, :duration_in_millis).value).to be > 0
        expect(store.get_shallow(:pipeline, :in).value).to eq(5)
        expect(store.get_shallow(:pipeline, :duration_in_millis).value).to be > 0
+       expect(store.get_shallow(:pipeline, :out).value).to eq(5)
+       expect(store.get_shallow(:pipeline, :filtered).value).to eq(5)
      end
    end
  end
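Read together, the extra assertions describe the batch lifecycle the read client is now expected to report on: a batch that is pushed, taken, filtered, and closed should leave both the events and pipeline namespaces with matching :in, :filtered and :out counters and non-zero duration_in_millis timers. A condensed view of that flow, using only the calls exercised in the spec (the batch itself comes from the write client earlier in the example, which this hunk does not show):

# Condensed view of the metric flow exercised above; after this sequence the
# spec expects events/pipeline :in, :out and :filtered to all read 5 and both
# duration_in_millis counters to be greater than zero.
5.times { |i| batch.push("value-#{i}") }
write_client.push_batch(batch)

read_batch = read_client.take_batch
sleep(0.1)                                    # give duration_in_millis something to measure
read_client.add_filtered_metrics(read_batch)  # marks the batch as filtered
read_client.add_output_metrics(read_batch)    # marks the batch as sent to outputs
read_client.close_batch(read_batch)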