logstash-core 5.0.0.alpha4.snapshot3-java → 5.0.0.alpha5.snapshot1-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of logstash-core might be problematic.

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/lib/logstash/agent.rb +6 -2
  3. data/lib/logstash/api/app_helpers.rb +15 -0
  4. data/lib/logstash/api/command_factory.rb +3 -1
  5. data/lib/logstash/api/commands/base.rb +3 -5
  6. data/lib/logstash/api/commands/default_metadata.rb +27 -0
  7. data/lib/logstash/api/commands/hot_threads_reporter.rb +61 -0
  8. data/lib/logstash/api/commands/node.rb +9 -63
  9. data/lib/logstash/api/commands/stats.rb +5 -61
  10. data/lib/logstash/api/commands/system/basicinfo_command.rb +3 -6
  11. data/lib/logstash/api/modules/base.rb +3 -1
  12. data/lib/logstash/api/modules/node.rb +8 -18
  13. data/lib/logstash/api/modules/node_stats.rb +5 -41
  14. data/lib/logstash/api/modules/stats.rb +13 -33
  15. data/lib/logstash/build.rb +6 -0
  16. data/lib/logstash/environment.rb +9 -0
  17. data/lib/logstash/filter_delegator.rb +1 -1
  18. data/lib/logstash/instrument/metric.rb +7 -6
  19. data/lib/logstash/instrument/metric_type/base.rb +1 -4
  20. data/lib/logstash/instrument/namespaced_metric.rb +1 -1
  21. data/lib/logstash/instrument/null_metric.rb +6 -1
  22. data/lib/logstash/output_delegator.rb +2 -0
  23. data/lib/logstash/pipeline.rb +62 -93
  24. data/lib/logstash/pipeline_reporter.rb +14 -13
  25. data/lib/logstash/plugin.rb +8 -2
  26. data/lib/logstash/runner.rb +7 -1
  27. data/lib/logstash/settings.rb +17 -7
  28. data/lib/logstash/util/wrapped_synchronous_queue.rb +220 -0
  29. data/lib/logstash/version.rb +1 -1
  30. data/lib/logstash/webserver.rb +4 -0
  31. data/lib/logstash-core/version.rb +1 -1
  32. data/locales/en.yml +4 -0
  33. data/logstash-core.gemspec +2 -2
  34. data/spec/api/lib/api/node_spec.rb +0 -1
  35. data/spec/api/lib/api/node_stats_spec.rb +36 -34
  36. data/spec/api/lib/api/support/resource_dsl_methods.rb +15 -0
  37. data/spec/api/spec_helper.rb +5 -2
  38. data/spec/logstash/inputs/metrics_spec.rb +1 -1
  39. data/spec/logstash/instrument/metric_type/counter_spec.rb +1 -6
  40. data/spec/logstash/instrument/metric_type/gauge_spec.rb +1 -4
  41. data/spec/logstash/instrument/namespaced_metric_spec.rb +61 -2
  42. data/spec/logstash/instrument/null_metric_spec.rb +7 -9
  43. data/spec/logstash/pipeline_spec.rb +7 -7
  44. data/spec/logstash/plugin_spec.rb +73 -0
  45. data/spec/logstash/settings/string_spec.rb +21 -0
  46. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +70 -22
  47. data/spec/support/shared_examples.rb +98 -0
  48. metadata +11 -4
data/lib/logstash/util/wrapped_synchronous_queue.rb CHANGED
@@ -37,5 +37,225 @@ module LogStash; module Util
     def poll(millis)
       @queue.poll(millis, TimeUnit::MILLISECONDS)
     end
+
+    def write_client
+      WriteClient.new(self)
+    end
+
+    def read_client()
+      ReadClient.new(self)
+    end
+
+    class ReadClient
+      # We generally only want one thread at a time able to access pop/take/poll operations
+      # from this queue. We also depend on this to be able to block consumers while we snapshot
+      # in-flight buffers
+
+      def initialize(queue, batch_size = 125, wait_for = 5)
+        @queue = queue
+        @mutex = Mutex.new
+        # Note that @infilght_batches as a central mechanism for tracking inflight
+        # batches will fail if we have multiple read clients in the pipeline.
+        @inflight_batches = {}
+        @batch_size = batch_size
+        @wait_for = wait_for
+      end
+
+      def set_batch_dimensions(batch_size, wait_for)
+        @batch_size = batch_size
+        @wait_for = wait_for
+      end
+
+      def set_events_metric(metric)
+        @event_metric = metric
+      end
+
+      def set_pipeline_metric(metric)
+        @pipeline_metric = metric
+      end
+
+      def inflight_batches
+        @mutex.synchronize do
+          yield(@inflight_batches)
+        end
+      end
+
+      def current_inflight_batch
+        @inflight_batches.fetch(Thread.current, [])
+      end
+
+      def take_batch
+        @mutex.synchronize do
+          batch = ReadBatch.new(@queue, @batch_size, @wait_for)
+          add_starting_metrics(batch)
+          set_current_thread_inflight_batch(batch)
+          batch
+        end
+      end
+
+      def set_current_thread_inflight_batch(batch)
+        @inflight_batches[Thread.current] = batch
+      end
+
+      def close_batch(batch)
+        @mutex.synchronize do
+          @inflight_batches.delete(Thread.current)
+        end
+      end
+
+      def add_starting_metrics(batch)
+        return if @event_metric.nil? || @pipeline_metric.nil?
+        @event_metric.increment(:in, batch.starting_size)
+        @pipeline_metric.increment(:in, batch.starting_size)
+      end
+
+      def add_filtered_metrics(batch)
+        @event_metric.increment(:filtered, batch.filtered_size)
+        @pipeline_metric.increment(:filtered, batch.filtered_size)
+      end
+
+      def add_output_metrics(batch)
+        @event_metric.increment(:out, batch.filtered_size)
+        @pipeline_metric.increment(:out, batch.filtered_size)
+      end
+    end
+
+    class ReadBatch
+      def initialize(queue, size, wait)
+        @shutdown_signal_received = false
+        @flush_signal_received = false
+        @originals = Hash.new
+        @cancelled = Hash.new
+        @generated = Hash.new
+        @iterating_temp = Hash.new
+        @iterating = false # Atomic Boolean maybe? Although batches are not shared across threads
+        take_originals_from_queue(queue, size, wait)
+      end
+
+      def merge(event)
+        return if event.nil? || @originals.key?(event)
+        # take care not to cause @generated to change during iteration
+        # @iterating_temp is merged after the iteration
+        if iterating?
+          @iterating_temp[event] = true
+        else
+          # the periodic flush could generate events outside of an each iteration
+          @generated[event] = true
+        end
+      end
+
+      def cancel(event)
+        @cancelled[event] = true
+      end
+
+      def each(&blk)
+        # take care not to cause @originals or @generated to change during iteration
+        @iterating = true
+        @originals.each do |e, _|
+          blk.call(e) unless @cancelled.include?(e)
+        end
+        @generated.each do |e, _|
+          blk.call(e) unless @cancelled.include?(e)
+        end
+        @iterating = false
+        update_generated
+      end
+
+      def size
+        filtered_size
+      end
+
+      def starting_size
+        @originals.size
+      end
+
+      def filtered_size
+        @originals.size + @generated.size
+      end
+
+      def cancelled_size
+        @cancelled.size
+      end
+
+      def shutdown_signal_received?
+        @shutdown_signal_received
+      end
+
+      def flush_signal_received?
+        @flush_signal_received
+      end
+
+      private
+
+      def iterating?
+        @iterating
+      end
+
+      def update_generated
+        @generated.update(@iterating_temp)
+        @iterating_temp.clear
+      end
+
+      def take_originals_from_queue(queue, size, wait)
+        size.times do |t|
+          event = (t == 0) ? queue.take : queue.poll(wait)
+          if event.nil?
+            # queue poll timed out
+            next
+          elsif event == LogStash::SHUTDOWN
+            # We MUST break here. If a batch consumes two SHUTDOWN events
+            # then another worker may have its SHUTDOWN 'stolen', thus blocking
+            # the pipeline.
+            @shutdown_signal_received = true
+            break
+          elsif event == LogStash::FLUSH
+            # See comment above
+            # We should stop doing work after flush as well.
+            @flush_signal_received = true
+            break
+          else
+            @originals[event] = true
+          end
+        end
+      end
+    end
+
+    class WriteClient
+      def initialize(queue)
+        @queue = queue
+      end
+
+      def get_new_batch
+        WriteBatch.new
+      end
+
+      def push(event)
+        @queue.push(event)
+      end
+      alias_method(:<<, :push)
+
+      def push_batch(batch)
+        batch.each do |event|
+          push(event)
+        end
+      end
+    end
+
+    class WriteBatch
+      def initialize
+        @events = []
+      end
+
+      def push(event)
+        @events.push(event)
+      end
+      alias_method(:<<, :push)
+
+      def each(&blk)
+        @events.each do |e|
+          blk.call(e)
+        end
+      end
+    end
   end
 end end
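
For orientation, a minimal usage sketch of the new read/write clients follows (assumed wiring; `event` and `process` are placeholders and the exact constructor call is not shown in this diff):

    # Hypothetical producer/consumer wiring, mirroring how the pipeline uses the queue.
    queue  = LogStash::Util::WrappedSynchronousQueue.new
    input  = queue.write_client   # used by input threads
    worker = queue.read_client    # a single reader per pipeline (see the @inflight_batches note above)

    # Producer side: push events one at a time, or build a WriteBatch and push it at once.
    input << event
    write_batch = input.get_new_batch
    write_batch << event
    input.push_batch(write_batch)

    # Consumer side: take a batch (blocks for the first event, then polls for up to batch_size - 1
    # more), iterate it (cancelled events are skipped), record metrics, then release the batch.
    read_batch = worker.take_batch
    read_batch.each { |e| process(e) }
    worker.add_filtered_metrics(read_batch)
    worker.add_output_metrics(read_batch)
    worker.close_batch(read_batch)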
data/lib/logstash/version.rb CHANGED
@@ -11,4 +11,4 @@
 # eventually this file should be in the root logstash lib fir and dependencies in logstash-core should be
 # fixed.

-LOGSTASH_VERSION = "5.0.0-alpha4.snapshot3"
+LOGSTASH_VERSION = "5.0.0.alpha5.snapshot1"
data/lib/logstash/webserver.rb CHANGED
@@ -55,6 +55,10 @@ module LogStash
     def error(str)
       logger.error(str)
     end
+
+    def address
+      "#{http_host}:#{http_port}"
+    end

     # Empty method, this method is required because of the puma usage we make through
     # the Single interface, https://github.com/puma/puma/blob/master/lib/puma/single.rb#L82
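
Illustrative only (the logger setup and constructor arguments below are assumptions, not taken from this diff): the new helper concatenates the configured host and port, which the API layer reports as the node's http_address.

    logger = Cabin::Channel.get(LogStash)   # logstash's Cabin logger
    webserver = LogStash::WebServer.new(logger,
                                        :http_host => "127.0.0.1",
                                        :http_port => 9600)
    webserver.address # => "127.0.0.1:9600"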
data/lib/logstash-core/version.rb CHANGED
@@ -5,4 +5,4 @@
 # Note to authors: this should not include dashes because 'gem' barfs if
 # you include a dash in the version string.

-LOGSTASH_CORE_VERSION = "5.0.0-alpha4.snapshot3"
+LOGSTASH_CORE_VERSION = "5.0.0.alpha5.snapshot1"
data/locales/en.yml CHANGED
@@ -210,6 +210,10 @@ en:
       version: |+
         Emit the version of logstash and its friends,
         then exit.
+      datapath: |+
+        This should point to a writable directory. Logstash
+        will use this directory whenever it needs to store
+        data. Plugins will also have access to this path.
       pluginpath: |+
         A path of where to find plugins. This flag
         can be given multiple times to include
data/logstash-core.gemspec CHANGED
@@ -15,9 +15,9 @@ Gem::Specification.new do |gem|
   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
   gem.name = "logstash-core"
   gem.require_paths = ["lib"]
-  gem.version = LOGSTASH_CORE_VERSION.gsub(/-/, '.')
+  gem.version = LOGSTASH_CORE_VERSION

-  gem.add_runtime_dependency "logstash-core-event-java", "5.0.0.alpha4.snapshot3"
+  gem.add_runtime_dependency "logstash-core-event-java", "5.0.0.alpha5.snapshot1"

   gem.add_runtime_dependency "cabin", "~> 0.8.0" #(Apache 2.0 license)
   gem.add_runtime_dependency "pry", "~> 0.10.1" #(Ruby license)
data/spec/api/lib/api/node_spec.rb CHANGED
@@ -85,7 +85,6 @@ describe LogStash::Api::Modules::Node do
       }
     },
     "hot_threads"=> {
-      "hostname" => String,
       "time" => String,
       "busiest_threads" => Numeric,
       "threads" => Array
data/spec/api/lib/api/node_stats_spec.rb CHANGED
@@ -11,15 +11,38 @@ describe LogStash::Api::Modules::NodeStats do

   # DSL describing response structure
   root_structure = {
-    "events"=>{
-      "in"=>Numeric,
-      "filtered"=>Numeric,
-      "out"=>Numeric
-    },
     "jvm"=>{
       "threads"=>{
         "count"=>Numeric,
         "peak_count"=>Numeric
+      },
+      "mem" => {
+        "heap_used_in_bytes" => Numeric,
+        "heap_used_percent" => Numeric,
+        "heap_committed_in_bytes" => Numeric,
+        "heap_max_in_bytes" => Numeric,
+        "non_heap_used_in_bytes" => Numeric,
+        "non_heap_committed_in_bytes" => Numeric,
+        "pools" => {
+          "survivor" => {
+            "peak_used_in_bytes" => Numeric,
+            "used_in_bytes" => Numeric,
+            "peak_max_in_bytes" => Numeric,
+            "max_in_bytes" => Numeric
+          },
+          "old" => {
+            "peak_used_in_bytes" => Numeric,
+            "used_in_bytes" => Numeric,
+            "peak_max_in_bytes" => Numeric,
+            "max_in_bytes" => Numeric
+          },
+          "young" => {
+            "peak_used_in_bytes" => Numeric,
+            "used_in_bytes" => Numeric,
+            "peak_max_in_bytes" => Numeric,
+            "max_in_bytes" => Numeric
+          }
+        }
       }
     },
     "process"=>{
@@ -33,35 +56,14 @@ describe LogStash::Api::Modules::NodeStats do
         "total_in_millis"=>Numeric,
         "percent"=>Numeric
       }
-    },
-    "mem" => {
-      "heap_used_in_bytes" => Numeric,
-      "heap_used_percent" => Numeric,
-      "heap_committed_in_bytes" => Numeric,
-      "heap_max_in_bytes" => Numeric,
-      "non_heap_used_in_bytes" => Numeric,
-      "non_heap_committed_in_bytes" => Numeric,
-      "pools" => {
-        "survivor" => {
-          "peak_used_in_bytes" => Numeric,
-          "used_in_bytes" => Numeric,
-          "peak_max_in_bytes" => Numeric,
-          "max_in_bytes" => Numeric
-        },
-        "old" => {
-          "peak_used_in_bytes" => Numeric,
-          "used_in_bytes" => Numeric,
-          "peak_max_in_bytes" => Numeric,
-          "max_in_bytes" => Numeric
-        },
-        "young" => {
-          "peak_used_in_bytes" => Numeric,
-          "used_in_bytes" => Numeric,
-          "peak_max_in_bytes" => Numeric,
-          "max_in_bytes" => Numeric
-        }
-      }
-    }
+    },
+    "pipeline" => {
+      "events" => {
+        "in" => Numeric,
+        "filtered" => Numeric,
+        "out" => Numeric
+      }
+    }
   }

   test_api_and_resources(root_structure)
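
Taken together, the two hunks above move the event counters from the top level into a `pipeline` section and move `mem` under `jvm`. An illustrative payload matching the new DSL (placeholder values, subset of keys) looks roughly like:

    example_stats = {
      "jvm" => {
        "threads" => { "count" => 30, "peak_count" => 32 },
        "mem"     => { "heap_used_in_bytes" => 50_000_000, "heap_used_percent" => 5 }
      },
      "process"  => { "cpu" => { "total_in_millis" => 1200, "percent" => 3 } },
      "pipeline" => { "events" => { "in" => 100, "filtered" => 100, "out" => 100 } }
    }
    example_stats["pipeline"]["events"]["out"] # => 100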
data/spec/api/lib/api/support/resource_dsl_methods.rb CHANGED
@@ -23,6 +23,21 @@ module ResourceDSLMethods
       it "should respond OK" do
         expect(last_response).to be_ok
       end
+
+
+      describe "the default metadata" do
+        it "should include the host" do
+          expect(payload["host"]).to eql(Socket.gethostname)
+        end
+
+        it "should include the version" do
+          expect(payload["version"]).to eql(LOGSTASH_CORE_VERSION)
+        end
+
+        it "should include the http address" do
+          expect(payload["http_address"]).to eql("#{Socket.gethostname}:#{::LogStash::WebServer::DEFAULT_PORT}")
+        end
+      end

       hash_to_mapping(expected).each do |resource_path,klass|
         dotted = resource_path.join(".")
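
The assertions above imply every API response now carries default metadata along these lines (the hostname is illustrative and 9600 is assumed to be the default API port):

    # Illustrative fragment; real values come from Socket.gethostname and the running webserver.
    {
      "host"         => "example-host",
      "version"      => "5.0.0.alpha5.snapshot1",
      "http_address" => "example-host:9600"
    }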
data/spec/api/spec_helper.rb CHANGED
@@ -1,6 +1,7 @@
 # encoding: utf-8
 API_ROOT = File.expand_path(File.join(File.dirname(__FILE__), "..", "..", "lib", "logstash", "api"))

+require "stud/task"
 require "logstash/devutils/rspec/spec_helper"
 $LOAD_PATH.unshift(File.expand_path(File.dirname(__FILE__)))
 require "lib/api/support/resource_dsl_methods"
@@ -17,7 +18,9 @@ end

 module LogStash
   class DummyAgent < Agent
-    def start_webserver; end
+    def start_webserver
+      @webserver = Struct.new(:address).new("#{Socket.gethostname}:#{::LogStash::WebServer::DEFAULT_PORT}")
+    end
     def stop_webserver; end
   end
 end
@@ -92,7 +95,7 @@ shared_context "api setup" do
     @runner = LogStashRunner.new
     @runner.start
   end
-
+
   after :all do
     @runner.stop
   end
data/spec/logstash/inputs/metrics_spec.rb CHANGED
@@ -8,7 +8,7 @@ describe LogStash::Inputs::Metrics do
   let(:queue) { [] }

   before :each do
-    allow(subject).to receive(:metric).and_return(metric)
+    subject.metric = metric
   end

   describe "#run" do
data/spec/logstash/instrument/metric_type/counter_spec.rb CHANGED
@@ -28,12 +28,7 @@ describe LogStash::Instrument::MetricType::Counter do

   context "When creating a hash " do
     it "creates the hash from all the values" do
-      metric_hash = {
-        "key" => key,
-        "namespaces" => namespaces,
-        "value" => 0,
-        "type" => "counter"
-      }
+      metric_hash = { key => 0 }
       expect(subject.to_hash).to match(metric_hash)
     end
   end
data/spec/logstash/instrument/metric_type/gauge_spec.rb CHANGED
@@ -29,10 +29,7 @@ describe LogStash::Instrument::MetricType::Gauge do
   context "When creating a hash " do
     it "creates the hash from all the values" do
       metric_hash = {
-        "key" => key,
-        "namespaces" => namespaces,
-        "value" => value,
-        "type" => "gauge"
+        key => value
       }
       expect(subject.to_hash).to match(metric_hash)
     end
data/spec/logstash/instrument/namespaced_metric_spec.rb CHANGED
@@ -2,10 +2,11 @@
 require "logstash/instrument/namespaced_metric"
 require "logstash/instrument/metric"
 require_relative "../../support/matchers"
+require_relative "../../support/shared_examples"
 require "spec_helper"

 describe LogStash::Instrument::NamespacedMetric do
-  let(:namespace) { :stats }
+  let(:namespace) { :root }
   let(:collector) { [] }
   let(:metric) { LogStash::Instrument::Metric.new(collector) }

@@ -27,6 +28,64 @@ describe LogStash::Instrument::NamespacedMetric do
     new_namespace = subject.namespace(:wally)

     expect(subject.namespace_name).to eq([namespace])
-    expect(new_namespace.namespace_name).to eq([:stats, :wally])
+    expect(new_namespace.namespace_name).to eq([:root, :wally])
   end
+
+  context "#increment" do
+    it "a counter by 1" do
+      metric = subject.increment(:error_rate)
+      expect(collector).to be_a_metric_event([:root, :error_rate], :counter, :increment, 1)
+    end
+
+    it "a counter by a provided value" do
+      metric = subject.increment(:error_rate, 20)
+      expect(collector).to be_a_metric_event([:root, :error_rate], :counter, :increment, 20)
+    end
+  end
+
+  context "#decrement" do
+    it "a counter by 1" do
+      metric = subject.decrement(:error_rate)
+      expect(collector).to be_a_metric_event([:root, :error_rate], :counter, :decrement, 1)
+    end
+
+    it "a counter by a provided value" do
+      metric = subject.decrement(:error_rate, 20)
+      expect(collector).to be_a_metric_event([:root, :error_rate], :counter, :decrement, 20)
+    end
+  end
+
+  context "#gauge" do
+    it "set the value of a key" do
+      metric = subject.gauge(:size_queue, 20)
+      expect(collector).to be_a_metric_event([:root, :size_queue], :gauge, :set, 20)
+    end
+  end
+
+  context "#time" do
+    let(:sleep_time) { 2 }
+    let(:sleep_time_ms) { sleep_time * 1_000 }
+
+    it "records the duration" do
+      subject.time(:duration_ms) { sleep(sleep_time) }
+
+      expect(collector.last).to be_within(sleep_time_ms).of(sleep_time_ms + 5)
+      expect(collector[0]).to match([:root])
+      expect(collector[1]).to be(:duration_ms)
+      expect(collector[2]).to be(:counter)
+    end
+
+    it "return a TimedExecution" do
+      execution = subject.time(:duration_ms)
+      sleep(sleep_time)
+      execution.stop
+
+      expect(collector.last).to be_within(sleep_time_ms).of(sleep_time_ms + 0.1)
+      expect(collector[0]).to match([:root])
+      expect(collector[1]).to be(:duration_ms)
+      expect(collector[2]).to be(:counter)
+    end
+  end
+
+  include_examples "metrics commons operations"
 end
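
A condensed sketch of the calls these examples exercise (the Array stands in for the real collector, as in the spec; `do_work` is a placeholder):

    collector = []
    metric = LogStash::Instrument::NamespacedMetric.new(
      LogStash::Instrument::Metric.new(collector), :root)

    metric.increment(:error_rate)          # counter +1 under [:root, :error_rate]
    metric.decrement(:error_rate, 20)      # counter -20
    metric.gauge(:size_queue, 20)          # gauge set to 20
    metric.time(:duration_ms) { do_work }  # records the block's duration in milliseconds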
data/spec/logstash/instrument/null_metric_spec.rb CHANGED
@@ -1,21 +1,19 @@
 # encoding: utf-8
 require "logstash/instrument/null_metric"
 require "logstash/instrument/namespaced_metric"
-require_relative "../../support/matchers"
+require_relative "../../support/shared_examples"

 describe LogStash::Instrument::NullMetric do
+  # This is defined in the `namespaced_metric_spec`
+  include_examples "metrics commons operations"
+
   it "defines the same interface as `Metric`" do
     expect(described_class).to implement_interface_of(LogStash::Instrument::NamespacedMetric)
   end

-  describe "#time" do
-    it "returns the value of the block without recording any metrics" do
-      expect(subject.time(:execution_time) { "hello" }).to eq("hello")
-    end
-
-    it "return a TimedExecution" do
-      execution = subject.time(:do_something)
-      expect { execution.stop }.not_to raise_error
+  describe "#namespace" do
+    it "return a NullMetric" do
+      expect(subject.namespace(key)).to be_kind_of LogStash::Instrument::NullMetric
     end
   end
 end
data/spec/logstash/pipeline_spec.rb CHANGED
@@ -360,7 +360,6 @@ describe LogStash::Pipeline do
   end

   context "compiled filter funtions" do
-
     context "new events should propagate down the filters" do
       config <<-CONFIG
         filter {
@@ -466,11 +465,12 @@ describe LogStash::Pipeline do
       pipeline = LogStash::Pipeline.new(config, pipeline_settings_obj)
       Thread.new { pipeline.run }
       sleep 0.1 while !pipeline.ready?
-      # give us a bit of time to flush the events
       wait(5).for do
-        next unless output && output.events && !(event = output.events.pop).nil?
-        event.get("message").split("\n").count
-      end.to eq(number_of_events)
+        # give us a bit of time to flush the events
+        output.events.empty?
+      end.to be_falsey
+      event = output.events.pop
+      expect(event.get("message").count("\n")).to eq(99)
       pipeline.shutdown
     end
   end
@@ -604,7 +604,7 @@ describe LogStash::Pipeline do

       Thread.new { subject.run }
       # make sure we have received all the generated events
-      sleep 1 while dummyoutput.events.size < number_of_events
+      sleep 0.25 while dummyoutput.events.size < number_of_events
     end

     after :each do
@@ -614,7 +614,7 @@ describe LogStash::Pipeline do
     context "global metric" do
       let(:collected_metric) { metric_store.get_with_path("stats/events") }

-      it "populates the differents" do
+      it "populates the different metrics" do
         expect(collected_metric[:stats][:events][:in].value).to eq(number_of_events)
         expect(collected_metric[:stats][:events][:filtered].value).to eq(number_of_events)
         expect(collected_metric[:stats][:events][:out].value).to eq(number_of_events)
data/spec/logstash/plugin_spec.rb CHANGED
@@ -270,4 +270,77 @@ describe LogStash::Plugin do
       end
     end
   end
+
+
+  context "When the plugin record a metric" do
+    let(:config) { {} }
+
+    [LogStash::Inputs::Base, LogStash::Filters::Base, LogStash::Outputs::Base].each do |base|
+      let(:plugin) do
+        Class.new(base) do
+          config_name "testing"
+
+          def register
+            metric.gauge("power_level", 9000)
+          end
+        end
+      end
+
+      subject { plugin.new(config) }
+
+      context "when no metric is set to the plugin" do
+        context "when `enable_metric` is TRUE" do
+          it "recording metric should not raise an exception" do
+            expect { subject.register }.not_to raise_error
+          end
+
+          it "should use a `NullMetric`" do
+            expect(subject.metric).to be_kind_of(LogStash::Instrument::NullMetric)
+          end
+        end
+
+        context "when `enable_metric` is FALSE" do
+          let(:config) { { "enable_metric" => false } }
+
+          it "recording metric should not raise an exception" do
+            expect { subject.register }.not_to raise_error
+          end
+
+          it "should use a `NullMetric`" do
+            expect(subject.metric).to be_kind_of(LogStash::Instrument::NullMetric)
+          end
+        end
+      end
+
+      context "When a specific metric collector is configured" do
+        context "when `enable_metric` is TRUE" do
+          let(:metric) { LogStash::Instrument::Metric.new(LogStash::Instrument::Collector.new).namespace("dbz") }
+
+          before :each do
+            subject.metric = metric
+          end
+
+          it "recording metric should not raise an exception" do
+            expect { subject.register }.not_to raise_error
+          end
+
+          it "should use the configured metric" do
+            expect(subject.metric).to eq(metric)
+          end
+        end
+
+        context "when `enable_metric` is FALSE" do
+          let(:config) { { "enable_metric" => false } }
+
+          it "recording metric should not raise an exception" do
+            expect { subject.register }.not_to raise_error
+          end
+
+          it "should use a `NullMetric`" do
+            expect(subject.metric).to be_kind_of(LogStash::Instrument::NullMetric)
+          end
+        end
+      end
+    end
+  end
 end
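
A condensed sketch of the behaviour exercised above (the named filter class is hypothetical; the spec builds an equivalent anonymous class): when no collector is attached, or when `enable_metric` is false, `metric` resolves to a NullMetric and recording is a harmless no-op.

    require "logstash/filters/base"

    # Hypothetical plugin mirroring the spec's anonymous test plugin.
    class DummyPowerFilter < LogStash::Filters::Base
      config_name "testing"

      def register
        metric.gauge("power_level", 9000)
      end
    end

    plugin = DummyPowerFilter.new("enable_metric" => false)
    plugin.register  # does not raise
    plugin.metric    # => a LogStash::Instrument::NullMetric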