logstash-core 6.0.0.beta1-java → 6.0.0.beta2-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/lib/logstash-core/logstash-core.jar +0 -0
  3. data/lib/logstash-core/version.rb +1 -1
  4. data/lib/logstash/agent.rb +0 -16
  5. data/lib/logstash/compiler/lscl.rb +2 -53
  6. data/lib/logstash/compiler/lscl/helpers.rb +55 -0
  7. data/lib/logstash/config/config_ast.rb +6 -3
  8. data/lib/logstash/config/modules_common.rb +4 -1
  9. data/lib/logstash/elasticsearch_client.rb +4 -1
  10. data/lib/logstash/environment.rb +8 -2
  11. data/lib/logstash/filter_delegator.rb +11 -6
  12. data/lib/logstash/instrument/collector.rb +7 -5
  13. data/lib/logstash/instrument/metric_store.rb +6 -9
  14. data/lib/logstash/instrument/namespaced_metric.rb +4 -0
  15. data/lib/logstash/instrument/namespaced_null_metric.rb +4 -0
  16. data/lib/logstash/instrument/null_metric.rb +10 -0
  17. data/lib/logstash/instrument/wrapped_write_client.rb +33 -24
  18. data/lib/logstash/modules/kibana_client.rb +5 -3
  19. data/lib/logstash/modules/kibana_config.rb +1 -4
  20. data/lib/logstash/modules/scaffold.rb +2 -0
  21. data/lib/logstash/modules/settings_merger.rb +52 -4
  22. data/lib/logstash/output_delegator.rb +7 -5
  23. data/lib/logstash/pipeline.rb +37 -14
  24. data/lib/logstash/pipeline_settings.rb +2 -0
  25. data/lib/logstash/runner.rb +14 -2
  26. data/lib/logstash/settings.rb +26 -0
  27. data/lib/logstash/util/cloud_setting_auth.rb +29 -0
  28. data/lib/logstash/util/cloud_setting_id.rb +41 -0
  29. data/lib/logstash/util/modules_setting_array.rb +28 -0
  30. data/lib/logstash/util/wrapped_acked_queue.rb +5 -6
  31. data/lib/logstash/util/wrapped_synchronous_queue.rb +14 -9
  32. data/lib/logstash/version.rb +1 -1
  33. data/locales/en.yml +16 -0
  34. data/spec/logstash/agent/converge_spec.rb +6 -7
  35. data/spec/logstash/config/source/multi_local_spec.rb +11 -0
  36. data/spec/logstash/filter_delegator_spec.rb +20 -8
  37. data/spec/logstash/legacy_ruby_event_spec.rb +4 -4
  38. data/spec/logstash/modules/scaffold_spec.rb +2 -7
  39. data/spec/logstash/modules/settings_merger_spec.rb +111 -0
  40. data/spec/logstash/output_delegator_spec.rb +15 -5
  41. data/spec/logstash/pipeline_spec.rb +39 -7
  42. data/spec/logstash/runner_spec.rb +4 -1
  43. data/spec/logstash/settings/modules_spec.rb +115 -0
  44. metadata +10 -2
@@ -205,19 +205,18 @@ module LogStash; module Util
205
205
  end
206
206
 
207
207
  def start_clock
208
- @inflight_clocks[Thread.current] = [
209
- @event_metric.time(:duration_in_millis),
210
- @pipeline_metric.time(:duration_in_millis)
211
- ]
208
+ @inflight_clocks[Thread.current] = java.lang.System.current_time_millis
212
209
  end
213
210
 
214
211
  def stop_clock(batch)
215
212
  unless @inflight_clocks[Thread.current].nil?
216
213
  if batch.size > 0
217
- # onl/y stop (which also records) the metrics if the batch is non-empty.
214
+ # only stop (which also records) the metrics if the batch is non-empty.
218
215
  # start_clock is now called at empty batch creation and an empty batch could
219
216
  # stay empty all the way down to the close_batch call.
220
- @inflight_clocks[Thread.current].each(&:stop)
217
+ time_taken = java.lang.System.current_time_millis - @inflight_clocks[Thread.current]
218
+ @event_metric.report_time(:duration_in_millis, time_taken)
219
+ @pipeline_metric.report_time(:duration_in_millis, time_taken)
221
220
  end
222
221
  @inflight_clocks.delete(Thread.current)
223
222
  end
@@ -68,11 +68,17 @@ module LogStash; module Util
68
68
 
69
69
  def set_events_metric(metric)
70
70
  @event_metric = metric
71
+ @event_metric_out = @event_metric.counter(:out)
72
+ @event_metric_filtered = @event_metric.counter(:filtered)
73
+ @event_metric_time = @event_metric.counter(:duration_in_millis)
71
74
  define_initial_metrics_values(@event_metric)
72
75
  end
73
76
 
74
77
  def set_pipeline_metric(metric)
75
78
  @pipeline_metric = metric
79
+ @pipeline_metric_out = @pipeline_metric.counter(:out)
80
+ @pipeline_metric_filtered = @pipeline_metric.counter(:filtered)
81
+ @pipeline_metric_time = @pipeline_metric.counter(:duration_in_millis)
76
82
  define_initial_metrics_values(@pipeline_metric)
77
83
  end
78
84
 
@@ -140,10 +146,7 @@ module LogStash; module Util
140
146
  end
141
147
 
142
148
  def start_clock
143
- @inflight_clocks[Thread.current] = [
144
- @event_metric.time(:duration_in_millis),
145
- @pipeline_metric.time(:duration_in_millis)
146
- ]
149
+ @inflight_clocks[Thread.current] = java.lang.System.current_time_millis
147
150
  end
148
151
 
149
152
  def stop_clock(batch)
@@ -152,20 +155,22 @@ module LogStash; module Util
152
155
  # only stop (which also records) the metrics if the batch is non-empty.
153
156
  # start_clock is now called at empty batch creation and an empty batch could
154
157
  # stay empty all the way down to the close_batch call.
155
- @inflight_clocks[Thread.current].each(&:stop)
158
+ time_taken = java.lang.System.current_time_millis - @inflight_clocks[Thread.current]
159
+ @event_metric_time.increment(time_taken)
160
+ @pipeline_metric_time.increment(time_taken)
156
161
  end
157
162
  @inflight_clocks.delete(Thread.current)
158
163
  end
159
164
  end
160
165
 
161
166
  def add_filtered_metrics(batch)
162
- @event_metric.increment(:filtered, batch.filtered_size)
163
- @pipeline_metric.increment(:filtered, batch.filtered_size)
167
+ @event_metric_filtered.increment(batch.filtered_size)
168
+ @pipeline_metric_filtered.increment(batch.filtered_size)
164
169
  end
165
170
 
166
171
  def add_output_metrics(batch)
167
- @event_metric.increment(:out, batch.filtered_size)
168
- @pipeline_metric.increment(:out, batch.filtered_size)
172
+ @event_metric_out.increment(batch.filtered_size)
173
+ @pipeline_metric_out.increment(batch.filtered_size)
169
174
  end
170
175
  end
171
176
 
@@ -11,4 +11,4 @@
11
11
  # eventually this file should be in the root logstash lib fir and dependencies in logstash-core should be
12
12
  # fixed.
13
13
 
14
- LOGSTASH_VERSION = "6.0.0-beta1"
14
+ LOGSTASH_VERSION = "6.0.0-beta2"
@@ -242,6 +242,22 @@ en:
242
242
  '-M "MODULE_NAME.var.PLUGIN_TYPE.PLUGIN_NAME.VARIABLE_NAME=VALUE"'
243
243
  as in
244
244
  '-M "example.var.filter.mutate.fieldname=fieldvalue"'
245
+ modules_setup: |+
246
+ Load index template into Elasticsearch, and saved searches,
247
+ index-pattern, visualizations, and dashboards into Kibana when
248
+ running modules.
249
+ cloud_id: |+
250
+ Sets the elasticsearch and kibana host settings for
251
+ module connections in Elastic Cloud.
252
+ Your Elastic Cloud User interface or the Cloud support
253
+ team should provide this.
254
+ Add an optional label prefix '<label>:' to help you
255
+ identify multiple cloud.ids.
256
+ e.g. 'staging:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy'
257
+ cloud_auth: |+
258
+ Sets the elasticsearch and kibana username and password
259
+ for module connections in Elastic Cloud
260
+ e.g. 'username:<password>'
245
261
  configtest: |+
246
262
  Check configuration for valid syntax and then exit.
247
263
  http_host: Web API binding host
@@ -74,19 +74,18 @@ describe LogStash::Agent do
74
74
  describe "#running_user_defined_pipelines" do
75
75
  it "returns the user defined pipelines" do
76
76
  start_agent(subject)
77
- subject.with_running_user_defined_pipelines do |pipelines|
78
- expect(pipelines).to include(:main)
79
- expect(pipelines).not_to include(:system_pipeline)
80
- end
81
- subject.shutdown
77
+ wait_for do
78
+ subject.with_running_user_defined_pipelines {|pipelines| pipelines.keys }
79
+ end.to eq([:main])
82
80
  end
83
81
  end
84
82
 
85
83
  describe "#running_user_defined_pipelines?" do
86
84
  it "returns true" do
87
85
  start_agent(subject)
88
- expect(subject.running_user_defined_pipelines?).to be_truthy
89
- subject.shutdown
86
+ wait_for do
87
+ subject.running_user_defined_pipelines?
88
+ end.to be_truthy
90
89
  end
91
90
  end
92
91
  end
@@ -155,5 +155,16 @@ describe LogStash::Config::Source::MultiLocal do
155
155
  expect { subject.pipeline_configs }.to raise_error(ArgumentError)
156
156
  end
157
157
  end
158
+
159
+ context 'using dead letter queue settings' do
160
+ let(:retrieved_pipelines) do [
161
+ { "pipeline.id" => "main", "path.dead_letter_queue" => "/tmp", "dead_letter_queue.max_bytes" => 10000 },
162
+ ]
163
+ end
164
+ it "should not raise an error" do
165
+ expect { subject.pipeline_configs }.not_to raise_error(ArgumentError)
166
+ end
167
+
168
+ end
158
169
  end
159
170
  end
@@ -7,20 +7,32 @@ require "logstash/execution_context"
7
7
  require "support/shared_contexts"
8
8
 
9
9
  describe LogStash::FilterDelegator do
10
+
11
+ class MockGauge
12
+ def increment(_)
13
+ end
14
+ end
15
+
10
16
  include_context "execution_context"
11
-
17
+
12
18
  let(:logger) { double(:logger) }
13
19
  let(:filter_id) { "my-filter" }
14
20
  let(:config) do
15
21
  { "host" => "127.0.0.1", "id" => filter_id }
16
22
  end
17
23
  let(:collector) { [] }
24
+ let(:counter_in) { MockGauge.new }
25
+ let(:counter_out) { MockGauge.new }
26
+ let(:counter_time) { MockGauge.new }
18
27
  let(:metric) { LogStash::Instrument::NamespacedNullMetric.new(collector, :null) }
19
28
  let(:events) { [LogStash::Event.new, LogStash::Event.new] }
20
29
 
21
30
  before :each do
22
31
  allow(pipeline).to receive(:id).and_return(pipeline_id)
23
32
  allow(metric).to receive(:namespace).with(anything).and_return(metric)
33
+ allow(metric).to receive(:counter).with(:in).and_return(counter_in)
34
+ allow(metric).to receive(:counter).with(:out).and_return(counter_out)
35
+ allow(metric).to receive(:counter).with(:duration_in_millis).and_return(counter_time)
24
36
  end
25
37
 
26
38
  let(:plugin_klass) do
@@ -60,7 +72,7 @@ describe LogStash::FilterDelegator do
60
72
  context "when the flush return events" do
61
73
  it "increments the out" do
62
74
  subject.multi_filter([LogStash::Event.new])
63
- expect(metric).to receive(:increment).with(:out, 1)
75
+ expect(counter_out).to receive(:increment).with(1)
64
76
  subject.flush({})
65
77
  end
66
78
  end
@@ -78,12 +90,12 @@ describe LogStash::FilterDelegator do
78
90
  end
79
91
 
80
92
  it "has incremented :in" do
81
- expect(metric).to receive(:increment).with(:in, events.size)
93
+ expect(counter_in).to receive(:increment).with(events.size)
82
94
  subject.multi_filter(events)
83
95
  end
84
96
 
85
97
  it "has not incremented :out" do
86
- expect(metric).not_to receive(:increment).with(:out, anything)
98
+ expect(counter_out).not_to receive(:increment).with(anything)
87
99
  subject.multi_filter(events)
88
100
  end
89
101
  end
@@ -109,8 +121,8 @@ describe LogStash::FilterDelegator do
109
121
  end
110
122
 
111
123
  it "increments the in/out of the metric" do
112
- expect(metric).to receive(:increment).with(:in, events.size)
113
- expect(metric).to receive(:increment).with(:out, events.size * 2)
124
+ expect(counter_in).to receive(:increment).with(events.size)
125
+ expect(counter_out).to receive(:increment).with(events.size * 2)
114
126
 
115
127
  subject.multi_filter(events)
116
128
  end
@@ -138,8 +150,8 @@ describe LogStash::FilterDelegator do
138
150
  end
139
151
 
140
152
  it "increments the in/out of the metric" do
141
- expect(metric).to receive(:increment).with(:in, events.size)
142
- expect(metric).to receive(:increment).with(:out, events.size)
153
+ expect(counter_in).to receive(:increment).with(events.size)
154
+ expect(counter_out).to receive(:increment).with(events.size)
143
155
 
144
156
  subject.multi_filter(events)
145
157
  end
@@ -399,17 +399,17 @@ describe LogStash::Event do
399
399
  end
400
400
 
401
401
  it "should assign current time when no timestamp" do
402
- expect(LogStash::Event.new({}).timestamp.to_i).to be_within(1).of (Time.now.to_i)
402
+ expect(LogStash::Event.new({}).timestamp.to_i).to be_within(2).of (Time.now.to_i)
403
403
  end
404
404
 
405
405
  it "should tag for invalid value" do
406
406
  event = LogStash::Event.new("@timestamp" => "foo")
407
- expect(event.timestamp.to_i).to be_within(1).of Time.now.to_i
407
+ expect(event.timestamp.to_i).to be_within(2).of Time.now.to_i
408
408
  expect(event.get("tags")).to eq([LogStash::Event::TIMESTAMP_FAILURE_TAG])
409
409
  expect(event.get(LogStash::Event::TIMESTAMP_FAILURE_FIELD)).to eq("foo")
410
410
 
411
411
  event = LogStash::Event.new("@timestamp" => 666)
412
- expect(event.timestamp.to_i).to be_within(1).of Time.now.to_i
412
+ expect(event.timestamp.to_i).to be_within(2).of Time.now.to_i
413
413
  expect(event.get("tags")).to eq([LogStash::Event::TIMESTAMP_FAILURE_TAG])
414
414
  expect(event.get(LogStash::Event::TIMESTAMP_FAILURE_FIELD)).to eq(666)
415
415
  end
@@ -421,7 +421,7 @@ describe LogStash::Event do
421
421
 
422
422
  it "should tag for invalid string format" do
423
423
  event = LogStash::Event.new("@timestamp" => "foo")
424
- expect(event.timestamp.to_i).to be_within(1).of Time.now.to_i
424
+ expect(event.timestamp.to_i).to be_within(2).of Time.now.to_i
425
425
  expect(event.get("tags")).to eq([LogStash::Event::TIMESTAMP_FAILURE_TAG])
426
426
  expect(event.get(LogStash::Event::TIMESTAMP_FAILURE_FIELD)).to eq("foo")
427
427
  end
@@ -124,18 +124,13 @@ ERB
124
124
  expect(resource2).to be_a(LogStash::Modules::KibanaDashboards)
125
125
  expect(resource1.import_path).to eq("api/kibana/settings")
126
126
  expect(resource1.content).to be_a(Array)
127
- expect(resource1.content.size).to eq(2)
127
+ expect(resource1.content.size).to eq(1)
128
128
 
129
129
  test_object = resource1.content[0]
130
130
  expect(test_object).to be_a(LogStash::Modules::KibanaSettings::Setting)
131
131
  expect(test_object.name).to eq("defaultIndex")
132
132
  expect(test_object.value).to eq("foo-*")
133
133
 
134
- test_object = resource1.content[1]
135
- expect(test_object).to be_a(LogStash::Modules::KibanaSettings::Setting)
136
- expect(test_object.name).to eq("metrics:max_buckets")
137
- expect(test_object.value).to eq(86400)
138
-
139
134
  expect(resource2.import_path).to eq("api/kibana/dashboards/import")
140
135
  expect(resource2.content).to be_a(Array)
141
136
  expect(resource2.content.size).to eq(5)
@@ -207,7 +202,7 @@ ERB
207
202
  test_module.with_settings(module_settings)
208
203
  test_module.import(LogStash::Modules::ElasticsearchImporter.new(client), LogStash::Modules::KibanaImporter.new(kbnclient))
209
204
  expect(paths).to eq(expected_paths)
210
- expect(contents[0]).to eq({"changes"=>{"defaultIndex"=>"tester-*", "metrics:max_buckets"=>"86400"}})
205
+ expect(contents[0]).to eq({"changes"=>{"defaultIndex"=>"tester-*"}})
211
206
  second_kbn_post = contents[1]
212
207
  expect(second_kbn_post[:version]).to eq("9.8.7-6")
213
208
  expect(second_kbn_post[:objects]).to be_a(Array)
@@ -0,0 +1,111 @@
1
+ # encoding: utf-8
2
+ require "spec_helper"
3
+ require "logstash/util/cloud_setting_id"
4
+ require "logstash/util/cloud_setting_auth"
5
+ require "logstash/modules/settings_merger"
6
+ require "logstash/util/password"
7
+
8
+ class SubstituteSettingsForRSpec
9
+ def initialize(hash = {}) @hash = hash; end
10
+ def put(key, value) @hash[key] = value; end
11
+ def get(key) @hash[key]; end
12
+ end
13
+
14
+ describe LogStash::Modules::SettingsMerger do
15
+ describe "#merge" do
16
+ let(:cli) {[{"name"=>"mod1", "var.input.tcp.port"=>"3333"}, {"name"=>"mod2"}]}
17
+ let(:yml) {[{"name"=>"mod1", "var.input.tcp.port"=>2222, "var.kibana.username"=>"rupert", "var.kibana.password"=>"fotherington"}, {"name"=>"mod3", "var.input.tcp.port"=>4445}]}
18
+ subject(:results) { described_class.merge(cli, yml) }
19
+ it "merges cli overwriting any common fields in yml" do
20
+ expect(results).to be_a(Array)
21
+ expect(results.size).to eq(3)
22
+ expect(results[0]["name"]).to eq("mod1")
23
+ expect(results[0]["var.input.tcp.port"]).to eq("3333")
24
+ expect(results[0]["var.kibana.username"]).to eq("rupert")
25
+ expect(results[1]["name"]).to eq("mod2")
26
+ expect(results[2]["name"]).to eq("mod3")
27
+ expect(results[2]["var.input.tcp.port"]).to eq(4445)
28
+ end
29
+ end
30
+
31
+ describe "#merge_cloud_settings" do
32
+ let(:cloud_id) { LogStash::Util::CloudSettingId.new("label:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy") }
33
+ let(:cloud_auth) { LogStash::Util::CloudSettingAuth.new("elastix:bigwhoppingfairytail") }
34
+ let(:mod_settings) { {} }
35
+
36
+ context "when both are supplied" do
37
+ let(:expected_table) do
38
+ {
39
+ "var.kibana.scheme" => "https",
40
+ "var.kibana.host" => "identifier.us-east-1.aws.found.io:443",
41
+ "var.elasticsearch.hosts" => "notareal.us-east-1.aws.found.io:443",
42
+ "var.elasticsearch.username" => "elastix",
43
+ "var.kibana.username" => "elastix"
44
+ }
45
+ end
46
+ let(:ls_settings) { SubstituteSettingsForRSpec.new({"cloud.id" => cloud_id, "cloud.auth" => cloud_auth}) }
47
+
48
+ before do
49
+ described_class.merge_cloud_settings(mod_settings, ls_settings)
50
+ end
51
+
52
+ it "adds entries to module settings" do
53
+ expected_table.each do |key, expected|
54
+ expect(mod_settings[key]).to eq(expected)
55
+ end
56
+ expect(mod_settings["var.elasticsearch.password"].value).to eq("bigwhoppingfairytail")
57
+ expect(mod_settings["var.kibana.password"].value).to eq("bigwhoppingfairytail")
58
+ end
59
+ end
60
+
61
+ context "when cloud.id is supplied" do
62
+ let(:expected_table) do
63
+ {
64
+ "var.kibana.scheme" => "https",
65
+ "var.kibana.host" => "identifier.us-east-1.aws.found.io:443",
66
+ "var.elasticsearch.hosts" => "notareal.us-east-1.aws.found.io:443",
67
+ }
68
+ end
69
+ let(:ls_settings) { SubstituteSettingsForRSpec.new({"cloud.id" => cloud_id}) }
70
+
71
+ before do
72
+ described_class.merge_cloud_settings(mod_settings, ls_settings)
73
+ end
74
+
75
+ it "adds entries to module settings" do
76
+ expected_table.each do |key, expected|
77
+ expect(mod_settings[key]).to eq(expected)
78
+ end
79
+ end
80
+ end
81
+
82
+ context "when only cloud.auth is supplied" do
83
+ let(:ls_settings) { SubstituteSettingsForRSpec.new({"cloud.auth" => cloud_auth}) }
84
+ it "should raise an error" do
85
+ expect{ described_class.merge_cloud_settings(mod_settings, ls_settings) }.to raise_exception(ArgumentError)
86
+ end
87
+ end
88
+
89
+ context "when neither cloud.id nor cloud.auth is supplied" do
90
+ let(:ls_settings) { SubstituteSettingsForRSpec.new() }
91
+ it "should do nothing" do
92
+ expect(mod_settings).to be_empty
93
+ end
94
+ end
95
+ end
96
+
97
+ describe "#format_module_settings" do
98
+ let(:before_hash) { {"foo" => "red", "bar" => "blue", "qux" => "pink"} }
99
+ let(:after_hash) { {"foo" => "red", "bar" => "steel-blue", "baz" => LogStash::Util::Password.new("cyan"), "qux" => nil} }
100
+ subject(:results) { described_class.format_module_settings(before_hash, after_hash) }
101
+ it "yields an array of formatted lines for ease of logging" do
102
+ expect(results.size).to eq(after_hash.size + 2)
103
+ expect(results.first).to eq("-------- Module Settings ---------")
104
+ expect(results.last).to eq("-------- Module Settings ---------")
105
+ expect(results[1]).to eq("foo: 'red'")
106
+ expect(results[2]).to eq("bar: 'steel-blue', was: 'blue'")
107
+ expect(results[3]).to eq("baz: '<password>', was: ''")
108
+ expect(results[4]).to eq("qux: '', was: 'pink'")
109
+ end
110
+ end
111
+ end
@@ -5,10 +5,19 @@ require "spec_helper"
5
5
  require "support/shared_contexts"
6
6
 
7
7
  describe LogStash::OutputDelegator do
8
+
9
+ class MockGauge
10
+ def increment(_)
11
+ end
12
+ end
13
+
8
14
  let(:logger) { double("logger") }
9
15
  let(:events) { 7.times.map { LogStash::Event.new }}
10
16
  let(:plugin_args) { {"id" => "foo", "arg1" => "val1"} }
11
17
  let(:collector) { [] }
18
+ let(:counter_in) { MockGauge.new }
19
+ let(:counter_out) { MockGauge.new }
20
+ let(:counter_time) { MockGauge.new }
12
21
  let(:metric) { LogStash::Instrument::NamespacedNullMetric.new(collector, :null) }
13
22
 
14
23
  include_context "execution_context"
@@ -23,6 +32,9 @@ describe LogStash::OutputDelegator do
23
32
  before(:each) do
24
33
  # use the same metric instance
25
34
  allow(metric).to receive(:namespace).with(any_args).and_return(metric)
35
+ allow(metric).to receive(:counter).with(:in).and_return(counter_in)
36
+ allow(metric).to receive(:counter).with(:out).and_return(counter_out)
37
+ allow(metric).to receive(:counter).with(:duration_in_millis).and_return(counter_time)
26
38
 
27
39
  allow(out_klass).to receive(:new).with(any_args).and_return(out_inst)
28
40
  allow(out_klass).to receive(:name).and_return("example")
@@ -58,15 +70,13 @@ describe LogStash::OutputDelegator do
58
70
  end
59
71
 
60
72
  it "should increment the number of events received" do
61
- expect(subject.metric_events).to receive(:increment).with(:in, events.length)
62
- expect(subject.metric_events).to receive(:increment).with(:out, events.length)
73
+ expect(counter_in).to receive(:increment).with(events.length)
74
+ expect(counter_out).to receive(:increment).with(events.length)
63
75
  subject.multi_receive(events)
64
76
  end
65
77
 
66
78
  it "should record the `duration_in_millis`" do
67
- clock = spy("clock")
68
- expect(subject.metric_events).to receive(:time).with(:duration_in_millis).and_return(clock)
69
- expect(clock).to receive(:stop)
79
+ expect(counter_time).to receive(:increment).with(Integer)
70
80
  subject.multi_receive(events)
71
81
  end
72
82
  end