logstash-input-azure_event_hubs 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +4 -0
- data/CONTRIBUTORS +11 -0
- data/Gemfile +14 -0
- data/LICENSE +201 -0
- data/README.md +88 -0
- data/VERSION +1 -0
- data/lib/logstash-input-azure_event_hubs.rb +10 -0
- data/lib/logstash/inputs/azure_event_hubs.rb +479 -0
- data/lib/logstash/inputs/error_notification_handler.rb +22 -0
- data/lib/logstash/inputs/look_back_position_provider.rb +26 -0
- data/lib/logstash/inputs/named_thread_factory.rb +20 -0
- data/lib/logstash/inputs/processor.rb +82 -0
- data/lib/logstash/inputs/processor_factory.rb +27 -0
- data/logstash-input-azure_event_hubs.gemspec +28 -0
- data/spec/inputs/azure_event_hub_spec.rb +322 -0
- data/vendor/jar-dependencies/com/google/code/gson/gson/2.8.3/gson-2.8.3.jar +0 -0
- data/vendor/jar-dependencies/com/microsoft/azure/azure-eventhubs-eph/1.0.0/azure-eventhubs-eph-1.0.0.jar +0 -0
- data/vendor/jar-dependencies/com/microsoft/azure/azure-eventhubs/1.0.1/azure-eventhubs-1.0.1.jar +0 -0
- data/vendor/jar-dependencies/com/microsoft/azure/azure-storage/7.0.0/azure-storage-7.0.0.jar +0 -0
- data/vendor/jar-dependencies/org/apache/logging/log4j/log4j-api/2.9.1/log4j-api-2.9.1.jar +0 -0
- data/vendor/jar-dependencies/org/apache/logging/log4j/log4j-slf4j-impl/2.9.1/log4j-slf4j-impl-2.9.1.jar +0 -0
- data/vendor/jar-dependencies/org/apache/qpid/proton-j/0.27.1/proton-j-0.27.1.jar +0 -0
- metadata +141 -0
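
Before the per-file diffs: the spec at the bottom of this diff instantiates the plugin with a plain config hash, so a minimal standalone sketch of usage looks like the following (assuming a JRuby/Logstash environment with this gem and its vendored jars installed; the connection strings are placeholders):

require "logstash/inputs/azure_event_hubs"

# Placeholder connection strings; real values come from the Azure portal.
input = LogStash::Plugin.lookup("input", "azure_event_hubs").new(
  'event_hub_connections' => ['Endpoint=sb://...;EntityPath=event_hub_name0'],
  'storage_connection'    => 'DefaultEndpointsProtocol=https;AccountName=...'
)
input.register
# input.run(queue) blocks until input.do_stop is called (see the spec below).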
data/lib/logstash/inputs/error_notification_handler.rb
@@ -0,0 +1,22 @@
# encoding: utf-8
require "logstash/util/loggable"
java_import java.util.function.Consumer

module LogStash
  module Inputs
    module Azure
      class ErrorNotificationHandler
        include Consumer
        include LogStash::Util::Loggable

        def initialize
          @logger = self.logger
        end

        def accept(exception_received_event_args)
          @logger.error("Error with Event Processor Host. ", :exception_received_event_args => exception_received_event_args.to_s)
        end
      end
    end
  end
end
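
ErrorNotificationHandler implements java.util.function.Consumer so the Event Processor Host can call it back with exception details. A minimal sketch of how it might be attached, assuming the azure-eventhubs-eph 1.0 API in which EventProcessorOptions#setExceptionNotification accepts a Consumer of ExceptionReceivedEventArgs:

java_import com.microsoft.azure.eventprocessorhost.EventProcessorOptions

options = EventProcessorOptions.getDefaultOptions
# Route EPH exception callbacks through the handler's accept method into the Logstash logger.
options.setExceptionNotification(LogStash::Inputs::Azure::ErrorNotificationHandler.new)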
data/lib/logstash/inputs/look_back_position_provider.rb
@@ -0,0 +1,26 @@
# encoding: utf-8
require "logstash/util/loggable"
java_import java.util.function.Function
java_import com.microsoft.azure.eventhubs.EventPosition
java_import java.time.Instant

module LogStash
  module Inputs
    module Azure
      class LookBackPositionProvider
        include Function
        include LogStash::Util::Loggable

        def initialize(look_back_seconds)
          @logger = self.logger
          @look_back = Instant.ofEpochSecond(Instant.now.getEpochSecond - look_back_seconds.to_i)
          @logger.debug("Look back date/time: #{@look_back}")
        end

        def apply(t)
          EventPosition.fromEnqueuedTime(@look_back)
        end
      end
    end
  end
end
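
LookBackPositionProvider implements java.util.function.Function so it can serve as the host's initial-position callback: apply(t) receives a partition id and returns the EventPosition to start from when no checkpoint exists. A sketch, assuming the azure-eventhubs-eph 1.0 EventProcessorOptions#setInitialPositionProvider(Function&lt;String, EventPosition&gt;) signature:

java_import com.microsoft.azure.eventprocessorhost.EventProcessorOptions

options = EventProcessorOptions.getDefaultOptions
# Start 3600 seconds in the past on partitions that have no stored checkpoint.
options.setInitialPositionProvider(LogStash::Inputs::Azure::LookBackPositionProvider.new(3600))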
data/lib/logstash/inputs/named_thread_factory.rb
@@ -0,0 +1,20 @@
module LogStash
  module Inputs
    module Azure
      class NamedThreadFactory
        include java.util.concurrent.ThreadFactory
        java_import java.util.concurrent.atomic.AtomicInteger

        def initialize(name, id)
          @name = name
          @id = id
          @counter = AtomicInteger.new(-1)
        end

        def newThread(runnable)
          java.lang.Thread.new(runnable, @name + "-" + @counter.increment_and_get.to_s + "-" + @id.to_s)
        end
      end
    end
  end
end
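
NamedThreadFactory gives worker threads stable names of the form name-counter-id, which makes thread dumps from a pipeline reading several Event Hubs much easier to attribute. A sketch pairing it with the standard JDK ScheduledThreadPoolExecutor(int, ThreadFactory) constructor; the name and id strings here are illustrative:

java_import java.util.concurrent.ScheduledThreadPoolExecutor

factory  = LogStash::Inputs::Azure::NamedThreadFactory.new("azure_event_hubs", "hub0")
executor = ScheduledThreadPoolExecutor.new(4, factory)
# Threads created by this pool are named "azure_event_hubs-0-hub0", "azure_event_hubs-1-hub0", ...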
data/lib/logstash/inputs/processor.rb
@@ -0,0 +1,82 @@
# encoding: utf-8
require "logstash/util/loggable"
module LogStash
  module Inputs
    module Azure
      class Processor
        include LogStash::Util::Loggable
        include com.microsoft.azure.eventprocessorhost.IEventProcessor

        def initialize(queue, codec, checkpoint_interval, decorator, meta_data)
          @queue = queue
          @codec = codec
          @checkpoint_interval = checkpoint_interval
          @decorator = decorator
          @meta_data = meta_data
          @logger = self.logger
        end

        def onOpen(context)
          @logger.info("Event Hub: #{context.getEventHubPath.to_s}, Partition: #{context.getPartitionId.to_s} is opening.")
        end

        def onClose(context, reason)
          @logger.info("Event Hub: #{context.getEventHubPath.to_s}, Partition: #{context.getPartitionId.to_s} is closing. (reason=#{reason.to_s})")
        end

        def onEvents(context, batch)
          @logger.debug("Event Hub: #{context.getEventHubPath.to_s}, Partition: #{context.getPartitionId.to_s} is processing a batch.") if @logger.debug?
          last_payload = nil
          batch_size = 0
          batch.each do |payload|
            last_checkpoint = Time.now.to_i
            bytes = payload.getBytes
            batch_size += bytes.size
            @logger.trace("Event Hub: #{context.getEventHubPath.to_s}, Partition: #{context.getPartitionId.to_s}, Offset: #{payload.getSystemProperties.getOffset.to_s}," +
                              " Sequence: #{payload.getSystemProperties.getSequenceNumber.to_s}, Size: #{bytes.size}") if @logger.trace?

            @codec.decode(bytes.to_a.pack('C*')) do |event|
              @decorator.call(event)
              if @meta_data
                event.set("[@metadata][azure_event_hubs][name]", context.getEventHubPath)
                event.set("[@metadata][azure_event_hubs][consumer_group]", context.getConsumerGroupName)
                event.set("[@metadata][azure_event_hubs][processor_host]", context.getOwner)
                event.set("[@metadata][azure_event_hubs][partition]", context.getPartitionId)
                event.set("[@metadata][azure_event_hubs][offset]", payload.getSystemProperties.getOffset)
                event.set("[@metadata][azure_event_hubs][sequence]", payload.getSystemProperties.getSequenceNumber)
                event.set("[@metadata][azure_event_hubs][timestamp]", payload.getSystemProperties.getEnqueuedTime.getEpochSecond)
                event.set("[@metadata][azure_event_hubs][event_size]", bytes.size)
              end
              @queue << event
              if @checkpoint_interval > 0
                now = Time.now.to_i
                since_last_check_point = now - last_checkpoint
                if since_last_check_point >= @checkpoint_interval
                  context.checkpoint(payload).get
                  last_checkpoint = now
                end
              end
            end
            last_payload = payload
          end

          @codec.flush
          # always create a checkpoint at the end of onEvents in case of sparse events
          context.checkpoint(last_payload).get if last_payload
          @logger.debug("Event Hub: #{context.getEventHubPath.to_s}, Partition: #{context.getPartitionId.to_s} finished processing a batch of #{batch_size} bytes.") if @logger.debug?
        end

        def onError(context, error)
          @logger.error("Event Hub: #{context.getEventHubPath.to_s}, Partition: #{context.getPartitionId.to_s} experienced an error: #{error.to_s}")
        end
      end
    end
  end
end
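
One detail worth noting in onEvents: payload.getBytes returns a Java byte[], and bytes.to_a.pack('C*') rebuilds the raw payload string before handing it to the codec. A self-contained pure-Ruby illustration of that conversion (the array literal stands in for a Java byte array):

# Java bytes are signed (-128..127); pack('C*') takes each value modulo 256,
# so the original octets survive the round trip into a Ruby binary string.
bytes = [104, -61, -87, 108, 108, 111]  # "héllo" encoded as signed Java bytes
raw = bytes.pack('C*')                  # => "h\xC3\xA9llo" (ASCII-8BIT)
puts raw.force_encoding('UTF-8')        # => héllo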
data/lib/logstash/inputs/processor_factory.rb
@@ -0,0 +1,27 @@
# encoding: utf-8
require "logstash/inputs/processor"
module LogStash
  module Inputs
    module Azure
      class ProcessorFactory
        include com.microsoft.azure.eventprocessorhost.IEventProcessorFactory

        def initialize(queue, codec, checkpoint_interval, decorator, meta_data)
          @queue = queue
          @codec = codec
          @checkpoint_interval = checkpoint_interval
          @decorator = decorator
          @meta_data = meta_data
        end

        def createEventProcessor(context)
          Processor.new(@queue, @codec.clone, @checkpoint_interval, @decorator, @meta_data)
        end
      end
    end
  end
end
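
The factory exists so that each partition gets its own Processor with a cloned codec; codecs are stateful, and sharing one instance across partitions could interleave partial decodes. A registration sketch, assuming the azure-eventhubs-eph 1.0 EventProcessorHost#registerEventProcessorFactory API and hypothetical queue, codec, decorator, and host objects:

factory = LogStash::Inputs::Azure::ProcessorFactory.new(queue, codec, 5, decorator, true)
# registerEventProcessorFactory returns a CompletableFuture; once it completes,
# EPH calls createEventProcessor once for every partition this host owns.
host.registerEventProcessorFactory(factory).get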
data/logstash-input-azure_event_hubs.gemspec
@@ -0,0 +1,28 @@
GEM_VERSION = File.read(File.expand_path(File.join(File.dirname(__FILE__), "VERSION"))).strip unless defined?(GEM_VERSION)

Gem::Specification.new do |s|
  s.name          = 'logstash-input-azure_event_hubs'
  s.version       = GEM_VERSION
  s.licenses      = ['Apache-2.0']
  s.summary       = 'Consumes events from Azure Event Hubs for use with Logstash'
  s.description   = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
  s.homepage      = 'http://www.elastic.co/guide/en/logstash/current/index.html'
  s.authors       = ['Elastic']
  s.email         = 'info@elastic.co'
  s.require_paths = ['lib', 'vendor/jar-dependencies']

  # Files
  s.files = Dir['lib/**/*','spec/**/*','vendor/**/*', 'vendor/jar-dependencies/**/*.jar','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT', 'VERSION']
  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "input" }

  # Gem dependencies
  s.add_runtime_dependency "logstash-core-plugin-api", "~> 2.0"
  s.add_runtime_dependency 'logstash-codec-plain'
  s.add_runtime_dependency 'logstash-codec-json'
  s.add_runtime_dependency 'stud', '>= 0.0.22'
  s.add_development_dependency 'logstash-devutils', '>= 0.0.16'
end
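
As the description says, the gem is not a standalone program; it would be installed into a Logstash distribution with $LS_HOME/bin/logstash-plugin install logstash-input-azure_event_hubs. Note also that require_paths includes vendor/jar-dependencies, which is what makes the jars listed at the top of this diff loadable from JRuby.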
data/spec/inputs/azure_event_hub_spec.rb
@@ -0,0 +1,322 @@
# encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "logstash/inputs/azure_event_hubs"

java_import com.microsoft.azure.eventprocessorhost.EventProcessorHost
java_import com.microsoft.azure.eventprocessorhost.EventProcessorOptions
java_import com.microsoft.azure.eventprocessorhost.InMemoryCheckpointManager
java_import com.microsoft.azure.eventprocessorhost.InMemoryLeaseManager
java_import java.util.concurrent.ScheduledThreadPoolExecutor
java_import java.util.concurrent.CompletableFuture
java_import java.util.concurrent.TimeUnit
java_import java.util.concurrent.atomic.AtomicInteger

describe LogStash::Inputs::AzureEventHubs do

  subject(:input) { LogStash::Plugin.lookup("input", "azure_event_hubs").new(config) }

  describe "Event Hubs Configuration -> " do
    shared_examples "an exploded Event Hub config" do |x|
      it "explodes #{x} event hub(s) correctly" do
        exploded_config = input.event_hubs_exploded
        x.times do |i|
          expect(exploded_config[i]['event_hubs'].size).to be == 1 # always 1 in the exploded form
          expect(exploded_config[i]['event_hubs'][0]).to eql('event_hub_name' + i.to_s)
          expect(exploded_config[i]['event_hub_connections'][0].value).to start_with('Endpoint=sb://...')
          expect(exploded_config[i]['storage_connection'].value).to eql('DefaultEndpointsProtocol=https;AccountName=...')
          expect(exploded_config[i]['threads']).to be == 9
          expect(exploded_config[i]['codec']).to be_a_kind_of(LogStash::Codecs::Plain)
          expect(exploded_config[i]['consumer_group']).to eql('cg')
          expect(exploded_config[i]['max_batch_size']).to be == 20
          expect(exploded_config[i]['prefetch_count']).to be == 30
          expect(exploded_config[i]['receive_timeout']).to be == 40
          expect(exploded_config[i]['initial_position']).to eql('look_back')
          expect(exploded_config[i]['initial_position_look_back']).to be == 50
          expect(exploded_config[i]['checkpoint_interval']).to be == 60
          expect(exploded_config[i]['decorate_events']).to be_truthy
        end
      end
    end

    describe "Basic Config" do
      before do
        input.register
      end
      let(:config) do
        {
          'event_hub_connections' => ['Endpoint=sb://...;EntityPath=event_hub_name0', 'Endpoint=sb://...;EntityPath=event_hub_name1'],
          'storage_connection' => 'DefaultEndpointsProtocol=https;AccountName=...',
          'threads' => 9,
          'codec' => 'plain',
          'consumer_group' => 'cg',
          'max_batch_size' => 20,
          'prefetch_count' => 30,
          'receive_timeout' => 40,
          'initial_position' => 'look_back',
          'initial_position_look_back' => 50,
          'checkpoint_interval' => 60,
          'decorate_events' => true
        }
      end
      it_behaves_like "an exploded Event Hub config", 2

      it "runs the Event Processor Host" do
        mock_queue = double("queue")
        mock_host = double("event_processor_host")
        mock_host_context = double("host_context")
        completable_future = CompletableFuture.new
        # simulate work being done before completing the future
        Thread.new do
          sleep 2
          completable_future.complete("")
        end

        # rspec has issues with counters and concurrent code, so use threadsafe counters instead
        host_counter = AtomicInteger.new
        register_counter = AtomicInteger.new
        unregister_counter = AtomicInteger.new
        assertion_count = AtomicInteger.new

        allow(mock_host).to receive(:getHostContext) { mock_host_context }
        allow(mock_host_context).to receive(:getEventHubPath) { "foo" }

        expect(mock_host).to receive(:registerEventProcessorFactory).at_most(2).times {
          register_counter.incrementAndGet
          completable_future
        }
        expect(mock_host).to receive(:unregisterEventProcessor).at_most(2).times {
          unregister_counter.incrementAndGet
          completable_future
        }
        expect(EventProcessorHost).to receive(:new).at_most(2).times { |host_name, event_hub_name, consumer_group, event_hub_connection, storage_connection, container, executor|
          case event_hub_name
          when 'event_hub_name0'
            assertion_count.incrementAndGet
            expect(event_hub_connection).to eql(config['event_hub_connections'][0])
            expect(container).to eql('event_hub_name0') # default
          when 'event_hub_name1'
            assertion_count.incrementAndGet
            expect(host_name).to start_with('logstash')
            expect(event_hub_connection).to eql(config['event_hub_connections'][1])
            expect(container).to eql('event_hub_name1') # default
          end
          expect(host_name).to start_with('logstash')
          expect(storage_connection).to eql(config['storage_connection'])

          host_counter.incrementAndGet
          mock_host
        }
        # signal the stop first since the run method blocks until stop is called.
        input.do_stop
        input.run(mock_queue)
        expect(host_counter.get).to be == 2
        expect(register_counter.get).to be == 2
        expect(unregister_counter.get).to be == 2
        expect(assertion_count.get).to be == 2
      end

      describe "single connection, no array syntax" do
        let(:config) do
          {
            'event_hub_connections' => 'Endpoint=sb://logstash/;SharedAccessKeyName=activity-log-readonly;SharedAccessKey=something;EntityPath=event_hub1'
          }
        end
        it "can handle a single connection without the array notation" do
          expect { input }.to_not raise_error
          exploded_config = input.event_hubs_exploded
          expect(exploded_config.size).to be == 1
          expect(exploded_config[0]['event_hub_connections'][0].value).to eql('Endpoint=sb://logstash/;SharedAccessKeyName=activity-log-readonly;SharedAccessKey=something;EntityPath=event_hub1')
        end
      end
    end

    describe "Advanced Config" do
      before do
        input.register
      end
      let(:config) do
        {
          'config_mode' => 'advanced',
          'event_hubs' => [
            {'event_hub_name0' => {
              'event_hub_connection' => 'Endpoint=sb://...',
              'storage_connection' => 'DefaultEndpointsProtocol=https;AccountName=...',
              'codec' => 'plain',
              'consumer_group' => 'cg',
              'max_batch_size' => 20,
              'prefetch_count' => 30,
              'receive_timeout' => 40,
              'initial_position' => 'look_back',
              'initial_position_look_back' => 50,
              'checkpoint_interval' => 60,
              'decorate_events' => true}},
            {'event_hub_name1' => {
              'event_hub_connection' => '1Endpoint=sb://...',
              'storage_connection' => '1DefaultEndpointsProtocol=https;AccountName=...',
              'codec' => 'json',
              'consumer_group' => 'cg1',
              'receive_timeout' => 41,
              'initial_position' => 'end',
              'checkpoint_interval' => 61,
              'decorate_events' => false,
              'storage_container' => 'alt_container'}},
            # same named event hub with different configuration is allowed
            {'event_hub_name0' => {
              'event_hub_connection' => 'Endpoint=sb://...',
              'consumer_group' => 'ls'}}
          ],
          'codec' => 'plain',
          'consumer_group' => 'default_consumer_group',
          'max_batch_size' => 21,
          'threads' => 9
        }
      end
      it_behaves_like "an exploded Event Hub config", 1
      it "explodes the 2nd advanced config event hub correctly" do
        exploded_config = input.event_hubs_exploded
        expect(exploded_config[1]['event_hubs'].size).to be == 1 # always 1 in the exploded form
        expect(exploded_config[1]['event_hubs'][0]).to eql('event_hub_name1')
        expect(exploded_config[1]['event_hub_connections'][0].value).to eql('1Endpoint=sb://...')
        expect(exploded_config[1]['storage_connection'].value).to eql('1DefaultEndpointsProtocol=https;AccountName=...')
        expect(exploded_config[1]['threads']).to be == 9
        expect(exploded_config[1]['codec']).to be_a_kind_of(LogStash::Codecs::JSON) # different between configs
        expect(exploded_config[1]['consumer_group']).to eql('cg1') # override global
        expect(exploded_config[1]['max_batch_size']).to be == 21 # filled from global
        expect(exploded_config[1]['prefetch_count']).to be == 300 # default
        expect(exploded_config[1]['receive_timeout']).to be == 41
        expect(exploded_config[1]['initial_position']).to eql('end')
        expect(exploded_config[1]['initial_position_look_back']).to be == 86400 # default
        expect(exploded_config[1]['checkpoint_interval']).to be == 61
        expect(exploded_config[1]['decorate_events']).to be_falsy
        expect(exploded_config[1]['storage_container']).to eq('alt_container')
      end

      it "runs the Event Processor Host" do
        mock_queue = double("queue")
        mock_host = double("event_processor_host")
        mock_host_context = double("host_context")
        completable_future = CompletableFuture.new
        # simulate work being done before completing the future
        Thread.new do
          sleep 2
          completable_future.complete("")
        end

        # rspec has issues with counters and concurrent code, so use threadsafe counters instead
        host_counter = AtomicInteger.new
        register_counter = AtomicInteger.new
        unregister_counter = AtomicInteger.new
        assertion_count = AtomicInteger.new
        allow_any_instance_of(InMemoryLeaseManager).to receive(:java_send)
        allow_any_instance_of(InMemoryCheckpointManager).to receive(:java_send)

        allow(mock_host).to receive(:getHostContext) { mock_host_context }
        allow(mock_host_context).to receive(:getEventHubPath) { "foo" }

        expect(mock_host).to receive(:registerEventProcessorFactory).at_most(3).times {
          register_counter.incrementAndGet
          completable_future
        }
        expect(mock_host).to receive(:unregisterEventProcessor).at_most(3).times {
          unregister_counter.incrementAndGet
          completable_future
        }
        expect(EventProcessorHost).to receive(:new).at_most(3).times { |host_name, event_hub_name, consumer_group, event_hub_connection, storage_connection, container, executor|
          case event_hub_name
          when 'event_hub_name0'
            if consumer_group.eql?('cg')
              assertion_count.incrementAndGet
              expect(host_name).to start_with('logstash')
              expect(event_hub_connection).to eql(config['event_hubs'][0]['event_hub_name0']['event_hub_connections'][0].value)
              expect(storage_connection).to eql(config['event_hubs'][0]['event_hub_name0']['storage_connection'].value)
              expect(container).to eql('event_hub_name0') # default
            elsif consumer_group.eql?('ls')
              assertion_count.incrementAndGet
              expect(event_hub_connection).to eql(config['event_hubs'][2]['event_hub_name0']['event_hub_connections'][0].value)
              # in this mode, storage connection and container are replaced with in memory offset management
              expect(storage_connection).to be_kind_of(InMemoryCheckpointManager)
              expect(container).to be_kind_of(InMemoryLeaseManager)
            end
          when 'event_hub_name1'
            assertion_count.incrementAndGet
            expect(host_name).to start_with('logstash')
            expect(event_hub_connection).to eql(config['event_hubs'][1]['event_hub_name1']['event_hub_connections'][0].value)
            expect(storage_connection).to eql(config['event_hubs'][1]['event_hub_name1']['storage_connection'].value)
            expect(container).to eql(config['event_hubs'][1]['event_hub_name1']['storage_container'])
          end
          host_counter.incrementAndGet
          mock_host
        }
        # signal the stop first since the run method blocks until stop is called.
        input.do_stop
        input.run(mock_queue)
        expect(host_counter.get).to be == 3
        expect(register_counter.get).to be == 3
        expect(unregister_counter.get).to be == 3
        expect(assertion_count.get).to be == 3
      end
    end

    describe "Bad Basic Config" do
      describe "Offset overwriting" do
        let(:config) do
          {
            'event_hub_connections' => ['Endpoint=sb://...;EntityPath=event_hub_name0', 'Endpoint=sb://...;EntityPath=event_hub_name0'],
            'storage_connection' => 'DefaultEndpointsProtocol=https;AccountName=...'
          }
        end
        it "errors when using the same consumer group and storage container" do
          expect { input }.to raise_error(/overwriting offsets/)
        end
      end

      describe "Invalid Event Hub name" do
        let(:config) do
          {
            'event_hub_connections' => ['Endpoint=sb://logstash/;SharedAccessKeyName=activity-log-readonly;SharedAccessKey=thisshouldnotbepartofthelogmessage'],
            'storage_connection' => 'DefaultEndpointsProtocol=https;AccountName=...'
          }
        end
        it "errors when the connection string lacks an EntityPath, and redacts the key from the message" do
          expect { input }.to raise_error(/that the connection string contains the EntityPath/)
          expect { input }.to raise_error(/redacted/)
          expect { input }.to raise_error(/^((?!thisshouldnotbepartofthelogmessage).)*$/)
        end
      end
    end

    describe "Bad Advanced Config" do
      describe "Offset overwriting" do
        let(:config) do
          {
            'config_mode' => 'advanced',
            'event_hubs' => [
              {'event_hub_name0' => {
                'event_hub_connection' => 'Endpoint=sb://...',
              }},
              {'event_hub_name1' => {
                'event_hub_connection' => '1Endpoint=sb://...',
              }}
            ],
            'storage_connection' => 'DefaultEndpointsProtocol=https;AccountName=...',
            'consumer_group' => 'default_consumer_group',
            'storage_container' => 'logstash'
          }
        end
        it "errors when using the same consumer group and storage container" do
          expect { input }.to raise_error(/overwriting offsets/)
        end
      end
    end
  end
end
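
Assuming the standard logstash-devutils workflow implied by the Gemfile and the development dependency in the gemspec, these specs would typically be run under JRuby with: bundle exec rspec spec/inputs/azure_event_hub_spec.rb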