logstash-input-azureblob 0.9.7 → 0.9.8
This diff shows the changes between two publicly released versions of this package, as they appear in its public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +28 -1
- data/lib/logstash/inputs/azureblob.rb +318 -72
- data/logstash-input-azureblob.gemspec +10 -8
- metadata +41 -13
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e72dbef9d73b6b872b786b1cd40ffd3434835ac4
+  data.tar.gz: 9054b485b8073ca0dc9cce2e09c125630caec5fc
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 917b7f0ad4b0a552091a6055c9e5fd5eeef38d6ce0ce85166eb432d2a5737a8d49f75c28579ce0119ed1887c952a0c372dc620cdf41bcdfdd9d30e9160e33c0a
+  data.tar.gz: 3f513712c43563b6ea3407b23516e784b1d4d8fc706213247132b57827f7605ed55cbcacb9c357525fb84bc18920ab367a03e411b658d56e4da7506152560767
data/README.md
CHANGED
@@ -27,7 +27,34 @@ The blob container name.
 ### Optional Parameters
 __*endpoint*__
 
-Specifies the endpoint of Azure Service Management. The default value is
+Specifies the endpoint of Azure Service Management. The default value is `core.windows.net`.
+
+__*registry_path*__
+
+Specifies the file path for the registry file to record offsets and coordinate between multiple clients. The default value is `data/registry`.
+
+Overwrite this value when there happen to be a file at the path of `data/registry` in the azure blob container.
+
+__*interval*__
+
+Set how many seconds to idle before checking for new logs. The default, `30`, means idle for `30` seconds.
+
+__*registry_create_policy*__
+
+Specifies the way to initially set offset for existing blob files.
+
+This option only applies for registry creation.
+
+Valid values include:
+
+- resume
+- start_over
+
+The default, `resume`, means when the registry is initially created, it assumes all blob has been consumed and it will start to pick up any new content in the blobs.
+
+When set to `start_over`, it assumes none of the blob is consumed and it will read all blob files from begining.
+
+Offsets will be picked up from registry file whenever it exists.
 
 ### Examples
 ```
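For reference, a minimal Logstash input configuration exercising the options documented in the README above might look like the following sketch; the account name, access key, and container values are placeholders, not values taken from the package:

```
input {
  azureblob {
    storage_account_name => "mystorageaccount"   # placeholder
    storage_access_key => "<base64 account key>" # placeholder
    container => "wad-iis-logfiles"              # placeholder
    registry_path => "data/registry"             # default shown explicitly
    interval => 30                               # seconds to idle between polls
    registry_create_policy => "resume"           # or "start_over"
  }
}
```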
data/lib/logstash/inputs/azureblob.rb
CHANGED
@@ -2,98 +2,344 @@
 require "logstash/inputs/base"
 require "logstash/namespace"
 
-
- require "
+# Azure Storage SDK for Ruby
+require "azure/storage"
+require 'json' # for registry content
+require "securerandom" # for generating uuid.
 
- #
- class LogStash::Inputs::
+# Registry item to coordinate between mulitple clients
+class LogStash::Inputs::RegistryItem
+  attr_accessor :file_path, :etag, :offset, :reader, :gen
+  # Allow json serialization.
+  def as_json(options={})
+    {
+      file_path: @file_path,
+      etag: @etag,
+      reader: @reader,
+      offset: @offset,
+      gen: @gen
+    }
+  end # as_json
 
+  def to_json(*options)
+    as_json(*options).to_json(*options)
+  end # to_json
+
+  def initialize(file_path, etag, reader, offset = 0, gen = 0)
+    @file_path = file_path
+    @etag = etag
+    @reader = reader
+    @offset = offset
+    @gen = gen
+  end # initialize
+end # class RegistryItem
+
+
+# Logstash input plugin for Azure Blobs
+#
+# This logstash plugin gathers data from Microsoft Azure Blobs
+class LogStash::Inputs::LogstashInputAzureblob < LogStash::Inputs::Base
   config_name "azureblob"
-
-
+
+  # If undefined, Logstash will complain, even if codec is unused.
   default :codec, "json_lines"
-
+
+  # Set the account name for the azure storage account.
   config :storage_account_name, :validate => :string
+
+  # Set the key to access the storage account.
   config :storage_access_key, :validate => :string
 
+  # Set the container of the blobs.
   config :container, :validate => :string
- config :sleep_time, :validate => :number, :default => 10
- config :endpoint, :validate => :string, :default => "core.windows.net"
 
-
-
-
+  # Set the endpoint for the blobs.
+  #
+  # The default, `core.windows.net` targets the public azure.
+  config :endpoint, :validate => :string, :default => 'core.windows.net'
+
+  # Set the value of using backup mode.
+  config :backupmode, :validate => :boolean, :default => false, :deprecated => true, :obsolete => 'This option is obsoleted and the settings will be ignored.'
+
+  # Set the value for the registry file.
+  #
+  # The default, `data/registry`, is used to coordinate readings for various instances of the clients.
+  config :registry_path, :validate => :string, :default => 'data/registry'
 
+  # Set how many seconds to keep idle before checking for new logs.
+  #
+  # The default, `30`, means trigger a reading for the log every 30 seconds after entering idle.
+  config :interval, :validate => :number, :default => 30
+
+  # Set the registry create mode
+  #
+  # The default, `resume`, means when the registry is initially created, it assumes all logs has been handled.
+  # When set to `start_over`, it will read all log files from begining.
+  config :registry_create_policy, :validate => :string, :default => 'resume'
+
+  # Constant of max integer
+  MAX = 2 ** ([42].pack('i').size * 16 -2 ) -1
+
   public
   def register
-
-
-
-
-
-
+    # this is the reader # for this specific instance.
+    @reader = SecureRandom.uuid
+    @registry_locker = "#{@registry_path}.lock"
+
+    # Setup a specific instance of an Azure::Storage::Client
+    client = Azure::Storage::Client.create(:storage_account_name => @storage_account_name, :storage_access_key => @storage_access_key, :storage_blob_host => "https://#{@storage_account_name}.blob.#{@endpoint}")
+    # Get an azure storage blob service object from a specific instance of an Azure::Storage::Client
+    @azure_blob = client.blob_client
+    # Add retry filter to the service object
+    @azure_blob.with_filter(Azure::Storage::Core::Filter::ExponentialRetryPolicyFilter.new)
   end # def register
+
+  def run(queue)
+    # we can abort the loop if stop? becomes true
+    while !stop?
+      process(queue)
+      Stud.stoppable_sleep(@interval) { stop? }
+    end # loop
+  end # def run
+
+  def stop
+    cleanup_registry
+  end # def stop
 
-
-
+  # Start processing the next item.
+  def process(queue)
+    begin
+      blob, start_index, gen = register_for_read
+
+      if(!blob.nil?)
+        begin
+          blob_name = blob.name
+          # Work-around: After returned by get_blob, the etag will contains quotes.
+          new_etag = blob.properties[:etag]
+          # ~ Work-around
+          blob, content = @azure_blob.get_blob(@container, blob_name, {:start_range=>start_index} )
+
+          @codec.decode(content) do |event|
+            decorate(event)
+            queue << event
+          end # decode
+        ensure
+          # Making sure the reader is removed from the registry even when there's exception.
+          new_offset = start_index
+          new_offset = new_offset + content.length unless content.nil?
+          new_registry_item = LogStash::Inputs::RegistryItem.new(blob_name, new_etag, nil, new_offset, gen)
+          update_registry(new_registry_item)
+        end # begin
+      end # if
+    rescue StandardError => e
+      @logger.error("Oh My, An error occurred. \nError:#{e}:\nTrace:\n#{e.backtrace}", :exception => e)
+    end # begin
+  end # process
+
+  # Deserialize registry hash from json string.
+  def deserialize_registry_hash (json_string)
+    result = Hash.new
+    temp_hash = JSON.parse(json_string)
+    temp_hash.values.each { |kvp|
+      result[kvp['file_path']] = LogStash::Inputs::RegistryItem.new(kvp['file_path'], kvp['etag'], kvp['reader'], kvp['offset'], kvp['gen'])
+    }
+    return result
+  end #deserialize_registry_hash
+
+  # List all the blobs in the given container.
+  def list_all_blobs
+    blobs = Set.new []
     continuation_token = NIL
     loop do
       entries = @azure_blob.list_blobs(@container, { :timeout => 10, :marker => continuation_token})
       entries.each do |entry|
-
- end
+        blobs << entry
+      end # each
       continuation_token = entries.continuation_token
       break if continuation_token.empty?
- end
- return
+    end # loop
+    return blobs
   end # def list_blobs
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+  # Raise generation for blob in registry
+  def raise_gen(registry_hash, file_path)
+    begin
+      target_item = registry_hash[file_path]
+      begin
+        target_item.gen += 1
+        # Protect gen from overflow.
+        target_item.gen = target_item.gen / 2 if target_item.gen == MAX
+      rescue StandardError => e
+        @logger.error("Fail to get the next generation for target item #{target_item}.", :exception => e)
+        target_item.gen = 0
+      end
+
+      min_gen_item = registry_hash.values.min_by { |x| x.gen }
+      while min_gen_item.gen > 0
+        registry_hash.values.each { |value|
+          value.gen -= 1
+        }
+        min_gen_item = registry_hash.values.min_by { |x| x.gen }
       end
     end
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- while
-
-
-
-
-
-
-
-
+  end # raise_gen
+
+  # Acquire a lease on a blob item with retries.
+  #
+  # By default, it will retry 30 times with 1 second interval.
+  def acquire_lease(blob_name, retry_times = 30, interval_sec = 1)
+    lease = nil;
+    retried = 0;
+    while lease.nil? do
+      begin
+        lease = @azure_blob.acquire_blob_lease(@container, blob_name, {:timeout => 10})
+      rescue StandardError => e
+        if(e.type == 'LeaseAlreadyPresent')
+          if (retried > retry_times)
+            raise
+          end
+          retried += 1
+          sleep interval_sec
+        end
+      end
+    end #while
+    return lease
+  end # acquire_lease
+
+  # Return the next blob for reading as well as the start index.
+  def register_for_read
+    begin
+      all_blobs = list_all_blobs
+      registry = all_blobs.find { |item| item.name.downcase == @registry_path }
+      registry_locker = all_blobs.find { |item| item.name.downcase == @registry_locker }
+
+      candidate_blobs = all_blobs.select { |item| (item.name.downcase != @registry_path) && ( item.name.downcase != @registry_locker ) }
+
+      start_index = 0
+      gen = 0
+      lease = nil
+
+      # Put lease on locker file than the registy file to allow update of the registry as a workaround for Azure Storage Ruby SDK issue # 16.
+      # Workaround: https://github.com/Azure/azure-storage-ruby/issues/16
+      registry_locker = @azure_blob.create_block_blob(@container, @registry_locker, @reader) if registry_locker.nil?
+      lease = acquire_lease(@registry_locker)
+      # ~ Workaround
+
+      if(registry.nil?)
+        registry_hash = create_registry(candidate_blobs)
+      else
+        registry_hash = load_registry
+      end #if
+
+      picked_blobs = Set.new []
+      # Pick up the next candidate
+      picked_blob = nil
+      candidate_blobs.each { |candidate_blob|
+        registry_item = registry_hash[candidate_blob.name]
+
+        # Appending items that doesn't exist in the hash table
+        if registry_item.nil?
+          registry_item = LogStash::Inputs::RegistryItem.new(candidate_blob.name, candidate_blob.properties[:etag], nil, 0, 0)
+          registry_hash[candidate_blob.name] = registry_item
+        end # if
+
+        if ((registry_item.offset < candidate_blob.properties[:content_length]) && (registry_item.reader.nil? || registry_item.reader == @reader))
+          picked_blobs << candidate_blob
+        end
+      }
+
+      picked_blob = picked_blobs.min_by { |b| registry_hash[b.name].gen }
+      if !picked_blob.nil?
+        registry_item = registry_hash[picked_blob.name]
+        registry_item.reader = @reader
+        registry_hash[picked_blob.name] = registry_item
+        start_index = registry_item.offset
+        raise_gen(registry_hash, picked_blob.name)
+        gen = registry_item.gen
+      end #if
+
+      # Save the chnage for the registry
+      save_registry(registry_hash)
+
+      @azure_blob.release_blob_lease(@container, @registry_locker, lease)
+      lease = nil;
+
+      return picked_blob, start_index, gen
+    rescue StandardError => e
+      @logger.error("Oh My, An error occurred. #{e}:\n#{e.backtrace}", :exception => e)
+      return nil, nil, nil
+    ensure
+      @azure_blob.release_blob_lease(@container, @registry_locker, lease) unless lease.nil?
+      lease = nil
+    end # rescue
+  end #register_for_read
+
+  # Update the registry
+  def update_registry (registry_item)
+    begin
+      lease = nil
+      lease = acquire_lease(@registry_locker)
+      registry_hash = load_registry
+      registry_hash[registry_item.file_path] = registry_item
+      save_registry(registry_hash)
+      @azure_blob.release_blob_lease(@container, @registry_locker, lease)
+      lease = nil
+    rescue StandardError => e
+      @logger.error("Oh My, An error occurred. #{e}:\n#{e.backtrace}", :exception => e)
+    ensure
+      @azure_blob.release_blob_lease(@container, @registry_locker, lease) unless lease.nil?
+      lease = nil
+    end #rescue
+  end # def update_registry
+
+  # Clean up the registry.
+  def cleanup_registry
+    begin
+      lease = nil
+      lease = acquire_lease(@registry_locker)
+      registry_hash = load_registry
+      registry_hash.each { | key, registry_item|
+        registry_item.reader = nil if registry_item.reader == @reader
+      }
+      save_registry(registry_hash)
+      @azure_blob.release_blob_lease(@container, @registry_locker, lease)
+      lease = nil
+    rescue StandardError => e
+      @logger.error("Oh My, An error occurred. #{e}:\n#{e.backtrace}", :exception => e)
+    ensure
+      @azure_blob.release_blob_lease(@container, @registry_locker, lease) unless lease.nil?
+      lease = nil
+    end #rescue
+  end # def cleanup_registry
+
+  # Create a registry file to coordinate between multiple azure blob inputs.
+  def create_registry (blob_items)
+    registry_hash = Hash.new
+
+    blob_items.each do |blob_item|
+      initial_offset = 0
+      initial_offset = blob_item.properties[:content_length] if @registry_create_policy == 'resume'
+      registry_item = LogStash::Inputs::RegistryItem.new(blob_item.name, blob_item.properties[:etag], nil, initial_offset, 0)
+      registry_hash[blob_item.name] = registry_item
+    end # each
+    save_registry(registry_hash)
+    return registry_hash
+  end # create_registry
+
+  # Load the content of the registry into the registry hash and return it.
+  def load_registry
+    # Get content
+    registry_blob, registry_blob_body = @azure_blob.get_blob(@container, @registry_path)
+    registry_hash = deserialize_registry_hash(registry_blob_body)
+    return registry_hash
+  end # def load_registry
+
+  # Serialize the registry hash and save it.
+  def save_registry(registry_hash)
+    # Serialize hash to json
+    registry_hash_json = JSON.generate(registry_hash)
+
+    # Upload registry to blob
+    @azure_blob.create_block_blob(@container, @registry_path, registry_hash_json)
+  end # def save_registry
+end # class LogStash::Inputs::LogstashInputAzureblob
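For reference, `RegistryItem#as_json` and `deserialize_registry_hash` above imply that the registry blob stored at `registry_path` holds a JSON object keyed by blob name. A minimal Ruby sketch of that round trip, using hypothetical values:

```
require 'json'

# Hypothetical entry mirroring the fields serialized by RegistryItem#as_json.
item = { file_path: 'logs/app.log', etag: '0x8D4ABCDEF', reader: nil, offset: 1024, gen: 0 }
registry_hash = { item[:file_path] => item }

json = JSON.generate(registry_hash)
# => {"logs/app.log":{"file_path":"logs/app.log","etag":"0x8D4ABCDEF","reader":null,"offset":1024,"gen":0}}

# deserialize_registry_hash walks the parsed values and rebuilds the hash keyed by file_path.
restored = JSON.parse(json).values.each_with_object({}) do |kvp, acc|
  acc[kvp['file_path']] = kvp
end
```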
data/logstash-input-azureblob.gemspec
CHANGED
@@ -1,13 +1,13 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-input-azureblob'
- s.version = '0.9.
+  s.version = '0.9.8'
   s.licenses = ['Apache License (2.0)']
- s.summary =
- s.description =
- s.
+  s.summary = 'This plugin collects Microsoft Azure Diagnostics data from Azure Storage Blobs.'
+  s.description = 'This gem is a Logstash plugin. It reads and parses data from Azure Storage Blobs.'
+  s.homepage = 'https://github.com/Azure/azure-diagnostics-tools'
+  s.authors = ['Microsoft Corporation']
   s.email = 'azdiag@microsoft.com'
- s.
- s.require_paths = ["lib"]
+  s.require_paths = ['lib']
 
   # Files
   s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','Gemfile','LICENSE']
@@ -18,7 +18,9 @@ Gem::Specification.new do |s|
   s.metadata = { "logstash_plugin" => "true", "logstash_group" => "input" }
 
   # Gem dependencies
- s.add_runtime_dependency
- s.add_runtime_dependency '
+  s.add_runtime_dependency "logstash-core-plugin-api", '>= 1.60', '<= 2.99'
+  s.add_runtime_dependency 'logstash-codec-json_lines'
+  s.add_runtime_dependency 'stud', '>= 0.0.22'
+  s.add_runtime_dependency 'azure-storage', '~> 0.11.4.preview'
   s.add_development_dependency 'logstash-devutils'
 end
metadata
CHANGED
@@ -1,18 +1,17 @@
 --- !ruby/object:Gem::Specification
 name: logstash-input-azureblob
 version: !ruby/object:Gem::Version
-  version: 0.9.
+  version: 0.9.8
 platform: ruby
 authors:
 - Microsoft Corporation
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-07-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
-
-  version_requirements: !ruby/object:Gem::Requirement
+  requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
@@ -20,7 +19,10 @@ dependencies:
     - - "<="
       - !ruby/object:Gem::Version
         version: '2.99'
-
+  name: logstash-core-plugin-api
+  prerelease: false
+  type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
@@ -28,36 +30,62 @@ dependencies:
     - - "<="
       - !ruby/object:Gem::Version
         version: '2.99'
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  name: logstash-codec-json_lines
   prerelease: false
   type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
-
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.0.22
+  name: stud
+  prerelease: false
+  type: :runtime
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.0.22
+- !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.11.4.preview
+  name: azure-storage
   prerelease: false
   type: :runtime
-- !ruby/object:Gem::Dependency
-  name: logstash-devutils
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version:
+        version: 0.11.4.preview
+- !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
        version: '0'
+  name: logstash-devutils
   prerelease: false
   type: :development
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 description: This gem is a Logstash plugin. It reads and parses data from Azure Storage Blobs.
 email: azdiag@microsoft.com
 executables: []