logstash-input-azurewadtable 0.9.11 → 0.9.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
-SHA1:
-  metadata.gz: c27925f10618e725258c57b9afb50b9f8c25689c
-  data.tar.gz: 3f4a6dc33cbf979edeaf97b6696470b22bd7f5ff
+SHA256:
+  metadata.gz: 5d5f03c67ab4e356e775d2c01ea283c5ab984ed9fe33cbf588406392fa1b1422
+  data.tar.gz: 86c7c36b5735e0119e39a1764137736b2fc1cd824126f33696073fbec48ffc6d
 SHA512:
-  metadata.gz: 11ac6192c5c76e265bab0412520b795e898e28e5861aeacdebdfd05634da3c718bc4b4aa079f4706959fe63c4ddca2191ae21505785067516934a1028153947c
-  data.tar.gz: 1581ff53ae508173137aa0ccb8ad96fc5c224f4385bd6b7f1535362196d4c44c050eef4aa4b676d3df162141941b85ff2ec94aca18a62626fa31149f3087be2b
+  metadata.gz: 1e8ce77d95b549f2da0eb56ee937604b11bf49ecb6b4b68751246d1c44c03c47d0f88baa9e7e2f263b6acfff8db22f00ee7c0012d20f66a717a54fe6b796f9c3
+  data.tar.gz: 6478098b9b5028439215ec1f1c2386eecfe9688ab931590660a439f80584b6dc84321242a2a355f862b0407bdbbb8f70f2477ef7ee57cf3eaf69a6c0bf82c1a0
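The checksum change is mechanical: newer RubyGems releases record SHA256 digests (alongside SHA512) in checksums.yaml instead of SHA1. A minimal sketch of reproducing such digests locally, assuming the gem's two inner archives, metadata.gz and data.tar.gz, sit in the current directory (the local paths are an assumption; the filenames come from the diff):

```ruby
require "digest"

# Recompute the digests checksums.yaml records for the two archives
# packed inside a .gem file (hypothetical local copies).
%w[metadata.gz data.tar.gz].each do |archive|
  puts "#{archive} SHA256: #{Digest::SHA256.file(archive).hexdigest}"
  puts "#{archive} SHA512: #{Digest::SHA512.file(archive).hexdigest}"
end
```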
data/README.md CHANGED
@@ -48,6 +48,9 @@ __*endpoint*__
 
 Specifies the endpoint of Azure environment. The default value is "core.windows.net".
 
+__*past_queries_count*__
+Specifies the number of past queries to run so the plugin doesn't miss late arriving data. By default this is 5
+
 ### Examples
 ```
 input
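The new `past_queries_count` option drives the `DuplicateDetector` introduced further down in this diff: the plugin keeps the last N query windows, re-runs them to catch late-arriving rows, and suppresses anything it has already emitted. A simplified, self-contained model of that idea (a sketch only; `RollingDedup` and the string IDs are illustrative, not the plugin's actual classes):

```ruby
require "set"

# Toy model of a bounded de-duplication window, the idea behind
# past_queries_count: remember what the last N re-run queries returned
# and emit only rows that no remembered window has seen.
class RollingDedup
  def initialize(past_queries_count)
    @past_queries_count = past_queries_count
    @seen_per_window = [] # newest window first, one Set of row IDs per query
  end

  # `rows` stands in for the entity IDs one query window returns.
  def filter(rows)
    current = Set.new
    @seen_per_window.unshift(current)
    fresh = rows.reject { |id| @seen_per_window.any? { |seen| seen.include?(id) } }
    fresh.each { |id| current.add(id) }
    @seen_per_window.pop while @seen_per_window.length > @past_queries_count
    fresh
  end
end

dedup = RollingDedup.new(5)
p dedup.filter(%w[a b]) # => ["a", "b"]
p dedup.filter(%w[b c]) # => ["c"] -- the late-arriving "c" is emitted exactly once
```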
data/lib/logstash/inputs/azurewadtable.rb CHANGED
@@ -2,7 +2,8 @@
 require "logstash/inputs/base"
 require "logstash/namespace"
 require "time"
-require "azure"
+require "azure/storage"
+require "set"
 
 class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
   class Interrupted < StandardError; end
@@ -14,7 +15,7 @@ class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
   config :access_key, :validate => :string
   config :table_name, :validate => :string
   config :entity_count_to_process, :validate => :string, :default => 100
-  config :collection_start_time_utc, :validate => :string, :default => Time.now.utc.iso8601
+  config :collection_start_time_utc, :validate => :string, :default => nil #the actual value is set in the ctor (now - data_latency_minutes - 1)
   config :etw_pretty_print, :validate => :boolean, :default => false
   config :idle_delay_seconds, :validate => :number, :default => 15
   config :endpoint, :validate => :string, :default => "core.windows.net"
@@ -23,23 +24,37 @@ class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
   # See issue #23 for more: https://github.com/Azure/azure-diagnostics-tools/issues/23
   config :data_latency_minutes, :validate => :number, :default => 1
 
+  # Number of past queries to be run, so we don't miss late arriving data
+  config :past_queries_count, :validate => :number, :default => 5
+
   TICKS_SINCE_EPOCH = Time.utc(0001, 01, 01).to_i * 10000000
 
+  INITIAL_QUERY_SPLIT_PERIOD_MINUTES = 30
+
   def initialize(*args)
     super(*args)
+    if @collection_start_time_utc.nil?
+      @collection_start_time_utc = (Time.now - ( 60 * @data_latency_minutes) - 60).iso8601
+      @logger.debug("collection_start_time_utc = #{@collection_start_time_utc}")
+    end
   end # initialize
 
   public
   def register
-    Azure.configure do |config|
-      config.storage_account_name = @account_name
-      config.storage_access_key = @access_key
-      config.storage_table_host = "https://#{@account_name}.table.#{@endpoint}"
-    end
-    @azure_table_service = Azure::Table::TableService.new
+    user_agent = "logstash-input-azurewadtable"
+    user_agent << "/" << Gem.latest_spec_for("logstash-input-azurewadtable").version.to_s
+
+    @client = Azure::Storage::Client.create(
+      :storage_account_name => @account_name,
+      :storage_access_key => @access_key,
+      :storage_table_host => "https://#{@account_name}.table.#{@endpoint}",
+      :user_agent_prefix => user_agent)
+    @azure_table_service = @client.table_client
+
     @last_timestamp = @collection_start_time_utc
     @idle_delay = @idle_delay_seconds
-    @continuation_token = nil
+    @duplicate_detector = DuplicateDetector.new(@logger, @past_queries_count)
+    @first_run = true
   end # register
 
   public
@@ -47,7 +62,7 @@ class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
     while !stop?
       @logger.debug("Starting process method @" + Time.now.to_s);
       process(output_queue)
-      @logger.debug("Starting delay of: " + @idle_delay_seconds.to_s + " seconds @" + Time.now.to_s);
+      @logger.debug("Starting delay of: " + @idle_delay.to_s + " seconds @" + Time.now.to_s);
       sleep @idle_delay
     end # while
   end # run
@@ -58,96 +73,105 @@ class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
 
   def build_latent_query
     @logger.debug("from #{@last_timestamp} to #{@until_timestamp}")
+    if @last_timestamp > @until_timestamp
+      @logger.debug("last_timestamp is in the future. Will not run any query!")
+      return nil
+    end
     query_filter = "(PartitionKey gt '#{partitionkey_from_datetime(@last_timestamp)}' and PartitionKey lt '#{partitionkey_from_datetime(@until_timestamp)}')"
     for i in 0..99
       query_filter << " or (PartitionKey gt '#{i.to_s.rjust(19, '0')}___#{partitionkey_from_datetime(@last_timestamp)}' and PartitionKey lt '#{i.to_s.rjust(19, '0')}___#{partitionkey_from_datetime(@until_timestamp)}')"
     end # for block
     query_filter = query_filter.gsub('"','')
-    query_filter
-  end
-
-  def build_zero_latency_query
-    @logger.debug("from #{@last_timestamp} to most recent data")
-    # query data using start_from_time
-    query_filter = "(PartitionKey gt '#{partitionkey_from_datetime(@last_timestamp)}')"
-    for i in 0..99
-      query_filter << " or (PartitionKey gt '#{i.to_s.rjust(19, '0')}___#{partitionkey_from_datetime(@last_timestamp)}' and PartitionKey lt '#{i.to_s.rjust(19, '0')}___9999999999999999999')"
-    end # for block
-    query_filter = query_filter.gsub('"','')
-    query_filter
+    return AzureQuery.new(@logger, @azure_table_service, @table_name, query_filter, @last_timestamp.to_s + "-" + @until_timestamp.to_s, @entity_count_to_process)
   end
 
   def process(output_queue)
-    if @data_latency_minutes > 0
-      @until_timestamp = (Time.now - (60 * @data_latency_minutes)).iso8601 unless @continuation_token
-      query_filter = build_latent_query
-    else
-      query_filter = build_zero_latency_query
+    @until_timestamp = (Time.now - (60 * @data_latency_minutes)).iso8601
+    last_good_timestamp = nil
+
+    # split first query so we don't fetch old data several times for no reason
+    if @first_run
+      @first_run = false
+      diff = DateTime.iso8601(@until_timestamp).to_time - DateTime.iso8601(@last_timestamp).to_time
+      if diff > INITIAL_QUERY_SPLIT_PERIOD_MINUTES * 60
+        @logger.debug("Splitting initial query in two")
+        original_until = @until_timestamp
+
+        @until_timestamp = (DateTime.iso8601(@until_timestamp).to_time - INITIAL_QUERY_SPLIT_PERIOD_MINUTES * 60).iso8601
+
+        query = build_latent_query
+        @duplicate_detector.filter_duplicates(query, ->(entity) {
+          on_new_data(entity, output_queue, last_good_timestamp)
+        }, false)
+
+        @last_timestamp = (DateTime.iso8601(@until_timestamp).to_time - 1).iso8601
+        @until_timestamp = original_until
+      end
     end
-    @logger.debug("Query filter: " + query_filter)
-    query = { :top => @entity_count_to_process, :filter => query_filter, :continuation_token => @continuation_token }
-    result = @azure_table_service.query_entities(@table_name, query)
-    @continuation_token = result.continuation_token
-
-    if result and result.length > 0
-      @logger.debug("#{result.length} results found.")
-      last_good_timestamp = nil
-      result.each do |entity|
-        event = LogStash::Event.new(entity.properties)
-        event.set("type", @table_name)
-
-        # Help pretty print etw files
-        if (@etw_pretty_print && !event.get("EventMessage").nil? && !event.get("Message").nil?)
-          @logger.debug("event: " + event.to_s)
-          eventMessage = event.get("EventMessage").to_s
-          message = event.get("Message").to_s
-          @logger.debug("EventMessage: " + eventMessage)
-          @logger.debug("Message: " + message)
-          if (eventMessage.include? "%")
-            @logger.debug("starting pretty print")
-            toReplace = eventMessage.scan(/%\d+/)
-            payload = message.scan(/(?<!\\S)([a-zA-Z]+)=(\"[^\"]*\")(?!\\S)/)
-            # Split up the format string to seperate all of the numbers
-            toReplace.each do |key|
-              @logger.debug("Replacing key: " + key.to_s)
-              index = key.scan(/\d+/).join.to_i
-              newValue = payload[index - 1][1]
-              @logger.debug("New Value: " + newValue)
-              eventMessage[key] = newValue
-            end # do block
-            event.set("EventMessage", eventMessage)
-            @logger.debug("pretty print end. result: " + event.get("EventMessage").to_s)
-          end
-        end
-        decorate(event)
-        if event.get('PreciseTimeStamp').is_a?(Time)
-          event.set('PreciseTimeStamp', LogStash::Timestamp.new(event.get('PreciseTimeStamp')))
-        end
-        theTIMESTAMP = event.get('TIMESTAMP')
-        if theTIMESTAMP.is_a?(LogStash::Timestamp)
-          last_good_timestamp = theTIMESTAMP.to_iso8601
-        elsif theTIMESTAMP.is_a?(Time)
-          last_good_timestamp = theTIMESTAMP.iso8601
-          event.set('TIMESTAMP', LogStash::Timestamp.new(theTIMESTAMP))
-        else
-          @logger.warn("Found result with invalid TIMESTAMP. " + event.to_hash.to_s)
-        end
-        output_queue << event
-      end # each block
-      @idle_delay = 0
+
+    query = build_latent_query
+    filter_result = @duplicate_detector.filter_duplicates(query, ->(entity) {
+      last_good_timestamp = on_new_data(entity, output_queue, last_good_timestamp)
+    })
+
+    if filter_result
       if (!last_good_timestamp.nil?)
-        @last_timestamp = last_good_timestamp unless @continuation_token
+        @last_timestamp = last_good_timestamp
       end
     else
       @logger.debug("No new results found.")
-      @idle_delay = @idle_delay_seconds
-    end # if block
+    end
 
   rescue => e
-    @logger.error("Oh My, An error occurred.", :exception => e)
+    @logger.error("Oh My, An error occurred. Error:#{e}: Trace: #{e.backtrace}", :exception => e)
     raise
   end # process
 
+  def on_new_data(entity, output_queue, last_good_timestamp)
+    #@logger.debug("new event")
+    event = LogStash::Event.new(entity.properties)
+    event.set("type", @table_name)
+
+    # Help pretty print etw files
+    if (@etw_pretty_print && !event.get("EventMessage").nil? && !event.get("Message").nil?)
+      @logger.debug("event: " + event.to_s)
+      eventMessage = event.get("EventMessage").to_s
+      message = event.get("Message").to_s
+      @logger.debug("EventMessage: " + eventMessage)
+      @logger.debug("Message: " + message)
+      if (eventMessage.include? "%")
+        @logger.debug("starting pretty print")
+        toReplace = eventMessage.scan(/%\d+/)
+        payload = message.scan(/(?<!\\S)([a-zA-Z]+)=(\"[^\"]*\")(?!\\S)/)
+        # Split up the format string to seperate all of the numbers
+        toReplace.each do |key|
+          @logger.debug("Replacing key: " + key.to_s)
+          index = key.scan(/\d+/).join.to_i
+          newValue = payload[index - 1][1]
+          @logger.debug("New Value: " + newValue)
+          eventMessage[key] = newValue
+        end # do block
+        event.set("EventMessage", eventMessage)
+        @logger.debug("pretty print end. result: " + event.get("EventMessage").to_s)
+      end
+    end
+    decorate(event)
+    if event.get('PreciseTimeStamp').is_a?(Time)
+      event.set('PreciseTimeStamp', LogStash::Timestamp.new(event.get('PreciseTimeStamp')))
+    end
+    theTIMESTAMP = event.get('TIMESTAMP')
+    if theTIMESTAMP.is_a?(LogStash::Timestamp)
+      last_good_timestamp = theTIMESTAMP.to_iso8601
+    elsif theTIMESTAMP.is_a?(Time)
+      last_good_timestamp = theTIMESTAMP.iso8601
+      event.set('TIMESTAMP', LogStash::Timestamp.new(theTIMESTAMP))
+    else
+      @logger.warn("Found result with invalid TIMESTAMP. " + event.to_hash.to_s)
+    end
+    output_queue << event
+    return last_good_timestamp
+  end
+
   # Windows Azure Diagnostic's algorithm for determining the partition key based on time is as follows:
   # 1. Take time in UTC without seconds.
   # 2. Convert it into .net ticks
@@ -155,7 +179,7 @@ class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
   def partitionkey_from_datetime(time_string)
     collection_time = Time.parse(time_string)
     if collection_time
-      @logger.debug("collection time parsed successfully #{collection_time}")
+      #@logger.debug("collection time parsed successfully #{collection_time}")
     else
       raise(ArgumentError, "Could not parse the time_string")
     end # if else block
@@ -167,8 +191,154 @@ class LogStash::Inputs::AzureWADTable < LogStash::Inputs::Base
 
   # Convert time to ticks
   def to_ticks(time_to_convert)
-    @logger.debug("Converting time to ticks")
+    #@logger.debug("Converting time to ticks")
     time_to_convert.to_i * 10000000 - TICKS_SINCE_EPOCH
   end # to_ticks
 
 end # LogStash::Inputs::AzureWADTable
+
+class AzureQuery
+  def initialize(logger, azure_table_service, table_name, query_str, query_id, entity_count_to_process)
+    @logger = logger
+    @query_str = query_str
+    @query_id = query_id
+    @entity_count_to_process = entity_count_to_process
+    @azure_table_service = azure_table_service
+    @table_name = table_name
+    @continuation_token = nil
+  end
+
+  def reset
+    @continuation_token = nil
+  end
+
+  def id
+    return @query_id
+  end
+
+  def run(on_result_cbk)
+    results_found = false
+    @logger.debug("[#{@query_id}]Query filter: " + @query_str)
+    begin
+      @logger.debug("[#{@query_id}]Running query. continuation_token: #{@continuation_token}")
+      query = { :top => @entity_count_to_process, :filter => @query_str, :continuation_token => @continuation_token }
+      result = @azure_table_service.query_entities(@table_name, query)
+
+      if result and result.length > 0
+        results_found = true
+        @logger.debug("[#{@query_id}] #{result.length} results found.")
+        result.each do |entity|
+          on_result_cbk.call(entity)
+        end
+      end
+
+      @continuation_token = result.continuation_token
+    end until !@continuation_token
+
+    return results_found
+  end
+end
+
+class QueryData
+  def initialize(logger, query)
+    @logger = logger
+    @query = query
+    @results_cache = Set.new
+  end
+
+  def id
+    return @query.id
+  end
+
+  def get_unique_id(entity)
+    uniqueId = ""
+    partitionKey = entity.properties["PartitionKey"]
+    rowKey = entity.properties["RowKey"]
+    uniqueId << partitionKey << "#" << rowKey
+    return uniqueId
+  end
+
+  def run_query(on_new_entity_cbk)
+    @query.reset
+    @query.run( ->(entity) {
+      uniqueId = get_unique_id(entity)
+
+      if @results_cache.add?(uniqueId).nil?
+        @logger.debug("[#{@query.id}][QueryData] #{uniqueId} already processed")
+      else
+        @logger.debug("[#{@query.id}][QueryData] #{uniqueId} new item")
+        on_new_entity_cbk.call(entity)
+      end
+    })
+  end
+
+  def has_entity(entity)
+    return @results_cache.include?(get_unique_id(entity))
+  end
+
+end
+
+class DuplicateDetector
+  def initialize(logger, past_queries_count)
+    @logger = logger
+    @past_queries_count = past_queries_count
+    @query_cache = []
+  end
+
+  def filter_duplicates(query, on_new_item_ckb, should_cache_query = true)
+    if query.nil?
+      @logger.debug("query is nil")
+      return false
+    end
+    #push in front, pop from the back
+    latest_query = QueryData.new(@logger, query)
+    @query_cache.insert(0, latest_query)
+
+    found_new_items = false
+
+    # results is most likely empty or has very few items for older queries (most or all should be de-duplicated by run_query)
+    index = 0
+    @query_cache.each do |query_data|
+      query_data.run_query(->(entity) {
+        unique_id = query_data.get_unique_id(entity)
+
+        # queries overlap. Check for duplicates in all results
+        is_duplicate = false
+        for j in 0..@query_cache.length - 1
+          if j == index
+            next
+          end
+          q = @query_cache[j]
+          if q.has_entity(entity)
+            @logger.debug("[#{query_data.id}][filter_duplicates] #{unique_id} was already processed by #{q.id}")
+            is_duplicate = true
+            break
+          end
+        end
+
+        if !is_duplicate
+          found_new_items = true
+          @logger.debug("[#{query_data.id}][filter_duplicates] #{unique_id} new item")
+          on_new_item_ckb.call(entity)
+        end
+
+      })
+
+      index+=1
+    end
+
+    if !should_cache_query
+      @logger.debug("Removing first item from queue")
+      @query_cache.shift
+    end
+
+    @logger.debug("Query Cache length: #{@query_cache.length}")
+    until @query_cache.length <= @past_queries_count do
+      @query_cache.pop
+      @logger.debug("New Query Cache length: #{@query_cache.length}")
+    end
+
+    return found_new_items
+  end
+
+end
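For reference, the TICKS_SINCE_EPOCH / to_ticks pair retained above implements the partition-key scheme described in the comments before partitionkey_from_datetime: UTC time expressed as .NET ticks (100 ns units counted from 0001-01-01). A standalone check of that arithmetic, runnable outside Logstash:

```ruby
require "time"

# Same arithmetic as the plugin's to_ticks (copied from the diff above):
# shift Unix seconds onto the .NET tick timeline, 10,000,000 ticks per second.
TICKS_SINCE_EPOCH = Time.utc(1, 1, 1).to_i * 10000000

def to_ticks(time_to_convert)
  time_to_convert.to_i * 10000000 - TICKS_SINCE_EPOCH
end

t = Time.parse("2017-10-02T00:00:00Z")
puts to_ticks(t) # => 636424992000000000
```

Because these tick counts grow monotonically with time, build_latent_query can bound a time window with plain gt/lt comparisons on the tick-derived PartitionKey values.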
data/logstash-input-azurewadtable.gemspec CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-input-azurewadtable'
-  s.version = '0.9.11'
+  s.version = '0.9.12'
   s.licenses = ['Apache License (2.0)']
   s.summary = "This plugin collects Microsoft Azure Diagnostics data from Azure Storage Tables."
   s.description = "This gem is a Logstash plugin. It reads and parses diagnostics data from Azure Storage Tables."
@@ -19,7 +19,7 @@ Gem::Specification.new do |s|
 
   # Gem dependencies
   s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
-  s.add_runtime_dependency 'azure', '~> 0.7.3'
+  s.add_runtime_dependency 'azure-storage', '~> 0.13.0.preview'
   s.add_development_dependency 'logstash-devutils', '>= 1.1.0'
 end
 
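Since the runtime dependency moved from the monolithic azure gem to azure-storage, which was still a preview release at this point, a downstream project consuming the gem directly would need the same prerelease-tolerant pin. A hypothetical Gemfile sketch (the consumer project itself is an assumption):

```ruby
# Gemfile of a hypothetical consumer; the "~> 0.13.0.preview" pin mirrors
# the gemspec so Bundler will accept the prerelease azure-storage gem.
source "https://rubygems.org"

gem "logstash-input-azurewadtable", "~> 0.9.12"
gem "azure-storage", "~> 0.13.0.preview"
```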
metadata CHANGED
@@ -1,18 +1,17 @@
 --- !ruby/object:Gem::Specification
 name: logstash-input-azurewadtable
 version: !ruby/object:Gem::Version
-  version: 0.9.11
+  version: 0.9.12
 platform: ruby
 authors:
 - Microsoft Corporation
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-12-08 00:00:00.000000000 Z
+date: 2017-10-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
-  name: logstash-core-plugin-api
-  version_requirements: !ruby/object:Gem::Requirement
+  requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
@@ -20,7 +19,10 @@ dependencies:
     - - "<="
       - !ruby/object:Gem::Version
         version: '2.99'
-  requirement: !ruby/object:Gem::Requirement
+  name: logstash-core-plugin-api
+  prerelease: false
+  type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
@@ -28,37 +30,36 @@ dependencies:
     - - "<="
       - !ruby/object:Gem::Version
         version: '2.99'
-  prerelease: false
-  type: :runtime
 - !ruby/object:Gem::Dependency
-  name: azure
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.7.3
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.7.3
+        version: 0.13.0.preview
+  name: azure-storage
   prerelease: false
   type: :runtime
-- !ruby/object:Gem::Dependency
-  name: logstash-devutils
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ">="
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: 1.1.0
+        version: 0.13.0.preview
+- !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
         version: 1.1.0
+  name: logstash-devutils
   prerelease: false
   type: :development
-description: This gem is a Logstash plugin. It reads and parses diagnostics data from Azure Storage Tables.
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 1.1.0
+description: This gem is a Logstash plugin. It reads and parses diagnostics data from
+  Azure Storage Tables.
 email: azdiag@microsoft.com
 executables: []
 extensions: []
@@ -93,9 +94,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.8
+rubygems_version: 2.6.13
 signing_key:
 specification_version: 4
-summary: This plugin collects Microsoft Azure Diagnostics data from Azure Storage Tables.
+summary: This plugin collects Microsoft Azure Diagnostics data from Azure Storage
+  Tables.
 test_files:
 - spec/inputs/azurewadtable_spec.rb