logstash-output-application_insights 0.1.3 → 0.1.4

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 2992e3a202401238e6ce64bb81b14e36d215a13b
4
- data.tar.gz: ef6fd4ac9fce577765515ab7fb35f964101b2275
3
+ metadata.gz: 12c164429fdbd439168dab5277c28185ce2e4985
4
+ data.tar.gz: 44048dcef57a0bbe542eb8a3b68811f968eb8352
5
5
  SHA512:
6
- metadata.gz: fccb9334a9591040d6bc9255a8e620517cbb2aa6e1907bd8167eb3235c829b8702c742f31faedb82088c03ac8dbec2c7d2a96ff96e4aa1234754fa462a6b7158
7
- data.tar.gz: 066f4e308ab6df69ddc1ce574f987fc82d8a9dcdef9e72dbdd2bf7680d892908f44ba04c0f53323751755114be6fb4669538324ad62bf0567dca6d1834fdf0ce
6
+ metadata.gz: 8388e00a758efb2fb30215ace69730d28690b573c66ee901be7dd635f16660b5a2236ef0f1bb4c0665924b54e8a3150ea868ce8c1771f42409af3cd5ed251ec3
7
+ data.tar.gz: cc27c111592a9bd9aca93fb8f697219eb58698ae7691222b43ff98313ce1cd365263feb9fd19fbfbcaa07cca98a9a114c2e0eb3b1a25319b151d7f789f515880
data/README.md CHANGED
@@ -1,9 +1,12 @@
1
1
  # Microsoft Application Insights Output Plugin for Logstash
2
-
3
2
  [![GitHub version](https://badge.fury.io/gh/microsoft%2Flogstash-output-application-insights.svg)](https://badge.fury.io/gh/microsoft%2Flogstash-output-application-insights)
4
3
  [![Gem Version](https://badge.fury.io/rb/logstash-output-application_insights.svg)](https://badge.fury.io/rb/logstash-output-application_insights)
4
+
5
+ * This project is a plugin for [Logstash](https://github.com/elastic/logstash).
6
+
7
+ * This plugin has to be installed on top of the Logstash core pipeline. It is not a stand-alone program.
5
8
 
6
- This project is a plugin for [Logstash](https://github.com/elastic/logstash).
9
+ * **This plugin outputs events to Microsoft Application Insights Analytics open schema tables.**
7
10
 
8
11
  # Plugin Features
9
12
 
@@ -49,7 +52,7 @@ filter {
49
52
  }
50
53
  output {
51
54
  application_insights {
52
- intrumentation_key => "5a6714a3-ec7b-4999-ab96-232f1da92059"
55
+ instrumentation_key => "5a6714a3-ec7b-4999-ab96-232f1da92059"
53
56
  table_id => "c24394e1-f077-420e-8a25-ef6fdf045938"
54
57
  storage_account_name_key => [ "my-storage-account", "pfrYTwPgKyYNfKBY2QdF+v5sbgx8/eAQp+FFkGpPBnkMDE1k+ZNK3r3qIPqqw8UsOIUqaF3dXBdPDouGJuxNXQ==" ]
55
58
  }
@@ -147,12 +150,12 @@ example:
147
150
  azure_storage_blob_prefix => "myprefix"
148
151
  ```
149
152
 
150
- ### intrumentation_key
151
- Default Application Insights Analytics intrumentation_key. No default
153
+ ### instrumentation_key
154
+ Default Application Insights Analytics instrumentation_key. No default
152
155
  It will be used only in case the key is not specified in the tables property associated to a table_id, or as field or metadata fields in the event
153
156
  example:
154
157
  ```ruby
155
- intrumentation_key => "5A6714A3-EC7B-4999-AB96-232F1DA92059"
158
+ instrumentation_key => "5A6714A3-EC7B-4999-AB96-232F1DA92059"
156
159
  ```
157
160
 
158
161
  ### table_id
@@ -358,11 +361,11 @@ example:
358
361
  ```ruby
359
362
  delete_not_notified_blobs => true
360
363
  ```
361
- ### validate_endpoint
364
+ ### validate_notification
362
365
  When set to true, access to application insights will be validated at initialization and if validation fails, the logstash process will abort. Default false
363
366
  example:
364
367
  ```ruby
365
- validate_endpoint => true
368
+ validate_notification => true
366
369
  ```
367
370
 
368
371
  ### validate_storage
@@ -404,22 +407,32 @@ example:
404
407
  stop_on_unknown_io_errors => true
405
408
  ```
406
409
 
407
- ### notification_endpoint
408
- when set notification are sent to an alternative endpoint. Default "https://dc.services.visualstudio.com/v2/track"
409
- Used for troubleshooting
410
+ ### azure_storage_host_suffix
411
+ when set, an alternative storage service will be used. Default "core.windows.net"
410
412
  example:
411
413
  ```ruby
412
- stop_on_unknown_io_errors => true
414
+ azure_storage_host_suffix => "core.windows.net"
415
+ ```
416
+
417
+ ### application_insights_endpoint
418
+ when set, blob ready notifications are sent to an alternative endpoint. Default "https://dc.services.visualstudio.com/v2/track"
419
+ example:
420
+ ```ruby
421
+ application_insights_endpoint => "https://dc.services.visualstudio.com/v2/track"
413
422
  ```
414
423
 
415
424
  ### notification_version
416
425
  Advanced, internal, should not be set, the only current valid value is 1
426
+ example:
427
+ ```ruby
428
+ notification_version => 1
429
+ ```
417
430
 
418
431
  ### tables
419
432
  Allow to support multiple tables, and to configure each table with its own parameters, using the global parameters as defaults.
420
433
  It is only required if the plugin needs to support multiple tables.
421
434
  Tables is a Hash, where the key is the table_id and the value is a hash of specific properties, whose default values are the global properties.
422
- The specific properties are: intrumentation_key, table_columns, blob_max_delay, csv_default_value, serialized_event_field, blob_serialization, csv_separator
435
+ The specific properties are: instrumentation_key, table_columns, blob_max_delay, csv_default_value, serialized_event_field, blob_serialization, csv_separator
423
436
  template:
424
437
  ```ruby
425
438
  tables => { "table_id1" => { properties } "table_id2" => { properties } }
@@ -427,12 +440,12 @@ tables => { "table_id1" => { properties } "table_id2" => { properties } }
427
440
 
428
441
  Examples:
429
442
  ```ruby
430
- tables => { "6f29a89e-1385-4317-85af-3ac1cea48058" => { "intrumentation_key" => "76c3b8e9-dfc6-4afd-8d4c-3b02fdadb19f", "blob_max_delay" => 60 } }
443
+ tables => { "6f29a89e-1385-4317-85af-3ac1cea48058" => { "instrumentation_key" => "76c3b8e9-dfc6-4afd-8d4c-3b02fdadb19f", "blob_max_delay" => 60 } }
431
444
  ```
432
445
 
433
446
  ```ruby
434
- tables => { "6f29a89e-1385-4317-85af-3ac1cea48058" => { "intrumentation_key" => "76c3b8e9-dfc6-4afd-8d4c-3b02fdadb19f", "blob_max_delay" => 60 }
435
- "2e1b46aa-56d2-4e13-a742-d0db516d66fc" => { "intrumentation_key" => "76c3b8e9-dfc6-4afd-8d4c-3b02fdadb19f", "blob_max_delay" => 120 "ext" => "csv" "serialized_event_field" => "message" }
447
+ tables => { "6f29a89e-1385-4317-85af-3ac1cea48058" => { "instrumentation_key" => "76c3b8e9-dfc6-4afd-8d4c-3b02fdadb19f", "blob_max_delay" => 60 }
448
+ "2e1b46aa-56d2-4e13-a742-d0db516d66fc" => { "instrumentation_key" => "76c3b8e9-dfc6-4afd-8d4c-3b02fdadb19f", "blob_max_delay" => 120 "ext" => "csv" "serialized_event_field" => "message" }
436
449
  }
437
450
  ```
438
451
 
@@ -54,6 +54,12 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
54
54
  require "logstash/outputs/application_insights/blob"
55
55
  autoload :Block, "logstash/outputs/application_insights/block"
56
56
  autoload :Storage_cleanup, "logstash/outputs/application_insights/storage_cleanup"
57
+ autoload :Shutdown_recovery, "logstash/outputs/application_insights/shutdown_recovery"
58
+ autoload :Storage_recovery, "logstash/outputs/application_insights/storage_recovery"
59
+ autoload :Notification_recovery, "logstash/outputs/application_insights/notification_recovery"
60
+ autoload :Validate_storage, "logstash/outputs/application_insights/validate_storage"
61
+ autoload :Validate_notification, "logstash/outputs/application_insights/validate_notification"
62
+
57
63
 
58
64
  autoload :Clients, "logstash/outputs/application_insights/clients"
59
65
  autoload :Client, "logstash/outputs/application_insights/client"
@@ -101,10 +107,10 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
101
107
  # string may include only characters that are allowed in any valid url
102
108
  config :azure_storage_blob_prefix, :validate => :string
103
109
 
104
- # Default Application Insights Analytics intrumentation_key
110
+ # Default Application Insights Analytics instrumentation_key
105
111
  # will be used only in case it is not specified as a table_id property in tables
106
112
  # or as part of the event's fields or event's metadata fields
107
- config :intrumentation_key, :validate => :string
113
+ config :instrumentation_key, :validate => :string
108
114
 
109
115
  # Default Application Insights Analytics table_id
110
116
  # will be used only in case it is not specified as part o
@@ -121,8 +127,8 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
121
127
  # A hash of table_ids, where each table_id points to a set of properties
122
128
  # the properties are a hash, where the keys are are the properties
123
129
  # current supported properties per table_id are:
124
- # intrumentation_key, ext, table_columns, csv_default_value, csv_separator, blob_max_delay, event_separator, serialized_event_field
125
- # intrumentation_key, Application Insights Analytics intrumentation_key, will be used in case not specified in any of the event's fields or events's metadata fileds
130
+ # instrumentation_key, ext, table_columns, csv_default_value, csv_separator, blob_max_delay, event_separator, serialized_event_field
131
+ # instrumentation_key, Application Insights Analytics instrumentation_key, will be used in case not specified in any of the event's fields or event's metadata fields
126
132
  # serialized_event_field, specifies the field that may contain the full serialized event (either as json or csv),
127
133
  # when specified, the ext property should be set either to csv or to json (json is the default)
128
134
  # if event does not contain the field, value will be created based on the fields in the event, according to table_columns if configured, or all fields in the event
@@ -150,7 +156,7 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
150
156
  # can be specified only together with table_columns
151
157
  #
152
158
  # Example json table_id
153
- # tables => {"a679fbd2-702c-4c46-8548-80082c66ef28" => {"intrumentation_key" => "abee940b-e648-4242-b6b3-f2826667bf96", "blob_max_delay" => 60} }
159
+ # tables => {"a679fbd2-702c-4c46-8548-80082c66ef28" => {"instrumentation_key" => "abee940b-e648-4242-b6b3-f2826667bf96", "blob_max_delay" => 60} }
154
160
  # Example json table_id, input in serialized_event_field
155
161
  # {"ab6a3584-aef0-4a82-8725-2f2336e59f3e" => {"serialized_event_field" => "message". "ext" => "json"} }
156
162
  # Example csv table_id, input in serialized_event_field
@@ -271,25 +277,30 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
271
277
  # When set to true, process will stop if an unknown IO error is found
272
278
  config :stop_on_unknown_io_errors, :validate => :boolean
273
279
 
274
- # Advanced, internal, should not be set, the default is Application Insights production endpoint
275
280
  # when set, notifications are sent to an alternative endpoint, used for internal testing
276
- config :notification_endpoint, :validate => :string
281
+ config :application_insights_endpoint, :validate => :string
282
+
283
+ # when set an alternative storage service will be used.
284
+ config :azure_storage_host_suffix, :validate => :string
277
285
 
278
286
  # Advanced, internal, should not be set, the only current valid value is 1
279
287
  config :notification_version, :validate => :number
280
288
 
281
289
  # When set to true, access to application insights will be validated at initialization
282
290
  # and if validation fail, logstash process will abort
283
- config :validate_endpoint, :validate => :boolean, :default => true
291
+ config :validate_notification, :validate => :boolean
284
292
 
285
293
  # When set to true, access to azure storage for each of the configured accounts will be validated at initialization
286
294
  # and if validation fail, logstash process will abort
287
- config :validate_storage, :validate => :boolean, :default => true
295
+ config :validate_storage, :validate => :boolean
288
296
 
289
297
  public
290
298
 
291
299
  def register
292
300
 
301
+ # logstash define: @original_params = original_params
302
+ # logstash define: @config = params
303
+
293
304
  # set configuration
294
305
  Config.validate_and_adjust_configuration( default_configuration )
295
306
  configuration = Config.current
@@ -305,9 +316,27 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
305
316
  configuration[:telemetry_channel] = @telemetry.telemetry_channel
306
317
 
307
318
  Timer.config( configuration )
308
- Blob.config( configuration )
309
- Blob.validate_endpoint if @validate_endpoint
310
- Blob.validate_storage if @validate_storage
319
+
320
+ if @validate_notification
321
+ status = Validate_notification.new.validate
322
+ raise ConfigurationError, "Failed to access application insights at #{configuration[:application_insights_endpoint]}, due to error #{status[:error].inspect}" unless status[:success]
323
+ end
324
+
325
+ if @validate_storage
326
+ result = Validate_storage.new.validate
327
+ result.each do |storage_account_name, status|
328
+ raise ConfigurationError, "Failed access azure storage account #{storage_account_name}, due to error #{status[:error].inspect}" unless status[:success]
329
+ end
330
+ end
331
+
332
+ @notification_recovery = Notification_recovery.instance
333
+ @notification_recovery.start
334
+
335
+ @storage_recovery = Storage_recovery.instance
336
+ @storage_recovery.start
337
+
338
+ @shutdown_recovery = Shutdown_recovery.instance
339
+ @shutdown_recovery.start
311
340
 
312
341
  @shutdown = Shutdown.instance
313
342
  @channels = Channels.instance
@@ -320,7 +349,7 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
320
349
  # @channels.receive( event, encoded_event )
321
350
  # end
322
351
 
323
- Telemetry.instance.track_event("register", {:properties => configuration})
352
+ @telemetry.track_event("register", {:properties => configuration})
324
353
 
325
354
 
326
355
  return "ok\n"
@@ -334,60 +363,16 @@ class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
334
363
  end
335
364
 
336
365
  def close
337
- Telemetry.instance.track_event( "close" )
338
- Telemetry.instance.flush
366
+ @telemetry.track_event( "close" )
367
+ @telemetry.flush
368
+ @shutdown_recovery.close
369
+ @storage_recovery.close
370
+ @notification_recovery.close
339
371
  @shutdown.submit
340
372
  end
341
373
 
342
374
  private
343
375
 
344
376
  # -----------------------------------------------
345
-
346
-
347
- def list_blob_names
348
- blob_names = Set.new []
349
- loop do
350
- continuation_token = NIL
351
- entries = @azure_blob.list_blobs(@container, { :timeout => 10, :marker => continuation_token})
352
- @@logger.debug { 'blob entries: #{entries}' }
353
- entries.each do |entry|
354
- @@logger.debug { 'blob entry name: #{entry.name}' }
355
- blob_names << entry.name
356
- end
357
- continuation_token = entries.continuation_token
358
- break if continuation_token.empty?
359
- end
360
- return blob_names
361
- end # def list_blobs
362
-
363
-
364
- def list_container_names
365
- container_names = Set.new []
366
- loop do
367
- continuation_token = NIL
368
- containers = @azure_blob.list_containers()
369
- @@logger.debug { 'containers: #{containers}' }
370
- containers.each do |container|
371
- @@logger.debug { 'container entry name:' + container.name }
372
- container_names << container.name
373
- upload(container.name, "blob-append-" + container.name, "test - " + container.name)
374
- blobs = @azure_blob.list_blobs(container.name)
375
- blobs.each do |blob|
376
- @@logger.debug { 'blob name: ' + blob.name }
377
- end
378
- end
379
- continuation_token = containers.continuation_token
380
- break if continuation_token.empty?
381
- end
382
- return container_names
383
- end # def list_blobs
384
-
385
- def create_container (container_name)
386
- begin
387
- @azure_blob.create_container(container_name)
388
- rescue
389
- @@logger.debug { $! }
390
- end
391
- end
392
377
  end
393
378
 
@@ -21,7 +21,7 @@
21
21
  class LogStash::Outputs::Application_insights
22
22
  class Blob
23
23
 
24
- attr_reader :intrumentation_key
24
+ attr_reader :instrumentation_key
25
25
  attr_reader :table_id
26
26
  attr_reader :storage_account_name
27
27
  attr_reader :container_name
@@ -36,37 +36,7 @@ class LogStash::Outputs::Application_insights
36
36
 
37
37
  public
38
38
 
39
- def self.config ( configuration )
40
- @@configuration = configuration
41
-
42
- @@logger = configuration[:logger]
43
- @@io_retry_delay = configuration[:io_retry_delay]
44
- @@io_max_retries = configuration[:io_max_retries]
45
- @@blob_max_bytesize = configuration[:blob_max_bytesize]
46
- @@blob_max_events = configuration[:blob_max_events]
47
- @@state_table_name = "#{configuration[:azure_storage_table_prefix]}#{STATE_TABLE_NAME}"
48
- @@save_notified_blobs_records = configuration[:save_notified_blobs_records]
49
-
50
- @@closing = false
51
-
52
- # queues, per storage_account_name, for failed blob commit, will continue to try resending
53
- @@failed_on_commit_retry_Qs = {}
54
- launch_storage_recovery_threads( @@failed_on_commit_retry_Qs, :commit, :io_failure )
55
- launch_storage_recovery_table_threads( :uploading )
56
-
57
- # queues, per storage_account_name, for failed notify, will continue to try resending
58
- @@failed_on_notify_retry_Qs = {}
59
- launch_storage_recovery_threads( @@failed_on_notify_retry_Qs, :notify, :notify_failed_blob_not_accessible )
60
- launch_storage_recovery_table_threads( :committed )
61
-
62
- # for failed to notify due to endpoint, will continue to try resending
63
- launch_endpoint_recovery_thread
64
-
65
- # queues, per storage_account_name, for failed to log to table, will continue to try resending
66
- @@failed_on_log_to_table_retry_Qs = {}
67
- launch_storage_recovery_threads( @@failed_on_log_to_table_retry_Qs, :log_to_table_update, :io_failure )
68
-
69
- end
39
+ @@closing = false
70
40
 
71
41
  def self.close
72
42
  @@closing = true
@@ -76,160 +46,22 @@ class LogStash::Outputs::Application_insights
76
46
  @@closing
77
47
  end
78
48
 
79
- def self.launch_endpoint_recovery_thread
80
- @@failed_on_notification_endpoint_retry_Q = Queue.new
81
- storage_recovery_thread( nil, @@failed_on_notification_endpoint_retry_Q, :notify, :io_failure )
82
- end
83
-
84
- def self.launch_storage_recovery_threads ( queues, method, failure_reason )
85
- @@configuration[:storage_account_name_key].each do |storage_account_name, storage_account_keys|
86
- queues[storage_account_name] = Queue.new
87
- # a threads, per storage account name
88
- storage_recovery_thread( storage_account_name, queues[storage_account_name], method, failure_reason )
89
- end
90
- end
91
-
92
- def self.launch_storage_recovery_table_threads ( state )
93
- @@configuration[:storage_account_name_key].each do |storage_account_name, storage_account_keys|
94
- recovery_table_thread( storage_account_name, state)
95
- end
96
- end
97
-
98
- #return thread
99
- def self.recovery_table_thread( storage_account_name, state )
100
- Thread.new( storage_account_name, state ) do |storage_account_name, state|
101
-
102
- blob = Blob.new
103
-
104
- committed_tuples = [ ]
105
- uncommitted_tuples = [ ]
106
- upload_empty_tuples = [ ]
107
- token = nil
108
- finished = false
109
- filter = "#{:PartitionKey} eq '#{@@configuration[:azure_storage_blob_prefix]}-#{state}'"
110
-
111
- # should exit thread after fetching data from table, and submit recovery, the loop is only for case of failure
112
- until finished || stopped? do
113
- entities = blob.log_to_table_query( storage_account_name, filter, token )
114
- if entities
115
- token = entities.continuation_token
116
-
117
- if :committed == state
118
- entities.each do |entity|
119
- State.instance.inc_pending_notifications
120
- tuple = blob.table_entity_to_tuple( entity.properties )
121
- @@failed_on_notification_endpoint_retry_Q << tuple
122
- end
123
-
124
- elsif :uploading == state
125
- # first tuples are collected, before send to queues, to make sure blob states don't change in between
126
- entities.each do |entity|
127
- typed_tuple = nil
128
- until typed_tuple || stopped?
129
- typed_tuple = blob.update_commited_or_uncommited_list( entity.properties )
130
- Stud.stoppable_sleep(60, 1) { stopped? } unless typed_tuple
131
- end
132
- next if stopped?
133
-
134
- if typed_tuple[:committed]
135
- committed_tuples << typed_tuple[:committed]
136
- elsif typed_tuple[:uncommitted]
137
- uncommitted_tuples << typed_tuple[:uncommitted]
138
- else
139
- upload_empty_tuples << typed_tuple[:upload_empty]
140
- end
141
- end
142
- end
143
-
144
- next if token
145
- committed_tuples.each do |tuple|
146
- State.instance.inc_pending_commits
147
- @@failed_on_log_to_table_retry_Qs[storage_account_name] << tuple
148
- end
149
- uncommitted_tuples.each do |tuple|
150
- State.instance.inc_pending_commits
151
- @@failed_on_commit_retry_Qs[storage_account_name] << tuple
152
- end
153
- upload_empty_tuples.each do |tuple|
154
- @@failed_on_log_to_table_retry_Qs[storage_account_name] << tuple
155
- end
156
- finished = true
157
- else
158
- Stud.stoppable_sleep(60, 1) { stopped? }
159
- end
160
- end
161
- @@logger.info { "exit table recovery thread, storage: #{storage_account_name}, state: #{state}, entities: #{entities ? entities.length : nil}" }
162
- end
163
- end
164
-
165
- def self.state_on? ( storage_account_name, blob, failure_reason )
166
- if blob
167
- if :io_failure == failure_reason
168
- @@endpoint_state_on ||= blob.test_notification_endpoint( @@configuration[:storage_account_name_key][0][0] )
169
- else
170
- Clients.instance.storage_account_state_on?( storage_account_name )
171
- end
172
- elsif storage_account_name
173
- Clients.instance.storage_account_state_on?( storage_account_name )
174
- else
175
- Clients.instance.storage_account_state_on?
176
- end
177
- end
178
-
179
- def self.storage_recovery_thread( storage_account_name, queue, method, failure_reason )
180
- # a threads, per storage account name, that retries failed blob commits / notification / table updates
181
- Thread.new( storage_account_name, queue, method, failure_reason ) do |storage_account_name, queue, method, failure_reason|
182
- blob = Blob.new if :notify == method
183
- semaphore = Mutex.new
184
- action = {:method => method, :semaphore => semaphore, :counter => 0 }
185
- loop do
186
- tuple ||= queue.pop
187
- until state_on?( storage_account_name, blob, failure_reason ) do sleep( 1 ) end
188
-
189
- not_busy = nil
190
- semaphore.synchronize {
191
- not_busy = action[:counter] += 1 if 10 > action[:counter]
192
- }
193
- if not_busy
194
- Thread.new( action, tuple ) do |action, tuple|
195
- Blob.new.send( action[:method], tuple )
196
- action[:semaphore].synchronize {
197
- action[:counter] -= 1
198
- }
199
- end
200
- tuple = nil # release for GC
201
- else
202
- Stud.stoppable_sleep(60, 1) { 10 > action[:counter] }
203
- next
204
- end
205
- end
206
- end
207
- end
208
-
209
- def self.validate_endpoint
210
- io = Blob.new
211
- raise ConfigurationError, "Failed to access application insights #{@@configuration[:notification_endpoint]}, due to error #{io.last_io_exception.inspect}" unless io.test_notification_endpoint( @@configuration[:storage_account_name_key][0][0] )
212
- end
213
-
214
- def self.validate_storage
215
- io = Blob.new
216
- @@configuration[:storage_account_name_key].each do |storage_account_name, storage_account_keys|
217
- raise ConfigurationError, "Failed access azure storage account #{storage_account_name}, due to error #{io.last_io_exception.inspect}" unless io.test_storage( storage_account_name )
218
- end
219
- end
220
-
221
-
222
49
  def initialize ( channel = nil, id = nil , no_queue = false )
50
+ @configuration = Config.current
51
+ @logger = @configuration[:logger]
52
+ @storage_recovery = Storage_recovery.instance
53
+ @notification_recovery = Notification_recovery.instance
54
+ @max_tries = @configuration[:io_max_retries] + 1
55
+
223
56
  @uploaded_block_ids = [ ]
224
57
  @uploaded_block_numbers = [ ]
225
58
  @uploaded_bytesize = 0
226
59
  @uploaded_events_count = 0
227
- @max_tries = @@io_max_retries + 1
228
60
  @sub_state = :none
229
61
 
230
62
  if channel
231
63
  @id = id
232
- @intrumentation_key = channel.intrumentation_key
64
+ @instrumentation_key = channel.instrumentation_key
233
65
  @table_id = channel.table_id
234
66
  @blob_max_delay = channel.blob_max_delay
235
67
 
@@ -273,14 +105,13 @@ class LogStash::Outputs::Application_insights
273
105
 
274
106
  unless to_commit
275
107
  @timer.set( block_to_upload.oldest_event_time + @blob_max_delay, nil ) {|object| @io_queue << :wakeup if 0 == @io_queue.length } if blob_empty?
276
- to_commit = :commit if blob_full?
277
- upload( block_to_upload, to_commit)
108
+ upload( block_to_upload )
278
109
  block_to_upload = nil # release reference to resource for GC
279
- else
280
- commit unless @uploaded_block_ids.empty?
110
+ to_commit = :commit if blob_full?
281
111
  end
282
112
 
283
113
  if to_commit
114
+ commit unless @uploaded_block_ids.empty?
284
115
  to_commit = nil
285
116
  @uploaded_block_ids = [ ]
286
117
  @timer.cancel
@@ -302,9 +133,9 @@ class LogStash::Outputs::Application_insights
302
133
 
303
134
  def blob_full? ( next_block = nil )
304
135
  if next_block
305
- BLOB_MAX_BLOCKS < @uploaded_block_ids.length + 1 || @@blob_max_events < @uploaded_events_count + next_block.events_count || @@blob_max_bytesize < @uploaded_bytesize + next_block.bytesize
136
+ BLOB_MAX_BLOCKS < @uploaded_block_ids.length + 1 || @configuration[:blob_max_events] < @uploaded_events_count + next_block.events_count || @configuration[:blob_max_bytesize] < @uploaded_bytesize + next_block.bytesize
306
137
  else
307
- BLOB_MAX_BLOCKS <= @uploaded_block_ids.length || @@blob_max_events <= @uploaded_events_count || @@blob_max_bytesize <= @uploaded_bytesize
138
+ BLOB_MAX_BLOCKS <= @uploaded_block_ids.length || @configuration[:blob_max_events] <= @uploaded_events_count || @configuration[:blob_max_bytesize] <= @uploaded_bytesize
308
139
  end
309
140
  end
310
141
 
@@ -334,7 +165,7 @@ class LogStash::Outputs::Application_insights
334
165
  end
335
166
 
336
167
  def table_entity_to_tuple( options = {} )
337
- [ options[:start_time.to_s] || Time.now.utc, options[:action.to_s], options[:intrumentation_key.to_s], options[:table_id.to_s],
168
+ [ options[:start_time.to_s] || Time.now.utc, options[:action.to_s], options[:instrumentation_key.to_s], options[:table_id.to_s],
338
169
  options[:storage_account_name.to_s], options[:container_name.to_s], options[:blob_name.to_s],
339
170
  eval( options[:uploaded_block_ids.to_s] ), eval( options[:uploaded_block_numbers.to_s] ),
340
171
  options[:uploaded_events_count.to_s] || 0, options[:uploaded_bytesize.to_s] || 0, options[:oldest_event_time.to_s] || Time.now.utc,
@@ -344,7 +175,7 @@ class LogStash::Outputs::Application_insights
344
175
  end
345
176
 
346
177
  def state_to_tuple
347
- [ @start_time || Time.now.utc, @action, @intrumentation_key, @table_id,
178
+ [ @start_time || Time.now.utc, @action, @instrumentation_key, @table_id,
348
179
  @storage_account_name, @container_name, @blob_name,
349
180
  @uploaded_block_ids, @uploaded_block_numbers,
350
181
  @uploaded_events_count, @uploaded_bytesize, @oldest_event_time,
@@ -354,7 +185,7 @@ class LogStash::Outputs::Application_insights
354
185
  end
355
186
 
356
187
  def tuple_to_state ( tuple )
357
- ( @start_time, @action, @intrumentation_key, @table_id,
188
+ ( @start_time, @action, @instrumentation_key, @table_id,
358
189
  @storage_account_name, @container_name, @blob_name,
359
190
  @uploaded_block_ids, @uploaded_block_numbers,
360
191
  @uploaded_events_count, @uploaded_bytesize, @oldest_event_time,
@@ -363,7 +194,7 @@ class LogStash::Outputs::Application_insights
363
194
  end
364
195
 
365
196
  def state_to_table_entity
366
- { :start_time => @start_time, :intrumentation_key => @intrumentation_key, :table_id => @table_id,
197
+ { :start_time => @start_time, :instrumentation_key => @instrumentation_key, :table_id => @table_id,
367
198
  :storage_account_name => @storage_account_name, :container_name => @container_name, :blob_name => @blob_name,
368
199
  :uploaded_block_ids => @uploaded_block_ids.to_s, :uploaded_block_numbers => @uploaded_block_numbers.to_s,
369
200
  :uploaded_events_count => @uploaded_events_count, :uploaded_bytesize => @uploaded_bytesize, :oldest_event_time => @oldest_event_time,
@@ -372,42 +203,32 @@ class LogStash::Outputs::Application_insights
372
203
  end
373
204
 
374
205
 
375
- def test_storage_recover
376
- proc do |reason, e| @recovery = :ok if :container_exist == reason || :create_container == reason end
377
- end
378
-
379
-
380
206
  def test_storage ( storage_account_name )
381
207
  @storage_account_name = storage_account_name
382
208
  @action = :test_storage
383
209
  @max_tries = 1
384
210
  @force_client = true # to enable get a client even if all storage_accounts marked dead
385
- @recoverable = [ :invalid_storage_key ]
386
- storage_io_block( test_storage_recover ) {
211
+ @recoverable = [ :invalid_storage_key, :container_exist, :create_container ]
212
+ storage_io_block {
387
213
  if @recovery.nil? || :invalid_storage_key == @recovery
388
- container_name = "logstash-test-container"
389
214
  @info = "#{@action} #{@storage_account_name}"
390
- @client.blobClient.create_container( container_name ) unless @@configuration[:disable_blob_upload]
215
+ @client.blobClient.create_container( @configuration[:test_storage_container] ) unless @configuration[:disable_blob_upload]
391
216
  end
392
217
  }
393
218
  end
394
219
 
395
- def test_notification_endpoint_recover
396
- proc do |reason, e| @recovery = :ok if :invalid_intrumentation_key == reason || :invalid_table_id == reason end
397
- end
398
-
399
- def test_notification_endpoint( storage_account_name )
220
+ def test_notification( storage_account_name )
400
221
  @storage_account_name = storage_account_name
401
- @action = :test_notification_endpoint
222
+ @action = :test_notification
402
223
  @max_tries = 1
403
224
  @force_client = true # to enable get a client even if all storage_accounts marked dead
404
- @recoverable = [ ]
405
- success = storage_io_block( test_notification_endpoint_recover ) {
225
+ @recoverable = [ :invalid_instrumentation_key, :invalid_table_id ]
226
+ success = storage_io_block {
406
227
  if @recovery.nil?
407
228
  @container_name = "logstash-test-container"
408
229
  @blob_name = "logstash-test-blob"
409
230
  @table_id = GUID_NULL
410
- @intrumentation_key = GUID_NULL
231
+ @instrumentation_key = GUID_NULL
411
232
  @info = "#{@action}"
412
233
  set_blob_sas_url
413
234
  payload = create_payload
@@ -419,24 +240,21 @@ class LogStash::Outputs::Application_insights
419
240
  end
420
241
 
421
242
 
422
- def notify_recover
423
- proc do |reason, e|
424
- if :notify_failed_blob_not_accessible == reason
425
- @sub_state = reason
426
- @@failed_on_notify_retry_Qs[@storage_account_name] << state_to_tuple
427
- elsif :invalid_intrumentation_key == reason || :invalid_table_id == reason
428
- @sub_state = reason
429
- Channels.instance.channel( @intrumentation_key, @table_id ).failed_on_notify_retry_Q << state_to_tuple
243
+ def notify_retry_later
244
+ if :notify_failed_blob_not_accessible == @recovery
245
+ @sub_state = @recovery
246
+ @storage_recovery.recover_later( state_to_tuple, :notify, @storage_account_name )
247
+ elsif :invalid_instrumentation_key == @recovery || :invalid_table_id == @recovery
248
+ @sub_state = @recovery
249
+ Channels.instance.channel( @instrumentation_key, @table_id ).failed_on_notify_retry_Q << state_to_tuple
430
250
 
251
+ else
252
+ if :notify_failed_blob_not_accessible == @sub_state
253
+ @storage_recovery.recover_later( state_to_tuple, :notify, @storage_account_name )
254
+ elsif :invalid_instrumentation_key == @sub_state || :invalid_table_id == @sub_state
255
+ Channels.instance.channel( @instrumentation_key, @table_id ).failed_on_notify_retry_Q << state_to_tuple
431
256
  else
432
- @@endpoint_state_on = false
433
- if :notify_failed_blob_not_accessible == @sub_state
434
- @@failed_on_notify_retry_Qs[@storage_account_name] << state_to_tuple
435
- elsif :invalid_intrumentation_key == @sub_state || :invalid_table_id == @sub_state
436
- Channels.instance.channel( @intrumentation_key, @table_id ).failed_on_notify_retry_Q << state_to_tuple
437
- else
438
- @@failed_on_notification_endpoint_retry_Q << state_to_tuple
439
- end
257
+ @notification_recovery.recover_later( state_to_tuple )
440
258
  end
441
259
  end
442
260
  end
@@ -446,32 +264,36 @@ class LogStash::Outputs::Application_insights
446
264
  @action = :notify
447
265
  @force_client = true # to enable get a client even if all storage_accounts marked dead
448
266
  @recoverable = [ :notify_failed_blob_not_accessible, :io_failure, :service_unavailable ]
449
- success = storage_io_block( notify_recover ) {
267
+ success = storage_io_block {
450
268
  set_blob_sas_url
451
269
  payload = create_payload
452
- @@logger.debug { "notification payload: #{payload}" }
270
+ @logger.debug { "notification payload: #{payload}" }
453
271
  @info = "#{@action.to_s} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}, blob_sas_url: #{@blob_sas_url}"
454
272
 
455
273
  # assume that exceptions can be raised due to this method:
456
- post_notification( @client.notifyClient, payload ) unless @@configuration[:disable_notification]
274
+ post_notification( @client.notifyClient, payload ) unless @configuration[:disable_notification]
457
275
  @log_state = :notified
458
276
  }
459
- log_to_table_update if success
277
+ if success
278
+ state_table_update
279
+ else
280
+ notify_retry_later
281
+ end
460
282
  end
461
283
 
462
284
  CREATE_EXIST_ERRORS = { :container => [ :create_container, :container_exist ], :table => [ :create_table, :table_exist ] }
463
285
  def create_exist_recovery( type, name = nil )
464
286
  prev_info = @info
465
287
  if CREATE_EXIST_ERRORS[type][0] == @recovery
466
- name ||= ( :table == type ? @@state_table_name : @container_name )
288
+ name ||= ( :table == type ? @configuration[:state_table_name] : @container_name )
467
289
  @info = "create #{type} #{@storage_account_name}/#{name}"
468
290
 
469
291
  # assume that exceptions can be raised due to this method:
470
292
  yield name
471
- @@logger.info { "Successed to #{@info}" }
293
+ @logger.info { "Successed to #{@info}" }
472
294
  @info = prev_info
473
295
  elsif CREATE_EXIST_ERRORS[type][1] == @recovery
474
- @@logger.info { "Successed (already exist) to #{@info}" }
296
+ @logger.info { "Successed (already exist) to #{@info}" }
475
297
  @info = prev_info
476
298
  end
477
299
  end
@@ -485,39 +307,38 @@ class LogStash::Outputs::Application_insights
485
307
  end
486
308
 
487
309
  # return true on success
488
- def log_to_table_insert
489
- @action = :log_to_table_insert
310
+ def state_table_insert
311
+ @action = :state_table_insert
490
312
  @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy, :entity_exist ]
491
313
  @info = "#{@action} #{@log_state} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
492
- success = storage_io_block( :uploading == @log_state ? proc do |reason, e| end : log_to_table_update_recover ) {
314
+ success = storage_io_block {
493
315
  create_table_exist_recovery
494
316
  if :entity_exist == @recovery
495
317
  raise NotRecoverableError if :uploading == @log_state
496
318
  else
497
319
  entity_values = state_to_table_entity
498
- entity_values[:PartitionKey] = "#{@@configuration[:azure_storage_blob_prefix]}-#{@log_state}"
499
- entity_values[:RowKey] = @blob_name
500
- @client.tableClient.insert_entity( @@state_table_name, entity_values )
320
+ entity_values[:PartitionKey] = "#{@configuration[:partition_key_prefix]}-#{@log_state}"
321
+ entity_values[:RowKey] = @blob_name.gsub("/","_")
322
+ @client.tableClient.insert_entity( @configuration[:state_table_name], entity_values )
501
323
  end
502
324
  }
325
+ @storage_recovery.recover_later( state_to_tuple, :state_table_update, @storage_account_name ) unless success || :uploading == @log_state
326
+ success
503
327
  end
504
328
 
505
- def log_to_table_update_recover
506
- proc do |reason, e| @@failed_on_log_to_table_retry_Qs[@storage_account_name] << state_to_tuple end
507
- end
508
-
509
- def log_to_table_update ( tuple = nil )
329
+ def state_table_update ( tuple = nil )
510
330
  tuple_to_state( tuple ) if tuple
511
331
  if :uploading == @log_state
512
- log_to_table_delete
332
+ state_table_delete
513
333
  elsif :committed == @log_state
514
- if log_to_table_insert && log_to_table_delete( nil, :uploading )
334
+ if state_table_insert && state_table_delete( nil, :uploading )
515
335
  State.instance.dec_pending_commits
516
336
  State.instance.inc_pending_notifications
517
- @@failed_on_notification_endpoint_retry_Q << state_to_tuple
337
+ # this is not a recovery, it is actually enqueue to notify
338
+ @notification_recovery.enqueue( state_to_tuple )
518
339
  end
519
340
  elsif :notified == @log_state
520
- if (!@@save_notified_blobs_records || log_to_table_insert) && log_to_table_delete( nil, :committed )
341
+ if (!@configuration[:save_notified_blobs_records] || state_table_insert) && state_table_delete( nil, :committed )
521
342
  State.instance.dec_pending_notifications
522
343
  end
523
344
  end
@@ -525,81 +346,81 @@ class LogStash::Outputs::Application_insights
525
346
 
526
347
 
527
348
  # retturn tru on success
528
- def log_to_table_delete ( tuple = nil, state = nil )
349
+ def state_table_delete ( tuple = nil, state = nil )
529
350
  tuple_to_state( tuple ) if tuple
530
351
  state ||= @log_state
531
- @action = :log_to_table_delete
352
+ @action = :state_table_delete
532
353
  @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy, :create_resource ]
533
354
  @info = "#{@action} #{state} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
534
355
 
535
- success = storage_io_block( log_to_table_update_recover ) {
356
+ success = storage_io_block {
536
357
  create_table_exist_recovery
537
358
  if :create_resource == @recovery
538
- @@logger.info { "Note: delete entity failed, already deleted, #{@info}, state: #{state}, log_state: #{@log_state}" }
359
+ @logger.info { "Note: delete entity failed, already deleted, #{@info}, state: #{state}, log_state: #{@log_state}" }
539
360
  else
540
- @client.tableClient.delete_entity( @@state_table_name, "#{@@configuration[:azure_storage_blob_prefix]}-#{state}", @blob_name )
361
+ @client.tableClient.delete_entity( @configuration[:state_table_name], "#{@configuration[:partition_key_prefix]}-#{state}", @blob_name.gsub( "/", "_" ) )
541
362
  end
542
363
  }
364
+ @storage_recovery.recover_later( state_to_tuple, :state_table_update, @storage_account_name ) unless success
365
+ success
543
366
  end
544
367
 
545
368
  # return entities
546
- def log_to_table_query ( storage_account_name, filter , token )
369
+ def state_table_query ( storage_account_name, filter , token )
547
370
  @storage_account_name = storage_account_name
548
371
 
549
- @action = :log_to_table_query
372
+ @action = :state_table_query
550
373
  @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy ]
551
- @info = "#{@action} #{@storage_account_name}/#{@@state_table_name}"
374
+ @info = "#{@action} #{@storage_account_name}/#{@configuration[:state_table_name]}"
552
375
 
553
376
  entities = nil
554
- success = storage_io_block( proc do |reason, e| end ) {
377
+ success = storage_io_block {
555
378
  create_table_exist_recovery
556
379
  options = { :filter => filter }
557
380
  options[:continuation_token] = token if token
558
- entities = @client.tableClient.query_entities( @@state_table_name, options )
381
+ entities = @client.tableClient.query_entities( @configuration[:state_table_name], options )
559
382
  }
560
383
  entities
561
384
  end
562
385
 
563
- def commit_recover
564
- proc do |reason, e| @@failed_on_commit_retry_Qs[@storage_account_name] << state_to_tuple end
565
- end
566
-
567
386
  def commit ( tuple = nil )
568
387
  tuple_to_state( tuple ) if tuple
569
388
 
570
389
  unless @uploaded_block_ids.empty?
571
390
  @action = :commit
572
391
  @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable ]
573
- success = storage_io_block( commit_recover ) {
392
+ success = storage_io_block {
574
393
  @info = "#{@action.to_s} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}"
575
394
  # assume that exceptions can be raised due to this method:
576
- @client.blobClient.commit_blob_blocks( @container_name, @blob_name, @uploaded_block_ids ) unless @@configuration[:disable_blob_upload]
395
+ @client.blobClient.commit_blob_blocks( @container_name, @blob_name, @uploaded_block_ids ) unless @configuration[:disable_blob_upload]
577
396
  @log_state = :committed
578
397
  }
579
- # next stage
580
- log_to_table_update if success
398
+ if success
399
+ # next stage
400
+ state_table_update
401
+ else
402
+ @storage_recovery.recover_later( state_to_tuple, :commit, @storage_account_name )
403
+ end
581
404
  end
582
405
  end
583
406
 
584
407
 
585
- def upload_recover
586
- proc do |reason, e|
587
- unless @uploaded_block_ids.empty?
588
- info1 = "#{:commit} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}"
589
- @@logger.error { "Pospone to #{info1} (; retry later, error: #{e.inspect}" }
590
- @@failed_on_commit_retry_Qs[@storage_account_name] << state_to_tuple
591
- @uploaded_block_ids = [ ]
592
- end
593
- unless :io_all_dead == reason
594
- @recovery = :invalid_storage_account
595
- else
596
- Channels.instance.channel( @intrumentation_key, @table_id ).failed_on_upload_retry_Q << @block_to_upload
597
- @block_to_upload = nil
598
- end
408
+ def upload_retry_later
409
+ unless @uploaded_block_ids.empty?
410
+ info1 = "#{:commit} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}"
411
+ @logger.error { "Pospone to #{info1} (; retry later, error: #{@last_io_exception.inspect}" }
412
+ @storage_recovery.recover_later( state_to_tuple, :commit, @storage_account_name )
413
+ @uploaded_block_ids = [ ]
414
+ end
415
+ unless :io_all_dead == @recovery
416
+ raise UploadRetryError
417
+ else
418
+ Channels.instance.channel( @instrumentation_key, @table_id ).failed_on_upload_retry_Q << @block_to_upload
419
+ @block_to_upload = nil
599
420
  end
600
421
  end
601
422
 
602
- def upload ( block, to_commit = nil )
423
+ def upload ( block )
603
424
  @storage_account_name = nil if @uploaded_block_ids.empty?
604
425
  @block_to_upload = block
605
426
  block = nil # remove reference for GC
@@ -615,7 +436,7 @@ class LogStash::Outputs::Application_insights
615
436
  # remove record of previous upload that failed
616
437
  if @storage_account_name
617
438
  exclude_storage_account_names << @storage_account_name
618
- @@failed_on_log_to_table_retry_Qs[@storage_account_name] << state_to_tuple
439
+ @storage_recovery.recover_later( state_to_tuple, :state_table_update, @storage_account_name )
619
440
  end
620
441
  set_conatainer_and_blob_names
621
442
  @storage_account_name = Clients.instance.get_random_active_storage( exclude_storage_account_names )
@@ -623,7 +444,7 @@ class LogStash::Outputs::Application_insights
623
444
  upload_recover.call( :io_all_dead, nil )
624
445
  return false
625
446
  end
626
- raise UploadRetryError unless log_to_table_insert
447
+ raise UploadRetryError unless state_table_insert
627
448
  end
628
449
 
629
450
  @action = :upload
@@ -631,12 +452,12 @@ class LogStash::Outputs::Application_insights
631
452
  @info = "#{@action} #{@storage_account_name}/#{@container_name}/#{@blob_name}, #{@block_info}, commitId: [\"#{100001 + @uploaded_block_ids.length}\"]"
632
453
  @recoverable = [ :invalid_storage_key, :invalid_storage_account, :io_failure, :service_unavailable, :container_exist, :create_container ]
633
454
 
634
- success = storage_io_block( upload_recover ) {
455
+ success = storage_io_block {
635
456
  create_container_exist_recovery
636
457
  block_id = "#{100001 + @uploaded_block_ids.length}"
637
458
 
638
459
  # assume that exceptions can be raised due to this method:
639
- @client.blobClient.put_blob_block( @container_name, @blob_name, block_id, @block_to_upload.bytes ) unless @@configuration[:disable_blob_upload]
460
+ @client.blobClient.put_blob_block( @container_name, @blob_name, block_id, @block_to_upload.bytes ) unless @configuration[:disable_blob_upload]
640
461
 
641
462
  # upload success
642
463
  first_block_in_blob = @uploaded_block_ids.empty?
@@ -655,8 +476,7 @@ class LogStash::Outputs::Application_insights
655
476
  Telemetry.instance.track_event("uploading", {:properties => state_to_table_entity})
656
477
  }
657
478
 
658
- raise UploadRetryError if :invalid_storage_account == @recovery
659
- commit if success && to_commit
479
+ upload_retry_later unless success
660
480
  rescue UploadRetryError
661
481
  @recovery = nil
662
482
  retry
@@ -670,7 +490,7 @@ class LogStash::Outputs::Application_insights
670
490
  @action = :list_blob_blocks
671
491
  @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :container_exist, :create_container, :create_blob ]
672
492
  list_blob_blocks = nil
673
- success = storage_io_block( proc do |reason, e| end ) {
493
+ success = storage_io_block {
674
494
  @info = "#{@action} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
675
495
 
676
496
  create_container_exist_recovery
@@ -705,15 +525,15 @@ class LogStash::Outputs::Application_insights
705
525
  private
706
526
 
707
527
 
708
- def storage_io_block( recover_later_proc, valid_recovery = nil )
528
+ def storage_io_block
709
529
  @recovery = nil
710
530
  @try_count = 1
711
531
 
712
532
  begin
713
533
  @client ||= Client.new( @storage_account_name, @force_client )
714
534
  yield
715
- disabled = :notify == @action ? @@configuration[:disable_notification] : @@configuration[:disable_blob_upload]
716
- @@logger.info { "Successed to #{disabled ? 'DISABLED ' : ''}#{@info}" }
535
+ disabled = :notify == @action ? @configuration[:disable_notification] : @configuration[:disable_blob_upload]
536
+ @logger.info { "Successed to #{disabled ? 'DISABLED ' : ''}#{@info}" }
717
537
  true
718
538
 
719
539
  rescue TypeError
@@ -721,8 +541,13 @@ class LogStash::Outputs::Application_insights
721
541
 
722
542
  rescue StandardError => e
723
543
  @last_io_exception = e
724
- @recovery = nil
725
- retry if recover_retry?( e, recover_later_proc )
544
+ @recovery, reason = recover_retry?( e )
545
+ retry if @recovery || reason.nil?
546
+
547
+ puts " +++ recovery: #{@recovery}, reason: #{reason}"
548
+
549
+ @recovery = reason
550
+ @logger.error { "Failed to #{@info} ; retry later, error: #{e.inspect}" }
726
551
  false
727
552
 
728
553
  ensure
@@ -730,157 +555,173 @@ class LogStash::Outputs::Application_insights
730
555
  end
731
556
  end
732
557
 
733
-
734
- def recover_retry? ( e, recover_later_proc )
735
- # http error, probably server error
558
+ def error_to_sym ( e )
736
559
  if e.is_a?( Azure::Core::Http::HTTPError )
560
+ if 404 == e.status_code
561
+ if "ContainerNotFound" == e.type
562
+ :create_container
737
563
 
738
- if 404 == e.status_code && "ContainerNotFound" == e.type
739
- @recovery = :create_container
740
-
741
- elsif 404 == e.status_code && "TableNotFound" == e.type
742
- @recovery = :create_table
564
+ elsif "TableNotFound" == e.type
565
+ :create_table
743
566
 
744
- elsif 404 == e.status_code && "BlobNotFound" == e.type
745
- @recovery = :create_blob
567
+ elsif "BlobNotFound" == e.type
568
+ :create_blob
746
569
 
747
- elsif 404 == e.status_code && "ResourceNotFound" == e.type
748
- @recovery = :create_resource
570
+ elsif "ResourceNotFound" == e.type
571
+ :create_resource
749
572
 
750
- elsif 409 == e.status_code && "ContainerAlreadyExists" == e.type
751
- @recovery = :container_exist
573
+ else
574
+ :create_resource
575
+ end
752
576
 
753
- elsif 409 == e.status_code && "BlobAlreadyExists" == e.type
754
- @recovery = :blob_exist
577
+ elsif 409 == e.status_code
578
+ if "ContainerAlreadyExists" == e.type
579
+ :container_exist
755
580
 
756
- elsif 409 == e.status_code && "TableAlreadyExists" == e.type
757
- @recovery = :table_exist
581
+ elsif "BlobAlreadyExists" == e.type
582
+ :blob_exist
758
583
 
759
- elsif 409 == e.status_code && "TableBeingDeleted" == e.type
760
- @recovery = :table_busy
584
+ elsif "TableAlreadyExists" == e.type
585
+ :table_exist
761
586
 
762
- elsif 409 == e.status_code && "EntityAlreadyExists" == e.type
763
- @recovery = :entity_exist
587
+ elsif "TableBeingDeleted" == e.type
588
+ :table_busy
764
589
 
765
- elsif 403 == e.status_code && "AuthenticationFailed" == e.type
766
- @recovery = :invalid_storage_key
590
+ elsif "EntityAlreadyExists" == e.type
591
+ :entity_exist
767
592
 
768
- elsif 403 == e.status_code && "Unknown" == e.type && e.description.include?("Blob does not exist or not accessible.")
769
- @recovery = :notify_failed_blob_not_accessible
593
+ else
594
+ :http_unknown
595
+ end
596
+
597
+ elsif 403 == e.status_code
598
+ if "AuthenticationFailed" == e.type
599
+ :invalid_storage_key
600
+
601
+ elsif "Unknown" == e.type && e.description.include?("Blob does not exist or not accessible.")
602
+ :notify_failed_blob_not_accessible
603
+
604
+ else
605
+ :access_denied
606
+ end
770
607
 
771
608
  elsif 400 == e.status_code && "Unknown" == e.type && e.description.include?("Invalid instrumentation key")
772
- @recovery = :invalid_intrumentation_key
609
+ :invalid_instrumentation_key
773
610
 
774
611
  elsif 500 == e.status_code && "Unknown" == e.type && e.description.include?("Processing error")
775
- @recovery = :notification_process_down
612
+ :notification_process_down
776
613
 
777
614
  elsif 503 == e.status_code
778
- @recovery = :service_unavailable
779
- elsif 404 == e.status_code
780
- @recovery = :create_resource
781
- elsif 403 == e.status_code
782
- # todo, came from updating the log_table, how to hnadle this
783
- @recovery = :access_denied
615
+ :service_unavailable
616
+
784
617
  else
785
- puts "\n>>>> HTTP error - #{e.inspect} <<<<\n"
786
- @recovery = :http_unknown
787
- raise e if @@configuration[:stop_on_unknown_io_errors]
618
+ :http_unknown
788
619
  end
789
620
 
790
621
  # communication error
791
622
  elsif e.is_a?( Faraday::ClientError )
792
- @recovery = :io_failure
623
+ :io_failure
793
624
 
794
625
  # communication error
795
626
  elsif e.is_a?( IOError )
796
- @recovery = :io_failure
627
+ :io_failure
797
628
 
798
629
  # all storage accounts are dead, couldn't get client (internal exception)
799
630
  elsif e.is_a?( StorageAccountsOffError )
800
- @recovery = :io_all_dead
631
+ :io_all_dead
801
632
 
802
633
  # all storage accounts are dead, couldn't get client (internal exception)
803
634
  elsif e.is_a?( NotRecoverableError )
804
- @recovery = :not_recoverable
635
+ :not_recoverable
805
636
 
806
637
  elsif e.is_a?( NameError ) && e.message.include?( "uninitialized constant Azure::Core::Auth::Signer::OpenSSL" )
807
- sleep( 1 )
808
- @recovery = :io_failure
638
+ :init_error
809
639
 
810
640
  elsif e.is_a?( NameError ) && e.message.include?( "uninitialized constant Azure::Storage::Auth::SharedAccessSignature" )
811
- sleep( 1 )
812
- @recovery = :io_failure
641
+ :init_error
813
642
 
814
643
  else
815
- # UNKNOWN error - #<NameError: uninitialized constant Azure::Core::Auth::Signer::OpenSSL>
816
- puts "\n>>>> UNKNOWN error - #{e.inspect} <<<<\n"
817
- raise e
644
+ :unknown
645
+ end
646
+ end
818
647
 
648
+
649
+ def recover_retry? ( e )
650
+ recovery = error_to_sym( e )
651
+ if :init_error == recovery
652
+ @client = @client.dispose if @client
653
+ sleep( 1 )
654
+ recovery = nil
655
+
656
+ elsif :http_unknown == recovery || :unknown == recovery
657
+ puts "\n>>>> UNKNOWN error - #{e.inspect} <<<<\n"
658
+ raise e if @configuration[:stop_on_unknown_io_errors]
819
659
  end
820
660
 
821
- reason = @recovery
822
- if @recovery && @recoverable.include?( @recovery )
823
- case @recovery
824
- when :container_exist, :table_exist, :entity_exist, :create_container, :create_table
825
- # ignore log error
826
- # @@logger.error { "Failed to #{@info} ;( recovery: continue, error: #{e.inspect}" }
661
+ return [recovery, recovery] unless recovery && @recoverable.include?( recovery )
827
662
 
828
- when :invalid_storage_key, :notify_failed_blob_not_accessible
829
- if @client.switch_storage_account_key!
830
- @@logger.error { "Failed to #{@info} ;( recovery: switched to secondary storage key, error: #{e.inspect}" }
831
- else
832
- @client = @client.dispose( :auth_to_storage_failed ) if @client && :invalid_storage_key == @recovery
833
- @recovery = nil
834
- end
663
+ case recovery
664
+ when :container_exist, :table_exist, :entity_exist, :create_container, :create_table
665
+ # ignore log error
666
+ # @logger.error { "Failed to #{@info} ;( recovery: continue, error: #{e.inspect}" }
835
667
 
836
- when :table_busy
668
+ when :invalid_storage_key, :notify_failed_blob_not_accessible
669
+ if @client.switch_storage_account_key!
670
+ @logger.error { "Failed to #{@info} ;( recovery: switched to secondary storage key, error: #{e.inspect}" }
671
+ else
672
+ @client = @client.dispose( :auth_to_storage_failed ) if @client && :invalid_storage_key == recovery
673
+ return [nil, recovery]
674
+ end
675
+
676
+ when :table_busy
677
+ @client = @client.dispose if @client
678
+ sleep( @configuration[:io_retry_delay] )
679
+ @logger.error { "Failed to #{@info} ;( recovery: retry, error: #{e.inspect}" }
680
+
681
+ when :io_failure, :service_unavailable, :notification_process_down
682
+ if @try_count < @max_tries
837
683
  @client = @client.dispose if @client
838
- sleep( @@io_retry_delay )
839
- @@logger.error { "Failed to #{@info} ;( recovery: retry, error: #{e.inspect}" }
684
+ sleep( @configuration[:io_retry_delay] )
685
+ @logger.error { "Failed to #{@info} ;( recovery: retry, try #{@try_count} / #{@max_tries}, error: #{e.inspect}" }
686
+ @try_count += 1
687
+ else
688
+ if :io_failure == recovery || ( :service_unavailable == recovery && :notify != @action )
689
+ @client = @client.dispose( :io_to_storage_failed ) if @client
690
+ end
691
+ return [nil, recovery]
692
+ end
840
693
 
841
- when :io_failure, :service_unavailable, :notification_process_down, :invalid_intrumentation_key, :invalid_table_id
694
+ when :invalid_instrumentation_key, :invalid_table_id
695
+ if :notify == @action # only for notify, not for test endpoint
842
696
  if @try_count < @max_tries
843
697
  @client = @client.dispose if @client
844
- sleep( @@io_retry_delay )
845
- @@logger.error { "Failed to #{@info} ;( recovery: retry, try #{@try_count} / #{@max_tries}, error: #{e.inspect}" }
698
+ sleep( @configuration[:io_retry_delay] )
699
+ @logger.error { "Failed to #{@info} ;( recovery: retry, try #{@try_count} / #{@max_tries}, error: #{e.inspect}" }
846
700
  @try_count += 1
847
701
  else
848
- if :invalid_intrumentation_key == @recovery
849
- Channels.instance.mark_invalid_intrumentation_key( @intrumentation_key )
850
- elsif :invalid_table_id == @recovery
702
+ if :invalid_instrumentation_key == recovery
703
+ Channels.instance.mark_invalid_instrumentation_key( @instrumentation_key )
704
+ elsif :invalid_table_id == recovery
851
705
  Channels.instance.mark_invalid_table_id( @table_id )
852
- elsif :io_failure == @recovery || ( :service_unavailable == @recovery && :notify != @action )
853
- @client = @client.dispose( :io_to_storage_failed ) if @client
854
706
  end
855
- @recovery = nil
707
+ return [nil, recovery]
856
708
  end
857
709
  end
858
- else
859
- @recovery = nil
860
710
  end
711
+ [recovery, recovery]
712
+ end
861
713
 
862
- if @recovery
863
- true
864
- else
865
- recover_later_proc.call( reason, e )
866
- @@logger.error { "Failed to #{@info} ; retry later, error: #{e.inspect}" } unless :ok == @recovery
867
- :ok == @recovery
868
- end
869
714
 
870
- # Blob service error codes - msdn.microsoft.com/en-us/library/azure/dd179439.aspx
871
- # ConnectionFailed - problem with connection
872
- # ParsingError - problem with request/response payload
873
- # ResourceNotFound, SSLError, TimeoutError
874
- end
875
715
 
876
716
  def set_conatainer_and_blob_names
877
717
  time_utc = Time.now.utc
878
718
  id = @id.to_s.rjust(4, "0")
879
719
  strtime = time_utc.strftime( "%F" )
880
- @container_name = "#{@@configuration[:azure_storage_container_prefix]}-#{strtime}"
720
+ @container_name = "#{AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX}#{@configuration[:azure_storage_container_prefix]}-#{strtime}"
881
721
 
882
722
  strtime = time_utc.strftime( "%F-%H-%M-%S-%L" )
883
- @blob_name = "#{@@configuration[:azure_storage_blob_prefix]}_ikey-#{@intrumentation_key}_table-#{@table_id}_id-#{id}_#{strtime}.#{@event_format_ext}"
723
+ # @blob_name = "#{@configuration[:azure_storage_blob_prefix]}_ikey-#{@instrumentation_key}_table-#{@table_id}_id-#{id}_#{strtime}.#{@event_format_ext}"
724
+ @blob_name = "#{AZURE_STORAGE_BLOB_LOGSTASH_PREFIX}#{@configuration[:azure_storage_blob_prefix]}/ikey-#{@instrumentation_key}/table-#{@table_id}/#{strtime}_#{id}.#{@event_format_ext}"
884
725
  end
885
726
 
886
727
 
@@ -892,30 +733,30 @@ class LogStash::Outputs::Application_insights
892
733
  :ver => BASE_DATA_REQUIRED_VERSION,
893
734
  :blobSasUri => @blob_sas_url.to_s,
894
735
  :sourceName => @table_id,
895
- :sourceVersion => @@configuration[:notification_version].to_s
736
+ :sourceVersion => @configuration[:notification_version].to_s
896
737
  }
897
738
  },
898
- :ver => @@configuration[:notification_version],
739
+ :ver => @configuration[:notification_version],
899
740
  :name => REQUEST_NAME,
900
741
  :time => Time.now.utc.iso8601,
901
- :iKey => @intrumentation_key
742
+ :iKey => @instrumentation_key
902
743
  }
903
744
  notification_hash.to_json
904
745
  end
905
746
 
906
747
 
907
748
  def post_notification ( http_client, body )
908
- request = Azure::Core::Http::HttpRequest.new( :post, @@configuration[:notification_endpoint], { :body => body, :client => http_client } )
749
+ request = Azure::Core::Http::HttpRequest.new( :post, @configuration[:application_insights_endpoint], { :body => body, :client => http_client } )
909
750
  request.headers['Content-Type'] = 'application/json; charset=utf-8'
910
751
  request.headers['Accept'] = 'application/json'
911
- @@logger.debug { "send notification : \n endpoint: #{@@configuration[:notification_endpoint]}\n body : #{body}" }
752
+ @logger.debug { "send notification : \n endpoint: #{@configuration[:application_insights_endpoint]}\n body : #{body}" }
912
753
  response = request.call
913
754
  end
914
755
 
915
756
 
916
757
  def set_blob_sas_url
917
758
  blob_url ="https://#{@storage_account_name}.blob.core.windows.net/#{@container_name}/#{@blob_name}"
918
- options_and_constrains = {:permissions => "r", :resource => "b", :expiry => ( Time.now.utc + @@configuration[:blob_access_expiry_time] ).iso8601 }
759
+ options_and_constrains = {:permissions => "r", :resource => "b", :expiry => ( Time.now.utc + @configuration[:blob_access_expiry_time] ).iso8601 }
919
760
  @blob_sas_url = @client.storage_auth_sas.signed_uri( URI( blob_url ), options_and_constrains )
920
761
  end
921
762