logstash-output-application_insights 0.1.3
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/CONTRIBUTORS +9 -0
- data/DEVELOPER.md +0 -0
- data/Gemfile +26 -0
- data/LICENSE +17 -0
- data/README.md +495 -0
- data/Rakefile +22 -0
- data/lib/logstash/outputs/application_insights.rb +393 -0
- data/lib/logstash/outputs/application_insights/blob.rb +923 -0
- data/lib/logstash/outputs/application_insights/block.rb +118 -0
- data/lib/logstash/outputs/application_insights/channel.rb +259 -0
- data/lib/logstash/outputs/application_insights/channels.rb +142 -0
- data/lib/logstash/outputs/application_insights/client.rb +110 -0
- data/lib/logstash/outputs/application_insights/clients.rb +113 -0
- data/lib/logstash/outputs/application_insights/config.rb +341 -0
- data/lib/logstash/outputs/application_insights/constants.rb +208 -0
- data/lib/logstash/outputs/application_insights/exceptions.rb +55 -0
- data/lib/logstash/outputs/application_insights/flow_control.rb +80 -0
- data/lib/logstash/outputs/application_insights/multi_io_logger.rb +69 -0
- data/lib/logstash/outputs/application_insights/shutdown.rb +96 -0
- data/lib/logstash/outputs/application_insights/state.rb +89 -0
- data/lib/logstash/outputs/application_insights/storage_cleanup.rb +214 -0
- data/lib/logstash/outputs/application_insights/sub_channel.rb +75 -0
- data/lib/logstash/outputs/application_insights/telemetry.rb +99 -0
- data/lib/logstash/outputs/application_insights/timer.rb +90 -0
- data/lib/logstash/outputs/application_insights/utils.rb +139 -0
- data/lib/logstash/outputs/application_insights/version.rb +24 -0
- data/logstash-output-application-insights.gemspec +50 -0
- data/spec/outputs/application_insights_spec.rb +42 -0
- metadata +151 -0
data/Rakefile
ADDED
@@ -0,0 +1,22 @@
# encoding: utf-8

# ----------------------------------------------------------------------------------
# Logstash Output Application Insights
#
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
# ----------------------------------------------------------------------------------

require "logstash/devutils/rake"
data/lib/logstash/outputs/application_insights.rb
ADDED
@@ -0,0 +1,393 @@
# encoding: utf-8

# ----------------------------------------------------------------------------------
# Logstash Output Application Insights
#
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
# ----------------------------------------------------------------------------------

require "logstash/outputs/base"
require "logstash/namespace"
require "logstash-core/version"

require "stud/interval"

require "azure/storage"
require "azure/storage/core/auth/shared_access_signature"

require "azure/core/http/http_request"
require "azure/core/http/http_response"

require "rbconfig"
require "faraday"
require "openssl"
require "time"
require "json"
require "uri"
require "socket"

require "thread"
require "concurrent" # for atomic and thread-safe operations
require "logger"
require "csv"

require "application_insights"

class LogStash::Outputs::Application_insights < LogStash::Outputs::Base
  require "logstash/outputs/application_insights/version"
  require "logstash/outputs/application_insights/utils"
  require "logstash/outputs/application_insights/constants"
  require "logstash/outputs/application_insights/config"
  require "logstash/outputs/application_insights/blob"
  autoload :Block, "logstash/outputs/application_insights/block"
  autoload :Storage_cleanup, "logstash/outputs/application_insights/storage_cleanup"

  autoload :Clients, "logstash/outputs/application_insights/clients"
  autoload :Client, "logstash/outputs/application_insights/client"

  autoload :Channels, "logstash/outputs/application_insights/channels"
  autoload :Channel, "logstash/outputs/application_insights/channel"
  autoload :Sub_channel, "logstash/outputs/application_insights/sub_channel"
  autoload :State, "logstash/outputs/application_insights/state"
  autoload :Flow_control, "logstash/outputs/application_insights/flow_control"
  autoload :Shutdown, "logstash/outputs/application_insights/shutdown"
  autoload :Telemetry, "logstash/outputs/application_insights/telemetry"

  require "logstash/outputs/application_insights/exceptions"
  require "logstash/outputs/application_insights/timer"
  require "logstash/outputs/application_insights/multi_io_logger"

  config_name "application_insights"

  # default :codec, "json_lines"

  # Array of pairs: a storage_account_name and an array of access_keys.
  # Examples:
  #   [ account1, key1 ]
  #   [ [account1, key1], [account2, key2], [account3, key3] ]
  #   [ [account1, [key11, key12]], [account2, key2], [account3, [key3]] ]
  config :storage_account_name_key, :validate => :array
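
  # Editor's illustration, not part of the gem (the helper name is invented):
  # the accepted shapes above can be normalized to [ account_name, [ keys... ] ]
  # pairs along these lines.
  def self.example_normalize_account_keys ( raw )
    raw = [ raw ] unless raw.first.is_a?( Array ) # a single [ account, key ] pair
    raw.map { |account, keys| [ account, keys.is_a?( Array ) ? keys : [ keys ] ] }
  end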

  # Prefix for the azure storage tables used by this Logstash instance.
  # It is recommended that each Logstash instance have a unique prefix, to avoid
  # confusion and loss of tracking, although sharing tables won't harm proper execution.
  # If not set, the machine name is used (non-alphanumeric characters are removed
  # and the name is converted to lowercase), if a machine name is available.
  # The string may contain only alphanumeric characters, is case sensitive, and must start with a letter.
  config :azure_storage_table_prefix, :validate => :string

  # Prefix for the azure storage containers used by this Logstash instance.
  # It is recommended that each Logstash instance have a unique prefix, to avoid
  # confusion and loss of tracking, although sharing containers won't harm proper execution.
  # If not set, the machine name is used (non-alphanumeric characters are removed
  # and the name is converted to lowercase), if a machine name is available.
  # The string may contain only alphanumeric characters and dashes (a double dash is not allowed), and is case insensitive.
  config :azure_storage_container_prefix, :validate => :string

  # Prefix for the azure storage blobs used by this Logstash instance.
  # Each Logstash instance MUST have a unique prefix, to avoid loss of data!!!
  # If not set, the machine name is used (non-alphanumeric characters are removed
  # and the name is converted to lowercase), if a machine name is available.
  # The string may include only characters that are allowed in any valid URL.
  config :azure_storage_blob_prefix, :validate => :string

  # Default Application Insights Analytics intrumentation_key.
  # Used only when the key is not specified as a table_id property in tables,
  # or as part of the event's fields or the event's metadata fields.
  config :intrumentation_key, :validate => :string

  # Default Application Insights Analytics table_id.
  # Used only when it is not specified as part of the event's fields
  # or the event's metadata fields.
  config :table_id, :validate => :string

  # Specifies the list of the fields that will be filtered from the events; fields not specified will be ignored.
  # If not specified, all fields in events will be filtered. The order is kept, and is used for csv serialization.
  config :table_columns, :validate => :array

  # If set to true, event fields are treated as case insensitive; the default is case sensitive.
  config :case_insensitive_columns, :validate => :boolean

  # A hash of table_ids, where each table_id points to a set of properties.
  # The properties are a hash, where the keys are the property names.
  # Currently supported properties per table_id:
  #     intrumentation_key, ext, table_columns, csv_default_value, csv_separator, blob_max_delay, event_separator, serialized_event_field
  # intrumentation_key: Application Insights Analytics intrumentation_key, used when not specified in any of the event's fields or the event's metadata fields
  # serialized_event_field: specifies the field that may contain the full serialized event (either as json or csv);
  #     when specified, the ext property should be set to either csv or json (json is the default).
  #     If the event does not contain the field, the value is created from the fields in the event, according to table_columns if configured, or all fields in the event.
  #     If the event contains this field, and ext is csv:
  #         if the value is a string, it is used as is as the serialized event, without validating whether it is a csv string
  #         if the value is an array, it is serialized as an array of csv columns
  #         if the value is a hash, it is serialized based on table_columns to csv columns
  #     If the event contains this field, and ext is json:
  #         if the value is a string, it is used as is as the serialized event, without validating whether it is a json string
  #         if the value is a hash, it is serialized to json; if a fields_map exists, serialization is based on it
  #         if the value is an array, it is zipped with table_columns (if it exists) and serialized to json
  # ext: blob extension; the only valid values are either csv or json;
  #     should be set whenever the default json is not appropriate (i.e. csv)
  # blob_max_delay: maximum latency time, in seconds, from the time the event arrived until it should be committed to azure storage, and Application Insights is notified
  # event_separator: specifies the string that is used as a separator between events in the blob
  # table_columns: specifies the event fields that should be serialized, and their order (order is required for csv),
  #     if csv serialization will be used for this table_id.
  #     Each table_columns field is a hash with 3 keys: name, type, and default. Only name is mandatory.
  #         name    - the name of the event field whose value should be mapped to this column
  #         type    - the type of this field: "string", "hash", "array", "number", "json", "boolean", "float", "integer", "dynamic", "datetime", "object"
  #         default - the value to be used for this column, in case the field is missing in the event
  # csv_separator: specifies the string that is used as a separator between columns;
  #     can be specified only together with table_columns
  # csv_default_value: specifies the string that is used as the value in a csv record, in case the field does not exist in the event;
  #     can be specified only together with table_columns
  #
  # Example json table_id:
  #     tables => {"a679fbd2-702c-4c46-8548-80082c66ef28" => {"intrumentation_key" => "abee940b-e648-4242-b6b3-f2826667bf96", "blob_max_delay" => 60} }
  # Example json table_id, input in serialized_event_field:
  #     {"ab6a3584-aef0-4a82-8725-2f2336e59f3e" => {"serialized_event_field" => "message", "ext" => "json"} }
  # Example csv table_id, input in serialized_event_field:
  #     {"ab6a3584-aef0-4a82-8725-2f2336e59f3e" => {"serialized_event_field" => "csv_message", "ext" => "csv"} }
  # Example csv table_id, input in event fields:
  #     {"ab6a3584-aef0-4a82-8725-2f2336e59f3e" => { "ext" => "csv", "table_columns" => [ {name => "Timestamp", type => datetime }, "Value", "Custom" ] } }
  # Example json table_id, input in event fields:
  #     {"ab6a3584-aef0-4a82-8725-2f2336e59f3e" => { "ext" => "json", "table_columns" => [ "Timestamp", "Value", "Custom" ] } }
  config :tables, :validate => :hash, :default => {}
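
  # Editor's illustration, not part of the gem: a tables hash that combines the
  # documented properties (the GUIDs are placeholders):
  EXAMPLE_TABLES = {
    "a679fbd2-702c-4c46-8548-80082c66ef28" => {
      "intrumentation_key" => "abee940b-e648-4242-b6b3-f2826667bf96",
      "ext" => "csv",
      "blob_max_delay" => 60,
      "table_columns" => [
        { "name" => "Timestamp", "type" => "datetime" },
        { "name" => "Value", "type" => "number", "default" => 0 },
        "Custom"
      ]
    }
  }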

  # Advanced, internal, should not be set. The default is 192 GB ( = 50,000 * 4 MB ),
  # azure storage's maximum blob size (50,000 blocks of 4 MB each).
  config :blob_max_bytesize, :validate => :number

  # Specifies the maximum number of events in one blob.
  # Setting it low may reduce latency, but also reduces Logstash performance.
  # Setting it high may increase latency up to the maximum delay, but Logstash
  # will be more efficient, and the load on the network will be lower.
  config :blob_max_events, :validate => :number

  # Specifies the maximum latency time, in seconds, from the time the event arrived
  # until it is committed to azure storage, and Application Insights is notified.
  # The total latency may be higher, as this is not the full ingestion flow.
  config :blob_max_delay, :validate => :number

  # Specifies the blob serialization to create. Default "json".
  # Currently 2 types are supported: "csv" and "json".
  config :blob_serialization, :validate => :string

  # Interval of time between retries due to IO failures
  config :io_retry_delay, :validate => :number

  # Number of retries on IO failures before giving up and moving to the available options
  config :io_max_retries, :validate => :number

  # Specifies the retention time of the blob in the container after it has been
  # notified to Application Insights Analytics.
  # Once the retention time expires, the blob is deleted from the container.
  config :blob_retention_time, :validate => :number

  # Specifies the time Application Insights Analytics has access to a blob that has been notified.
  # Blob access is limited with a SAS URL.
  config :blob_access_expiry_time, :validate => :number

  # Advanced, internal, should not be set. The default is \r\n.
  # Specifies the string that is used as a separator between events in the blob.
  config :event_separator, :validate => :string

  # Advanced, internal, should not be set. The default is a comma.
  # Specifies the string that is used as a separator between columns in a csv record.
  config :csv_separator, :validate => :string

  # Specifies the string that is used as the value in a csv record, in case the field does not exist in the event. The default is "".
  config :csv_default_value, :validate => :string

  # Specifies the log level. Valid values are: DEBUG, INFO, WARN, ERROR, FATAL, UNKNOWN.
  config :logger_level, :validate => :string

  # Specifies the list of targets for the log. May include files, devices, "stdout" and "stderr".
  config :logger_files, :validate => :array

  # Advanced, internal, should not be set. The default is AI.
  # Specifies the program name that will be displayed in each log record.
  config :logger_progname, :validate => :string

  # Specifies when file logs are shifted. Valid values are either an integer or "daily", "weekly" or "monthly".
  config :logger_shift_size

  # Specifies the shift age of a log:
  # the number of old files to keep, or the frequency of rotation (daily, weekly or monthly).
  config :logger_shift_age, :validate => :number

  # Specifies a serialized event field name. If the field exists in the current event, its value as is will be taken as the serialized event. No default.
  config :serialized_event_field, :validate => :string

  # Specifies the time interval between tests that check whether a storage account
  # has come back to life after it stopped responding.
  config :resurrect_delay, :validate => :number

  # Specifies the high-water mark for flow control, which is used to avoid an
  # out-of-memory crash. Once memory consumption reaches the high-water mark,
  # the plugin will stop accepting events until memory is below the low-water mark.
  config :flow_control_suspend_bytes, :validate => :number

  # Specifies the low-water mark for flow control, which is used to avoid an
  # out-of-memory crash. Once memory consumption reaches the high-water mark,
  # the plugin will stop accepting events until memory is below the low-water mark.
  config :flow_control_resume_bytes, :validate => :number

  # Specifies the amount of time flow control suspends receiving events, to allow
  # GC and flushing of events to Azure storage, before checking whether memory
  # is below the low-water mark.
  config :flow_control_delay, :validate => :number
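
  # Editor's illustration, not part of the gem (the State#upload_bytesize reader
  # is assumed): together, the three flow_control_* settings form a classic
  # two-watermark loop.
  def example_flow_control
    if State.instance.upload_bytesize >= @flow_control_suspend_bytes # high-water mark hit
      # stop accepting events until memory drains below the low-water mark
      while State.instance.upload_bytesize >= @flow_control_resume_bytes
        sleep( @flow_control_delay ) # allow GC and flushing of events to Azure storage
      end
    end
  end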

  # File path of the CA file, if there are issues with SSL
  config :ca_file, :validate => :string

  # When set to true, telemetry about the plugin won't be sent to Application Insights
  config :disable_telemetry, :validate => :boolean

  # When set to true, storage cleanup won't be done by the plugin (it should then be done by some other means, or by another Logstash process with this flag enabled)
  config :disable_cleanup, :validate => :boolean

  # When set to true, blobs that were not notified are deleted; if not set, they are copied to the orphan-blobs container
  config :delete_not_notified_blobs, :validate => :boolean

  # When set to true, records of notified blobs are saved in a table, for as long as the blobs are retained in their containers
  config :save_notified_blobs_records, :validate => :boolean

  # Advanced, internal, should not be set. The default is false.
  # When set to true, notification is not sent to Application Insights, but the plugin behaves as if it were notified
  config :disable_notification, :validate => :boolean

  # Advanced, internal, should not be set. The default is false.
  # When set to true, events are not uploaded and blobs are not committed, but the plugin behaves as if they were uploaded and notified
  config :disable_blob_upload, :validate => :boolean

  # Advanced, internal, should not be set. The default is false.
  # When set to true, the process will stop if an unknown IO error is found
  config :stop_on_unknown_io_errors, :validate => :boolean

  # Advanced, internal, should not be set. The default is the Application Insights production endpoint.
  # When set, notifications are sent to an alternative endpoint; used for internal testing
  config :notification_endpoint, :validate => :string

  # Advanced, internal, should not be set. The only currently valid value is 1
  config :notification_version, :validate => :number

  # When set to true, access to Application Insights will be validated at initialization,
  # and if the validation fails, the Logstash process will abort
  config :validate_endpoint, :validate => :boolean, :default => true

  # When set to true, access to azure storage for each of the configured accounts will be validated at initialization,
  # and if the validation fails, the Logstash process will abort
  config :validate_storage, :validate => :boolean, :default => true

  public

  def register
    # set configuration
    Config.validate_and_adjust_configuration( default_configuration )
    configuration = Config.current

    Multi_io_logger.config( configuration )

    # be careful not to use @logger here, as it would override Logstash's @logger and may create strange behaviour
    @private_logger = configuration[:logger]

    @private_logger.info { "configuration: #{configuration}" }

    @telemetry = Telemetry.instance
    configuration[:telemetry_channel] = @telemetry.telemetry_channel

    Timer.config( configuration )
    Blob.config( configuration )
    Blob.validate_endpoint if @validate_endpoint
    Blob.validate_storage if @validate_storage

    @shutdown = Shutdown.instance
    @channels = Channels.instance

    @storage_cleanup = Storage_cleanup.start

    @private_logger.info { "plugin registered" }

    # @codec.on_event do |event, encoded_event|
    #   @channels.receive( event, encoded_event )
    # end

    Telemetry.instance.track_event("register", {:properties => configuration})

    return "ok\n"
  end # def register

  def receive ( event )
    # @codec.encode( event )
    @channels.receive( event, nil )
    return "ok\n"
  end

  def close
    Telemetry.instance.track_event( "close" )
    Telemetry.instance.flush
    @shutdown.submit
  end

  private

  # -----------------------------------------------

  def list_blob_names
    blob_names = Set.new []
    continuation_token = nil
    loop do
      entries = @azure_blob.list_blobs(@container, { :timeout => 10, :marker => continuation_token})
      @@logger.debug { "blob entries: #{entries}" }
      entries.each do |entry|
        @@logger.debug { "blob entry name: #{entry.name}" }
        blob_names << entry.name
      end
      continuation_token = entries.continuation_token
      break if continuation_token.empty?
    end
    return blob_names
  end # def list_blob_names

  def list_container_names
    container_names = Set.new []
    continuation_token = nil
    loop do
      containers = @azure_blob.list_containers( { :marker => continuation_token } )
      @@logger.debug { "containers: #{containers}" }
      containers.each do |container|
        @@logger.debug { "container entry name: #{container.name}" }
        container_names << container.name
        upload(container.name, "blob-append-" + container.name, "test - " + container.name)
        blobs = @azure_blob.list_blobs(container.name)
        blobs.each do |blob|
          @@logger.debug { "blob name: #{blob.name}" }
        end
      end
      continuation_token = containers.continuation_token
      break if continuation_token.empty?
    end
    return container_names
  end # def list_container_names

  def create_container (container_name)
    begin
      @azure_blob.create_container(container_name)
    rescue
      @@logger.debug { $! }
    end
  end
end
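For orientation, a minimal sketch of driving this output directly from Ruby (an editor's illustration that assumes logstash-core is loaded; it is not part of the gem, which is normally configured from a Logstash pipeline):

    plugin = LogStash::Outputs::Application_insights.new(
      "storage_account_name_key" => [ ["account1", "key1"] ],
      "intrumentation_key"       => "abee940b-e648-4242-b6b3-f2826667bf96",
      "table_id"                 => "a679fbd2-702c-4c46-8548-80082c66ef28"
    )
    plugin.register                                               # validates endpoint/storage, starts channels
    plugin.receive( LogStash::Event.new( "message" => "hello" ) ) # queue one event for upload
    plugin.close                                                  # flush telemetry and submit shutdown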
data/lib/logstash/outputs/application_insights/blob.rb
ADDED
@@ -0,0 +1,923 @@
# encoding: utf-8

# ----------------------------------------------------------------------------------
# Logstash Output Application Insights
#
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
# ----------------------------------------------------------------------------------
class LogStash::Outputs::Application_insights
  class Blob

    attr_reader :intrumentation_key
    attr_reader :table_id
    attr_reader :storage_account_name
    attr_reader :container_name
    attr_reader :blob_name
    attr_reader :uploaded_events_count
    attr_reader :uploaded_bytesize
    attr_reader :oldest_event_time

    attr_reader :io_queue

    attr_reader :last_io_exception

    public

    def self.config ( configuration )
      @@configuration = configuration

      @@logger = configuration[:logger]
      @@io_retry_delay = configuration[:io_retry_delay]
      @@io_max_retries = configuration[:io_max_retries]
      @@blob_max_bytesize = configuration[:blob_max_bytesize]
      @@blob_max_events = configuration[:blob_max_events]
      @@state_table_name = "#{configuration[:azure_storage_table_prefix]}#{STATE_TABLE_NAME}"
      @@save_notified_blobs_records = configuration[:save_notified_blobs_records]

      @@closing = false

      # queues, per storage_account_name, for failed blob commits; will continue to retry sending
      @@failed_on_commit_retry_Qs = {}
      launch_storage_recovery_threads( @@failed_on_commit_retry_Qs, :commit, :io_failure )
      launch_storage_recovery_table_threads( :uploading )

      # queues, per storage_account_name, for failed notifications; will continue to retry sending
      @@failed_on_notify_retry_Qs = {}
      launch_storage_recovery_threads( @@failed_on_notify_retry_Qs, :notify, :notify_failed_blob_not_accessible )
      launch_storage_recovery_table_threads( :committed )

      # for notifications that failed due to the endpoint; will continue to retry sending
      launch_endpoint_recovery_thread

      # queues, per storage_account_name, for failed logging to the table; will continue to retry sending
      @@failed_on_log_to_table_retry_Qs = {}
      launch_storage_recovery_threads( @@failed_on_log_to_table_retry_Qs, :log_to_table_update, :io_failure )

    end

    def self.close
      @@closing = true
    end

    def self.stopped?
      @@closing
    end

    def self.launch_endpoint_recovery_thread
      @@failed_on_notification_endpoint_retry_Q = Queue.new
      storage_recovery_thread( nil, @@failed_on_notification_endpoint_retry_Q, :notify, :io_failure )
    end

    def self.launch_storage_recovery_threads ( queues, method, failure_reason )
      @@configuration[:storage_account_name_key].each do |storage_account_name, storage_account_keys|
        queues[storage_account_name] = Queue.new
        # a thread per storage account name
        storage_recovery_thread( storage_account_name, queues[storage_account_name], method, failure_reason )
      end
    end

    def self.launch_storage_recovery_table_threads ( state )
      @@configuration[:storage_account_name_key].each do |storage_account_name, storage_account_keys|
        recovery_table_thread( storage_account_name, state)
      end
    end

    # returns a thread
    def self.recovery_table_thread( storage_account_name, state )
      Thread.new( storage_account_name, state ) do |storage_account_name, state|

        blob = Blob.new

        committed_tuples = [ ]
        uncommitted_tuples = [ ]
        upload_empty_tuples = [ ]
        token = nil
        finished = false
        filter = "#{:PartitionKey} eq '#{@@configuration[:azure_storage_blob_prefix]}-#{state}'"

        # the thread should exit after fetching the data from the table and submitting recovery; the loop is only for the failure case
        until finished || stopped? do
          entities = blob.log_to_table_query( storage_account_name, filter, token )
          if entities
            token = entities.continuation_token

            if :committed == state
              entities.each do |entity|
                State.instance.inc_pending_notifications
                tuple = blob.table_entity_to_tuple( entity.properties )
                @@failed_on_notification_endpoint_retry_Q << tuple
              end

            elsif :uploading == state
              # tuples are collected first, before being sent to the queues, to make sure blob states don't change in between
              entities.each do |entity|
                typed_tuple = nil
                until typed_tuple || stopped?
                  typed_tuple = blob.update_commited_or_uncommited_list( entity.properties )
                  Stud.stoppable_sleep(60, 1) { stopped? } unless typed_tuple
                end
                next if stopped?

                if typed_tuple[:committed]
                  committed_tuples << typed_tuple[:committed]
                elsif typed_tuple[:uncommitted]
                  uncommitted_tuples << typed_tuple[:uncommitted]
                else
                  upload_empty_tuples << typed_tuple[:upload_empty]
                end
              end
            end

            next if token
            committed_tuples.each do |tuple|
              State.instance.inc_pending_commits
              @@failed_on_log_to_table_retry_Qs[storage_account_name] << tuple
            end
            uncommitted_tuples.each do |tuple|
              State.instance.inc_pending_commits
              @@failed_on_commit_retry_Qs[storage_account_name] << tuple
            end
            upload_empty_tuples.each do |tuple|
              @@failed_on_log_to_table_retry_Qs[storage_account_name] << tuple
            end
            finished = true
          else
            Stud.stoppable_sleep(60, 1) { stopped? }
          end
        end
        @@logger.info { "exit table recovery thread, storage: #{storage_account_name}, state: #{state}, entities: #{entities ? entities.length : nil}" }
      end
    end

    def self.state_on? ( storage_account_name, blob, failure_reason )
      if blob
        if :io_failure == failure_reason
          @@endpoint_state_on ||= blob.test_notification_endpoint( @@configuration[:storage_account_name_key][0][0] )
        else
          Clients.instance.storage_account_state_on?( storage_account_name )
        end
      elsif storage_account_name
        Clients.instance.storage_account_state_on?( storage_account_name )
      else
        Clients.instance.storage_account_state_on?
      end
    end

    def self.storage_recovery_thread( storage_account_name, queue, method, failure_reason )
      # a thread, per storage account name, that retries failed blob commits / notifications / table updates
      Thread.new( storage_account_name, queue, method, failure_reason ) do |storage_account_name, queue, method, failure_reason|
        blob = Blob.new if :notify == method
        semaphore = Mutex.new
        action = {:method => method, :semaphore => semaphore, :counter => 0 }
        loop do
          tuple ||= queue.pop
          until state_on?( storage_account_name, blob, failure_reason ) do sleep( 1 ) end

          not_busy = nil
          semaphore.synchronize {
            not_busy = action[:counter] += 1 if 10 > action[:counter]
          }
          if not_busy
            Thread.new( action, tuple ) do |action, tuple|
              Blob.new.send( action[:method], tuple )
              action[:semaphore].synchronize {
                action[:counter] -= 1
              }
            end
            tuple = nil # release for GC
          else
            Stud.stoppable_sleep(60, 1) { 10 > action[:counter] }
            next
          end
        end
      end
    end

    def self.validate_endpoint
      io = Blob.new
      raise ConfigurationError, "Failed to access application insights #{@@configuration[:notification_endpoint]}, due to error #{io.last_io_exception.inspect}" unless io.test_notification_endpoint( @@configuration[:storage_account_name_key][0][0] )
    end

    def self.validate_storage
      io = Blob.new
      @@configuration[:storage_account_name_key].each do |storage_account_name, storage_account_keys|
        raise ConfigurationError, "Failed to access azure storage account #{storage_account_name}, due to error #{io.last_io_exception.inspect}" unless io.test_storage( storage_account_name )
      end
    end

    def initialize ( channel = nil, id = nil , no_queue = false )
      @uploaded_block_ids = [ ]
      @uploaded_block_numbers = [ ]
      @uploaded_bytesize = 0
      @uploaded_events_count = 0
      @max_tries = @@io_max_retries + 1
      @sub_state = :none

      if channel
        @id = id
        @intrumentation_key = channel.intrumentation_key
        @table_id = channel.table_id
        @blob_max_delay = channel.blob_max_delay

        @event_format_ext = channel.event_format_ext

        unless no_queue

          @io_queue = Queue.new
          @timer = Timer.new

          # create a thread that handles the IO of the blob
          Thread.new do
            next_block = nil
            loop do
              block_to_upload = nil # release reference to resource for GC
              block_to_upload = next_block || @io_queue.pop
              next_block = nil

              if :trigger == @timer.state
                next_block = block_to_upload unless :wakeup == block_to_upload
                block_to_upload = :timeout
                to_commit = :commit

              elsif :close == block_to_upload
                to_commit = :commit

              # :wakeup messages only cause a timeout check, ignore them
              elsif :wakeup == block_to_upload
                next

              else
                while @io_queue.length > 0
                  next_block = @io_queue.pop
                  next if :wakeup == next_block # ignore :wakeup
                  break if :close == next_block
                  break if blob_full?( next_block )
                  break unless block_to_upload.concat( next_block )
                  next_block = nil
                end
              end

              unless to_commit
                @timer.set( block_to_upload.oldest_event_time + @blob_max_delay, nil ) {|object| @io_queue << :wakeup if 0 == @io_queue.length } if blob_empty?
                to_commit = :commit if blob_full?
                upload( block_to_upload, to_commit)
                block_to_upload = nil # release reference to resource for GC
              else
                commit unless @uploaded_block_ids.empty?
              end

              if to_commit
                to_commit = nil
                @uploaded_block_ids = [ ]
                @timer.cancel
                break if :close == block_to_upload
              end
            end
          end
        end

      end

    end

    # close the blob. It will finish whatever was already on the queue, and commit if necessary
    # called on shutdown
    def close
      @io_queue << :close
    end

    def blob_full? ( next_block = nil )
      if next_block
        BLOB_MAX_BLOCKS < @uploaded_block_ids.length + 1 || @@blob_max_events < @uploaded_events_count + next_block.events_count || @@blob_max_bytesize < @uploaded_bytesize + next_block.bytesize
      else
        BLOB_MAX_BLOCKS <= @uploaded_block_ids.length || @@blob_max_events <= @uploaded_events_count || @@blob_max_bytesize <= @uploaded_bytesize
      end
    end

    def blob_empty?
      @uploaded_block_ids.empty?
    end

    def queue_empty?
      @io_queue.length == 0 if @io_queue
    end

    def queue_size
      @io_queue.length
    end

    def clear_state
      @action = nil
      @storage_account_name = nil
      @container_name = nil
      @blob_name = nil
      @uploaded_block_ids = [ ]
      @uploaded_block_numbers = [ ]
      @uploaded_events_count = 0
      @uploaded_bytesize = 0
      @oldest_event_time = nil
    end

    def table_entity_to_tuple( options = {} )
      [ options[:start_time.to_s] || Time.now.utc, options[:action.to_s], options[:intrumentation_key.to_s], options[:table_id.to_s],
        options[:storage_account_name.to_s], options[:container_name.to_s], options[:blob_name.to_s],
        eval( options[:uploaded_block_ids.to_s] ), eval( options[:uploaded_block_numbers.to_s] ),
        options[:uploaded_events_count.to_s] || 0, options[:uploaded_bytesize.to_s] || 0, options[:oldest_event_time.to_s] || Time.now.utc,
        options[:event_format_ext.to_s], options[:blob_max_delay.to_s] || 0,
        options[:log_state.to_s].to_sym, (options[:sub_state.to_s] || :none).to_sym
      ]
    end

    def state_to_tuple
      [ @start_time || Time.now.utc, @action, @intrumentation_key, @table_id,
        @storage_account_name, @container_name, @blob_name,
        @uploaded_block_ids, @uploaded_block_numbers,
        @uploaded_events_count, @uploaded_bytesize, @oldest_event_time,
        @event_format_ext, @blob_max_delay,
        @log_state, @sub_state
      ]
    end

    def tuple_to_state ( tuple )
      ( @start_time, @action, @intrumentation_key, @table_id,
        @storage_account_name, @container_name, @blob_name,
        @uploaded_block_ids, @uploaded_block_numbers,
        @uploaded_events_count, @uploaded_bytesize, @oldest_event_time,
        @event_format_ext, @blob_max_delay,
        @log_state, @sub_state) = tuple
    end
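
    # Editor's illustration, not part of the gem: the three methods above make a
    # blob's state portable. state_to_tuple and tuple_to_state are inverses, so
    # state can be parked on a retry queue and restored into a fresh Blob
    # instance, which is exactly what the recovery threads do:
    def self.example_state_round_trip ( blob, retry_queue )
      retry_queue << blob.state_to_tuple          # park the failed blob's state
      recovered = Blob.new
      recovered.tuple_to_state( retry_queue.pop ) # resume it in a new instance
      recovered
    end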

    def state_to_table_entity
      { :start_time => @start_time, :intrumentation_key => @intrumentation_key, :table_id => @table_id,
        :storage_account_name => @storage_account_name, :container_name => @container_name, :blob_name => @blob_name,
        :uploaded_block_ids => @uploaded_block_ids.to_s, :uploaded_block_numbers => @uploaded_block_numbers.to_s,
        :uploaded_events_count => @uploaded_events_count, :uploaded_bytesize => @uploaded_bytesize, :oldest_event_time => @oldest_event_time,
        :log_state => @log_state, :sub_state => @sub_state
      }
    end

    def test_storage_recover
      proc do |reason, e| @recovery = :ok if :container_exist == reason || :create_container == reason end
    end

    def test_storage ( storage_account_name )
      @storage_account_name = storage_account_name
      @action = :test_storage
      @max_tries = 1
      @force_client = true # to enable getting a client even if all storage_accounts are marked dead
      @recoverable = [ :invalid_storage_key ]
      storage_io_block( test_storage_recover ) {
        if @recovery.nil? || :invalid_storage_key == @recovery
          container_name = "logstash-test-container"
          @info = "#{@action} #{@storage_account_name}"
          @client.blobClient.create_container( container_name ) unless @@configuration[:disable_blob_upload]
        end
      }
    end

    def test_notification_endpoint_recover
      proc do |reason, e| @recovery = :ok if :invalid_intrumentation_key == reason || :invalid_table_id == reason end
    end

    def test_notification_endpoint( storage_account_name )
      @storage_account_name = storage_account_name
      @action = :test_notification_endpoint
      @max_tries = 1
      @force_client = true # to enable getting a client even if all storage_accounts are marked dead
      @recoverable = [ ]
      success = storage_io_block( test_notification_endpoint_recover ) {
        if @recovery.nil?
          @container_name = "logstash-test-container"
          @blob_name = "logstash-test-blob"
          @table_id = GUID_NULL
          @intrumentation_key = GUID_NULL
          @info = "#{@action}"
          set_blob_sas_url
          payload = create_payload
          post_notification( @client.notifyClient, payload )
        end
      }
      sleep( 30 ) unless success
      success
    end

    def notify_recover
      proc do |reason, e|
        if :notify_failed_blob_not_accessible == reason
          @sub_state = reason
          @@failed_on_notify_retry_Qs[@storage_account_name] << state_to_tuple
        elsif :invalid_intrumentation_key == reason || :invalid_table_id == reason
          @sub_state = reason
          Channels.instance.channel( @intrumentation_key, @table_id ).failed_on_notify_retry_Q << state_to_tuple

        else
          @@endpoint_state_on = false
          if :notify_failed_blob_not_accessible == @sub_state
            @@failed_on_notify_retry_Qs[@storage_account_name] << state_to_tuple
          elsif :invalid_intrumentation_key == @sub_state || :invalid_table_id == @sub_state
            Channels.instance.channel( @intrumentation_key, @table_id ).failed_on_notify_retry_Q << state_to_tuple
          else
            @@failed_on_notification_endpoint_retry_Q << state_to_tuple
          end
        end
      end
    end

    def notify ( tuple = nil )
      tuple_to_state( tuple ) if tuple
      @action = :notify
      @force_client = true # to enable getting a client even if all storage_accounts are marked dead
      @recoverable = [ :notify_failed_blob_not_accessible, :io_failure, :service_unavailable ]
      success = storage_io_block( notify_recover ) {
        set_blob_sas_url
        payload = create_payload
        @@logger.debug { "notification payload: #{payload}" }
        @info = "#{@action.to_s} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}, blob_sas_url: #{@blob_sas_url}"

        # assume that exceptions can be raised due to this method:
        post_notification( @client.notifyClient, payload ) unless @@configuration[:disable_notification]
        @log_state = :notified
      }
      log_to_table_update if success
    end

    CREATE_EXIST_ERRORS = { :container => [ :create_container, :container_exist ], :table => [ :create_table, :table_exist ] }
    def create_exist_recovery( type, name = nil )
      prev_info = @info
      if CREATE_EXIST_ERRORS[type][0] == @recovery
        name ||= ( :table == type ? @@state_table_name : @container_name )
        @info = "create #{type} #{@storage_account_name}/#{name}"

        # assume that exceptions can be raised due to this method:
        yield name
        @@logger.info { "Succeeded to #{@info}" }
        @info = prev_info
      elsif CREATE_EXIST_ERRORS[type][1] == @recovery
        @@logger.info { "Succeeded (already exists) to #{@info}" }
        @info = prev_info
      end
    end

    def create_table_exist_recovery
      create_exist_recovery( :table ) { |name| @client.tableClient.create_table( name ) }
    end

    def create_container_exist_recovery
      create_exist_recovery( :container ) { |name| @client.blobClient.create_container( name ) }
    end

    # returns true on success
    def log_to_table_insert
      @action = :log_to_table_insert
      @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy, :entity_exist ]
      @info = "#{@action} #{@log_state} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
      success = storage_io_block( :uploading == @log_state ? proc do |reason, e| end : log_to_table_update_recover ) {
        create_table_exist_recovery
        if :entity_exist == @recovery
          raise NotRecoverableError if :uploading == @log_state
        else
          entity_values = state_to_table_entity
          entity_values[:PartitionKey] = "#{@@configuration[:azure_storage_blob_prefix]}-#{@log_state}"
          entity_values[:RowKey] = @blob_name
          @client.tableClient.insert_entity( @@state_table_name, entity_values )
        end
      }
    end

    def log_to_table_update_recover
      proc do |reason, e| @@failed_on_log_to_table_retry_Qs[@storage_account_name] << state_to_tuple end
    end

    def log_to_table_update ( tuple = nil )
      tuple_to_state( tuple ) if tuple
      if :uploading == @log_state
        log_to_table_delete
      elsif :committed == @log_state
        if log_to_table_insert && log_to_table_delete( nil, :uploading )
          State.instance.dec_pending_commits
          State.instance.inc_pending_notifications
          @@failed_on_notification_endpoint_retry_Q << state_to_tuple
        end
      elsif :notified == @log_state
        if (!@@save_notified_blobs_records || log_to_table_insert) && log_to_table_delete( nil, :committed )
          State.instance.dec_pending_notifications
        end
      end
    end

    # returns true on success
    def log_to_table_delete ( tuple = nil, state = nil )
      tuple_to_state( tuple ) if tuple
      state ||= @log_state
      @action = :log_to_table_delete
      @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy, :create_resource ]
      @info = "#{@action} #{state} #{@storage_account_name}/#{@container_name}/#{@blob_name}"

      success = storage_io_block( log_to_table_update_recover ) {
        create_table_exist_recovery
        if :create_resource == @recovery
          @@logger.info { "Note: delete entity failed, already deleted, #{@info}, state: #{state}, log_state: #{@log_state}" }
        else
          @client.tableClient.delete_entity( @@state_table_name, "#{@@configuration[:azure_storage_blob_prefix]}-#{state}", @blob_name )
        end
      }
    end

    # returns entities
    def log_to_table_query ( storage_account_name, filter , token )
      @storage_account_name = storage_account_name

      @action = :log_to_table_query
      @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy ]
      @info = "#{@action} #{@storage_account_name}/#{@@state_table_name}"

      entities = nil
      success = storage_io_block( proc do |reason, e| end ) {
        create_table_exist_recovery
        options = { :filter => filter }
        options[:continuation_token] = token if token
        entities = @client.tableClient.query_entities( @@state_table_name, options )
      }
      entities
    end

    def commit_recover
      proc do |reason, e| @@failed_on_commit_retry_Qs[@storage_account_name] << state_to_tuple end
    end

    def commit ( tuple = nil )
      tuple_to_state( tuple ) if tuple

      unless @uploaded_block_ids.empty?
        @action = :commit
        @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable ]
        success = storage_io_block( commit_recover ) {
          @info = "#{@action.to_s} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}"
          # assume that exceptions can be raised due to this method:
          @client.blobClient.commit_blob_blocks( @container_name, @blob_name, @uploaded_block_ids ) unless @@configuration[:disable_blob_upload]
          @log_state = :committed
        }
        # next stage
        log_to_table_update if success
      end
    end

    def upload_recover
      proc do |reason, e|
        unless @uploaded_block_ids.empty?
          info1 = "#{:commit} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}"
          @@logger.error { "Postpone to #{info1} (; retry later, error: #{e.inspect}" }
          @@failed_on_commit_retry_Qs[@storage_account_name] << state_to_tuple
          @uploaded_block_ids = [ ]
        end
        unless :io_all_dead == reason
          @recovery = :invalid_storage_account
        else
          Channels.instance.channel( @intrumentation_key, @table_id ).failed_on_upload_retry_Q << @block_to_upload
          @block_to_upload = nil
        end
      end
    end

    def upload ( block, to_commit = nil )
      @storage_account_name = nil if @uploaded_block_ids.empty?
      @block_to_upload = block
      block = nil # remove reference for GC
      exclude_storage_account_names = [ ]
      begin
        if @uploaded_block_ids.empty?
          @log_state = :uploading
          @uploaded_block_numbers = [ ]
          @uploaded_bytesize = 0
          @uploaded_events_count = 0
          @oldest_event_time = nil

          # remove the record of the previous upload that failed
          if @storage_account_name
            exclude_storage_account_names << @storage_account_name
            @@failed_on_log_to_table_retry_Qs[@storage_account_name] << state_to_tuple
          end
          set_conatainer_and_blob_names
          @storage_account_name = Clients.instance.get_random_active_storage( exclude_storage_account_names )
          unless @storage_account_name
            upload_recover.call( :io_all_dead, nil )
            return false
          end
          raise UploadRetryError unless log_to_table_insert
        end

        @action = :upload
        @block_info = "blocks: #{@block_to_upload.block_numbers}, events: #{@block_to_upload.events_count}, size: #{@block_to_upload.bytes.length}"
        @info = "#{@action} #{@storage_account_name}/#{@container_name}/#{@blob_name}, #{@block_info}, commitId: [\"#{100001 + @uploaded_block_ids.length}\"]"
        @recoverable = [ :invalid_storage_key, :invalid_storage_account, :io_failure, :service_unavailable, :container_exist, :create_container ]

        success = storage_io_block( upload_recover ) {
          create_container_exist_recovery
          block_id = "#{100001 + @uploaded_block_ids.length}"

          # assume that exceptions can be raised due to this method:
          @client.blobClient.put_blob_block( @container_name, @blob_name, block_id, @block_to_upload.bytes ) unless @@configuration[:disable_blob_upload]

          # upload success
          first_block_in_blob = @uploaded_block_ids.empty?
          @uploaded_block_ids << [ block_id ]
          @uploaded_block_numbers.concat( @block_to_upload.block_numbers )
          @uploaded_bytesize += @block_to_upload.bytes.length
          @uploaded_events_count += @block_to_upload.events_count
          @oldest_event_time ||= @block_to_upload.oldest_event_time

          # release memory
          bytesize = @block_to_upload.bytesize
          @block_to_upload.dispose
          @block_to_upload = nil
          State.instance.inc_pending_commits if first_block_in_blob
          State.instance.dec_upload_bytesize( bytesize )
          Telemetry.instance.track_event("uploading", {:properties => state_to_table_entity})
        }

        raise UploadRetryError if :invalid_storage_account == @recovery
        commit if success && to_commit
      rescue UploadRetryError
        @recovery = nil
        retry
      end
    end

    def update_commited_or_uncommited_list( table_entity )
      tuple = table_entity_to_tuple( table_entity )

      tuple_to_state( tuple )
      @action = :list_blob_blocks
      @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :container_exist, :create_container, :create_blob ]
      list_blob_blocks = nil
      success = storage_io_block( proc do |reason, e| end ) {
        @info = "#{@action} #{@storage_account_name}/#{@container_name}/#{@blob_name}"

        create_container_exist_recovery
        if :create_blob == @recovery
          list_blob_blocks = { :uncommitted => [ ], :committed => [ ] }
        else
          list_blob_blocks = @client.blobClient.list_blob_blocks( @container_name, @blob_name, { :blocklist_type => :all } ) unless :create_blob == @recovery
        end
      }

      if list_blob_blocks
        blocks = ( list_blob_blocks[:uncommitted].empty? ? list_blob_blocks[:committed] : list_blob_blocks[:uncommitted] )
        blocks.each do |block|
          @uploaded_block_ids << [ block.name ]
          @uploaded_bytesize += block.size
        end
        type = ( blocks.empty? || 0 == @uploaded_bytesize ? :upload_empty : blocks[0].type )

        @log_state = :committed if :committed == type
        { type => state_to_tuple }
      else
        nil
      end
    end

    def << ( block )
      @io_queue << block
    end

    private

    def storage_io_block( recover_later_proc, valid_recovery = nil )
      @recovery = nil
      @try_count = 1

      begin
        @client ||= Client.new( @storage_account_name, @force_client )
        yield
        disabled = :notify == @action ? @@configuration[:disable_notification] : @@configuration[:disable_blob_upload]
        @@logger.info { "Succeeded to #{disabled ? 'DISABLED ' : ''}#{@info}" }
        true

      rescue TypeError
        raise

      rescue StandardError => e
        @last_io_exception = e
        @recovery = nil
        retry if recover_retry?( e, recover_later_proc )
        false

      ensure
        @client = @client.dispose if @client
      end
    end
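
    # Editor's illustration, not part of the gem (delete_blob is a standard
    # azure-storage call, but this wrapper is invented): every I/O action above
    # follows the same pattern - set @action, @recoverable and @info, then run
    # the raw call inside storage_io_block with a proc that requeues the state
    # tuple when retries are exhausted.
    def example_delete_blob
      @action = :example_delete_blob
      @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable ]
      @info = "#{@action} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
      storage_io_block( proc do |reason, e| end ) {
        @client.blobClient.delete_blob( @container_name, @blob_name )
      }
    end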
|
732
|
+
|
733
|
+
|
734
|
+
    def recover_retry?( e, recover_later_proc )
      # HTTP error, most likely a server-side error
      if e.is_a?( Azure::Core::Http::HTTPError )

        if 404 == e.status_code && "ContainerNotFound" == e.type
          @recovery = :create_container

        elsif 404 == e.status_code && "TableNotFound" == e.type
          @recovery = :create_table

        elsif 404 == e.status_code && "BlobNotFound" == e.type
          @recovery = :create_blob

        elsif 404 == e.status_code && "ResourceNotFound" == e.type
          @recovery = :create_resource

        elsif 409 == e.status_code && "ContainerAlreadyExists" == e.type
          @recovery = :container_exist

        elsif 409 == e.status_code && "BlobAlreadyExists" == e.type
          @recovery = :blob_exist

        elsif 409 == e.status_code && "TableAlreadyExists" == e.type
          @recovery = :table_exist

        elsif 409 == e.status_code && "TableBeingDeleted" == e.type
          @recovery = :table_busy

        elsif 409 == e.status_code && "EntityAlreadyExists" == e.type
          @recovery = :entity_exist

        elsif 403 == e.status_code && "AuthenticationFailed" == e.type
          @recovery = :invalid_storage_key

        elsif 403 == e.status_code && "Unknown" == e.type && e.description.include?("Blob does not exist or not accessible.")
          @recovery = :notify_failed_blob_not_accessible

        elsif 400 == e.status_code && "Unknown" == e.type && e.description.include?("Invalid instrumentation key")
          @recovery = :invalid_intrumentation_key

        elsif 500 == e.status_code && "Unknown" == e.type && e.description.include?("Processing error")
          @recovery = :notification_process_down

        elsif 503 == e.status_code
          @recovery = :service_unavailable
        elsif 404 == e.status_code
          @recovery = :create_resource
        elsif 403 == e.status_code
          # TODO: this is raised while updating the log_table; decide how to handle it
          @recovery = :access_denied
        else
          puts "\n>>>> HTTP error - #{e.inspect} <<<<\n"
          @recovery = :http_unknown
          raise e if @@configuration[:stop_on_unknown_io_errors]
        end

      # communication failure (HTTP transport)
      elsif e.is_a?( Faraday::ClientError )
        @recovery = :io_failure

      # communication failure (lower-level I/O)
      elsif e.is_a?( IOError )
        @recovery = :io_failure

      # all storage accounts are dead, couldn't get a client (internal exception)
      elsif e.is_a?( StorageAccountsOffError )
        @recovery = :io_all_dead

      # an error that cannot be recovered from (internal exception)
      elsif e.is_a?( NotRecoverableError )
        @recovery = :not_recoverable

      elsif e.is_a?( NameError ) && e.message.include?( "uninitialized constant Azure::Core::Auth::Signer::OpenSSL" )
        sleep( 1 )
        @recovery = :io_failure

      elsif e.is_a?( NameError ) && e.message.include?( "uninitialized constant Azure::Storage::Auth::SharedAccessSignature" )
        sleep( 1 )
        @recovery = :io_failure

      else
        # UNKNOWN error, e.g. #<NameError: uninitialized constant Azure::Core::Auth::Signer::OpenSSL>
        puts "\n>>>> UNKNOWN error - #{e.inspect} <<<<\n"
        raise e

      end

      reason = @recovery
      if @recovery && @recoverable.include?( @recovery )
        case @recovery
        when :container_exist, :table_exist, :entity_exist, :create_container, :create_table
          # ignore the error; logging is intentionally disabled here
          # @@logger.error { "Failed to #{@info} ;( recovery: continue, error: #{e.inspect}" }

        when :invalid_storage_key, :notify_failed_blob_not_accessible
          if @client.switch_storage_account_key!
            @@logger.error { "Failed to #{@info} ;( recovery: switched to secondary storage key, error: #{e.inspect}" }
          else
            @client = @client.dispose( :auth_to_storage_failed ) if @client && :invalid_storage_key == @recovery
            @recovery = nil
          end

        when :table_busy
          @client = @client.dispose if @client
          sleep( @@io_retry_delay )
          @@logger.error { "Failed to #{@info} ;( recovery: retry, error: #{e.inspect}" }

        when :io_failure, :service_unavailable, :notification_process_down, :invalid_intrumentation_key, :invalid_table_id
          if @try_count < @max_tries
            @client = @client.dispose if @client
            sleep( @@io_retry_delay )
            @@logger.error { "Failed to #{@info} ;( recovery: retry, try #{@try_count} / #{@max_tries}, error: #{e.inspect}" }
            @try_count += 1
          else
            if :invalid_intrumentation_key == @recovery
              Channels.instance.mark_invalid_intrumentation_key( @intrumentation_key )
            elsif :invalid_table_id == @recovery
              Channels.instance.mark_invalid_table_id( @table_id )
            elsif :io_failure == @recovery || ( :service_unavailable == @recovery && :notify != @action )
              @client = @client.dispose( :io_to_storage_failed ) if @client
            end
            @recovery = nil
          end
        end
      else
        @recovery = nil
      end

      if @recovery
        true
      else
        recover_later_proc.call( reason, e )
        @@logger.error { "Failed to #{@info} ; retry later, error: #{e.inspect}" } unless :ok == @recovery
        :ok == @recovery
      end

      # Blob service error codes - msdn.microsoft.com/en-us/library/azure/dd179439.aspx
      # ConnectionFailed - problem with connection
      # ParsingError - problem with request/response payload
      # ResourceNotFound, SSLError, TimeoutError
    end

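    # Derives the container name (one per UTC day) and a unique, timestamped blob
    # name from the configured prefixes. A hypothetical example, assuming both
    # prefixes are "logstash" and the event format extension is "json":
    #   container: logstash-2016-07-29
    #   blob:      logstash_ikey-<ikey>_table-<table_id>_id-0001_2016-07-29-12-00-00-000.json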
    def set_conatainer_and_blob_names
      time_utc = Time.now.utc
      id = @id.to_s.rjust( 4, "0" )
      strtime = time_utc.strftime( "%F" )
      @container_name = "#{@@configuration[:azure_storage_container_prefix]}-#{strtime}"

      strtime = time_utc.strftime( "%F-%H-%M-%S-%L" )
      @blob_name = "#{@@configuration[:azure_storage_blob_prefix]}_ikey-#{@intrumentation_key}_table-#{@table_id}_id-#{id}_#{strtime}.#{@event_format_ext}"
    end

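    # Builds the JSON notification that tells Application Insights where to fetch
    # the blob. A sketch of the resulting payload (field values are illustrative):
    #   {
    #     "data" => { "baseType" => DATA_BASE_TYPE,
    #                 "baseData" => { "ver" => BASE_DATA_REQUIRED_VERSION,
    #                                 "blobSasUri" => "https://...", "sourceName" => "<table_id>",
    #                                 "sourceVersion" => "..." } },
    #     "ver" => ..., "name" => REQUEST_NAME, "time" => "2016-07-29T12:00:00Z", "iKey" => "<ikey>"
    #   }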
    def create_payload
      notification_hash = {
        :data => {
          :baseType => DATA_BASE_TYPE,
          :baseData => {
            :ver => BASE_DATA_REQUIRED_VERSION,
            :blobSasUri => @blob_sas_url.to_s,
            :sourceName => @table_id,
            :sourceVersion => @@configuration[:notification_version].to_s
          }
        },
        :ver => @@configuration[:notification_version],
        :name => REQUEST_NAME,
        :time => Time.now.utc.iso8601,
        :iKey => @intrumentation_key
      }
      notification_hash.to_json
    end

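    # POSTs the notification payload to the configured notification endpoint using
    # Azure::Core::Http::HttpRequest, returning the HTTP response to the caller.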
    def post_notification( http_client, body )
      request = Azure::Core::Http::HttpRequest.new( :post, @@configuration[:notification_endpoint], { :body => body, :client => http_client } )
      request.headers['Content-Type'] = 'application/json; charset=utf-8'
      request.headers['Accept'] = 'application/json'
      @@logger.debug { "send notification : \n endpoint: #{@@configuration[:notification_endpoint]}\n body : #{body}" }
      response = request.call
    end

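    # Generates a read-only ("r"), time-limited SAS URL for the uploaded blob, so the
    # notification consumer can fetch the blob without holding the storage account key.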
    def set_blob_sas_url
      blob_url = "https://#{@storage_account_name}.blob.core.windows.net/#{@container_name}/#{@blob_name}"
      options_and_constrains = { :permissions => "r", :resource => "b", :expiry => ( Time.now.utc + @@configuration[:blob_access_expiry_time] ).iso8601 }
      @blob_sas_url = @client.storage_auth_sas.signed_uri( URI( blob_url ), options_and_constrains )
    end

  end
end