logstash-output-application_insights 0.1.3 → 0.1.4
- checksums.yaml +4 -4
- data/README.md +29 -16
- data/lib/logstash/outputs/application_insights.rb +47 -62
- data/lib/logstash/outputs/application_insights/blob.rb +224 -383
- data/lib/logstash/outputs/application_insights/channel.rb +4 -4
- data/lib/logstash/outputs/application_insights/channels.rb +24 -24
- data/lib/logstash/outputs/application_insights/client.rb +10 -4
- data/lib/logstash/outputs/application_insights/config.rb +34 -27
- data/lib/logstash/outputs/application_insights/constants.rb +16 -6
- data/lib/logstash/outputs/application_insights/notification_recovery.rb +101 -0
- data/lib/logstash/outputs/application_insights/shutdown.rb +3 -0
- data/lib/logstash/outputs/application_insights/shutdown_recovery.rb +140 -0
- data/lib/logstash/outputs/application_insights/storage_cleanup.rb +7 -7
- data/lib/logstash/outputs/application_insights/storage_recovery.rb +104 -0
- data/lib/logstash/outputs/application_insights/utils.rb +17 -0
- data/lib/logstash/outputs/application_insights/validate_notification.rb +37 -0
- data/lib/logstash/outputs/application_insights/validate_storage.rb +41 -0
- data/lib/logstash/outputs/application_insights/version.rb +1 -1
- data/logstash-output-application-insights.gemspec +1 -1
- metadata +8 -3
@@ -22,7 +22,7 @@
 class LogStash::Outputs::Application_insights
   class Channel
 
-    attr_reader :
+    attr_reader :instrumentation_key
     attr_reader :table_id
     attr_reader :failed_on_upload_retry_Q
     attr_reader :failed_on_notify_retry_Q
@@ -31,14 +31,14 @@ class LogStash::Outputs::Application_insights
 
     public
 
-    def initialize (
+    def initialize ( instrumentation_key, table_id )
       @closing = false
       configuration = Config.current
 
       @logger = configuration[:logger]
 
-      @logger.debug { "Create a new channel,
-      @
+      @logger.debug { "Create a new channel, instrumentation_key / table_id : #{instrumentation_key} / #{table_id}" }
+      @instrumentation_key = instrumentation_key
       @table_id = table_id
       set_table_properties( configuration )
       @semaphore = Mutex.new
@@ -29,11 +29,11 @@ class LogStash::Outputs::Application_insights
 
       @logger = configuration[:logger]
 
-      @
+      @instrumentation_key_table_id_db = {}
       @channels = [ ]
       @create_semaphore = Mutex.new
 
-      @
+      @default_instrumentation_key = configuration[:instrumentation_key]
       @default_table_id = configuration[:table_id]
       @tables = configuration[:tables]
 
@@ -46,29 +46,29 @@ class LogStash::Outputs::Application_insights
 
     def receive ( event, encoded_event )
       if LogStash::SHUTDOWN == event
-        @logger.info { "received a LogStash::SHUTDOWN event
+        @logger.info { "received a LogStash::SHUTDOWN event" }
 
       elsif LogStash::FLUSH == event
-        @logger.info { "received a LogStash::FLUSH event
-
-
-
-        intrumentation_key = event[METADATA_FIELD_INSTRUMENTATION_KEY] || event[FIELD_INSTRUMENTATION_KEY] || ( @tables[table_id][TABLE_PROPERTY_INSTRUMENTATION_KEY] if @tables[table_id] ) || @default_intrumentation_key
+        @logger.info { "received a LogStash::FLUSH event" }
+      else
+        table_id = event[METADATA_FIELD_TABLE_ID] || event[FIELD_TABLE_ID] || @default_table_id
+        instrumentation_key = event[METADATA_FIELD_INSTRUMENTATION_KEY] || event[FIELD_INSTRUMENTATION_KEY] || ( @tables[table_id][:instrumentation_key] if @tables[table_id] ) || @default_instrumentation_key
 
-
-
+        @flow_control.pass_or_wait
+        channel( instrumentation_key, table_id ) << event
+      end
     end
 
 
-    def channel (
+    def channel ( instrumentation_key, table_id )
       begin
-        dispatch_channel(
+        dispatch_channel( instrumentation_key, table_id )
 
       rescue NoChannelError
         begin
-          create_channel(
+          create_channel( instrumentation_key, table_id )
         rescue ChannelExistError # can happen due to race conditions
-          dispatch_channel(
+          dispatch_channel( instrumentation_key, table_id )
         end
       end
     end
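The reworked `receive` method above resolves the target table and instrumentation key per event, falling back from event metadata, to event fields, to the per-table `tables` setting, and finally to the plugin-wide defaults. Below is a minimal standalone sketch of that fallback chain; the field-name constants mirror the ones in constants.rb, while the event hash and sample values are hypothetical (in the real plugin the event is a LogStash::Event, not a plain hash).

```ruby
# Sketch of the per-event routing fallback used by Channels#receive in 0.1.4.
METADATA_FIELD_TABLE_ID = "[@metadata]table_id"
FIELD_TABLE_ID = "table_id"
METADATA_FIELD_INSTRUMENTATION_KEY = "[@metadata]instrumentation_key"
FIELD_INSTRUMENTATION_KEY = "instrumentation_key"

def resolve_route ( event, tables, default_table_id, default_instrumentation_key )
  table_id = event[METADATA_FIELD_TABLE_ID] || event[FIELD_TABLE_ID] || default_table_id
  instrumentation_key = event[METADATA_FIELD_INSTRUMENTATION_KEY] ||
                        event[FIELD_INSTRUMENTATION_KEY] ||
                        ( tables[table_id][:instrumentation_key] if tables[table_id] ) ||
                        default_instrumentation_key
  [ instrumentation_key, table_id ]
end

# hypothetical sample data
tables = { "my-table" => { :instrumentation_key => "11111111-1111-1111-1111-111111111111" } }
event  = { "table_id" => "my-table" }
p resolve_route( event, tables, "default-table", "00000000-0000-0000-0000-000000000000" )
# => ["11111111-1111-1111-1111-111111111111", "my-table"]
```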
@@ -89,13 +89,13 @@ class LogStash::Outputs::Application_insights
     private
 
     # return channel
-    def dispatch_channel (
+    def dispatch_channel ( instrumentation_key, table_id )
       begin
-        channel = @
-        channel.
+        channel = @instrumentation_key_table_id_db[instrumentation_key][table_id]
+        channel.instrumentation_key # don't remove it, it is to emit an exception in case channel not created yet'
        channel
       rescue => e
-        raise NoChannelError if @
+        raise NoChannelError if @instrumentation_key_table_id_db[instrumentation_key].nil? || @instrumentation_key_table_id_db[instrumentation_key][table_id].nil?
        @logger.error { "Channel dispatch failed - error: #{e.inspect}" }
        raise e
      end
@@ -103,12 +103,12 @@ class LogStash::Outputs::Application_insights
 
 
     # return channel
-    def create_channel (
+    def create_channel ( instrumentation_key, table_id )
       @create_semaphore.synchronize {
-        raise ChannelExistError if @
-        @
-        channel = Channel.new(
-        @
+        raise ChannelExistError if @instrumentation_key_table_id_db[instrumentation_key] && @instrumentation_key_table_id_db[instrumentation_key][table_id]
+        @instrumentation_key_table_id_db[instrumentation_key] ||= {}
+        channel = Channel.new( instrumentation_key, table_id )
+        @instrumentation_key_table_id_db[instrumentation_key][table_id] = channel
        @channels << channel
        channel
      }
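`dispatch_channel` and `create_channel` above maintain a two-level hash keyed by instrumentation key and then table id, creating channels lazily under a mutex and retrying dispatch when another thread wins the race. The following self-contained illustration shows the same lookup-or-create shape in simplified form, using `||=` inside the lock instead of the exception-based retry in the diff; the `FakeChannel` struct is a placeholder, not the plugin's `Channel` class.

```ruby
require 'thread'

# Toy stand-in for the plugin's Channel; only here to make the sketch runnable.
FakeChannel = Struct.new( :instrumentation_key, :table_id )

db = {}                      # { instrumentation_key => { table_id => channel } }
create_semaphore = Mutex.new

get_channel = lambda do |instrumentation_key, table_id|
  channel = db[instrumentation_key] && db[instrumentation_key][table_id]
  return channel if channel
  create_semaphore.synchronize {
    # re-check inside the lock: another thread may have created it meanwhile
    db[instrumentation_key] ||= {}
    db[instrumentation_key][table_id] ||= FakeChannel.new( instrumentation_key, table_id )
  }
end

c1 = get_channel.call( "ikey-1", "table-a" )
c2 = get_channel.call( "ikey-1", "table-a" )
p c1.equal?( c2 )   # => true, the same channel object is reused
```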
@@ -122,7 +122,7 @@ class LogStash::Outputs::Application_insights
       end
     end
 
-    def
+    def mark_invalid_instrumentation_key ( instrumentation_key )
       # TODO should go to lost and found container
     end
 
@@ -98,12 +98,18 @@ class LogStash::Outputs::Application_insights
 
     def set_current_storage_account_client
       configuration = Config.current
-
-
+      storage_access_key = @storage_account[:keys][@current_storage_account_key_index]
+
+      options = {
+        :storage_account_name => @storage_account_name,
+        :storage_access_key => storage_access_key,
+        :storage_blob_host => "https://#{@storage_account_name}.#{:blob}.#{configuration[:azure_storage_host_suffix]}",
+        :storage_table_host => "https://#{@storage_account_name}.#{:table}.#{configuration[:azure_storage_host_suffix]}"
+      }
       options[:ca_file] = configuration[:ca_file] unless configuration[:ca_file].empty?
-      @current_azure_storage_client = Azure::Storage::Client.new( options )
 
-      @
+      @current_azure_storage_client = Azure::Storage::Client.new( options )
+      @current_azure_storage_auth_sas = Azure::Storage::Auth::SharedAccessSignature.new( @storage_account_name, storage_access_key )
     end
 
   end
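The rewritten `set_current_storage_account_client` builds the blob and table endpoints from the new `azure_storage_host_suffix` setting instead of relying on the SDK default, which makes non-public-cloud endpoints configurable. A minimal sketch of just the host composition, with an example account name (the suffix shown is the 0.1.4 default):

```ruby
# Endpoint composition as in client.rb (0.1.4): https://<account>.<service>.<suffix>
def storage_hosts ( storage_account_name, azure_storage_host_suffix )
  {
    :storage_blob_host  => "https://#{storage_account_name}.#{:blob}.#{azure_storage_host_suffix}",
    :storage_table_host => "https://#{storage_account_name}.#{:table}.#{azure_storage_host_suffix}"
  }
end

p storage_hosts( "mystorageaccount", "core.windows.net" )
# => {:storage_blob_host=>"https://mystorageaccount.blob.core.windows.net",
#     :storage_table_host=>"https://mystorageaccount.table.core.windows.net"}
```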
@@ -79,26 +79,25 @@ class LogStash::Outputs::Application_insights
         configuration[config_name] = config_value
 
       when :azure_storage_container_prefix
-
-
-        len = 63 - "
+        unless config_value.empty?
+          azure_storage_container_prefix = validate_and_adjust( config_name, config_value, String )
+          len = 63 - "#{AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX}--yyyy-mm-dd".length
           validate_max( "azure_storage_container_prefix length", azure_storage_container_prefix.length, len )
-          azure_storage_container_prefix
+          azure_storage_container_prefix = azure_storage_container_prefix.downcase
+          container_name = "#{AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX}-#{azure_storage_container_prefix}-yyyy-mm-dd"
+          raise ConfigurationError, "#{config_name.to_s} must have only alphanumeric characters and dash, cannot start or end with a dash, and a dash cannot follow a dash" unless Utils.valid_container_name?( container_name )
+          configuration[config_name] = "-#{azure_storage_container_prefix}"
         end
-
-
-
-
-
-      when :azure_storage_azure_storage_table_prefix
-        azure_storage_table_prefix = validate_and_adjust( config_name, config_value, String )
-        unless azure_storage_table_prefix.empty?
-          len = 63 - "-#{AZURE_STORAGE_TABLE_LOGSTASH_PREFIX}yyyymmdd".length
+
+      when :azure_storage_table_prefix
+        unless config_value.empty?
+          azure_storage_table_prefix = validate_and_adjust( config_name, config_value, String )
+          len = 63 - "#{AZURE_STORAGE_TABLE_LOGSTASH_PREFIX}yyyymmdd".length
           validate_max( "azure_storage_table_prefix length", azure_storage_table_prefix.length, len )
+          table_name = "#{AZURE_STORAGE_TABLE_LOGSTASH_PREFIX}#{azure_storage_table_prefix}yyyymmdd"
+          raise ConfigurationError, "#{config_name} must have only alphanumeric" unless Utils.valid_table_name?( table_name )
+          configuration[config_name] = azure_storage_table_prefix
         end
-        table_name = "#{azure_storage_table_prefix}#{AZURE_STORAGE_TABLE_LOGSTASH_PREFIX}yyyymmdd"
-        raise ConfigurationError, "#{config_name} must have only alphanumeric" unless Utils.valid_table_name?( table_name )
-        configuration[config_name] = azure_storage_table_prefix + AZURE_STORAGE_TABLE_LOGSTASH_PREFIX
 
       when :ca_file
         config_value = validate_and_adjust( config_name, config_value, String )
@@ -107,17 +106,21 @@ class LogStash::Outputs::Application_insights
         end
         configuration[config_name] = validate_and_adjust( config_name, config_value, String )
 
+      when :azure_storage_host_suffix
+        config_value = validate_and_adjust( config_name, config_value, String )
+        unless config_value.empty?
+          raise ConfigurationError, "#{config_name} must have a valid host DNS address" unless Utils.dns_address?( config_value )
+        end
+        configuration[config_name] = validate_and_adjust( config_name, config_value, String )
+
       when :azure_storage_blob_prefix
-
-
-        len = 1024 - "
+        unless config_value.empty?
+          azure_storage_blob_prefix = validate_and_adjust( config_name, config_value, String )
+          len = 1024 - "#{AZURE_STORAGE_BLOB_LOGSTASH_PREFIX}//ikey-#{INSTRUMENTATION_KEY_TEMPLATE}/table-#{TABLE_ID_TEMPLATE}/yyyy-mm-dd-HH-MM-SS-LLL_0000.json".length
           validate_max( "azure_storage_blob_prefix length", azure_storage_blob_prefix.length, len )
-
+          raise ConfigurationError, "#{config_name.to_s} doesn't meet url format" unless Utils.url?( "http://storage/container/#{azure_storage_blob_prefix}_ikey-#{INSTRUMENTATION_KEY_TEMPLATE}_table-#{TABLE_ID_TEMPLATE}.json" )
+          configuration[config_name] = "/#{azure_storage_blob_prefix}"
         end
-        azure_storage_blob_prefix += AZURE_STORAGE_BLOB_LOGSTASH_PREFIX
-
-        raise ConfigurationError, "#{config_name.to_s} doesn't meet url format" unless Utils.url?( "http://storage/container/#{azure_storage_blob_prefix}_ikey-#{INSTRUMENTATION_KEY_TEMPLATE}_table-#{TABLE_ID_TEMPLATE}.json" )
-        configuration[config_name] = azure_storage_blob_prefix
 
       when :table_id
         configuration[config_name] = validate_and_adjust_guid( config_name, config_value )
@@ -200,6 +203,10 @@ class LogStash::Outputs::Application_insights
       end
     }
     validate_and_adjust_table_properties!( configuration, configuration )
+
+    configuration[:state_table_name] = "#{AZURE_STORAGE_TABLE_LOGSTASH_PREFIX}#{configuration[:azure_storage_table_prefix]}#{STATE_TABLE_NAME}"
+    configuration[:test_storage_container] = "#{AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX}#{configuration[:azure_storage_container_prefix]}-#{STORAGE_TEST_CONTAINER_NAME}"
+    configuration[:partition_key_prefix] = configuration[:azure_storage_blob_prefix].gsub( "/", "" )
     @@configuration = configuration
   end
 
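Taken together, the prefix validation and the new derived settings above mean every Azure artifact name is assembled from the fixed `logstash` prefix plus the user's prefix: the state table from the table prefix, the test container from the container prefix, and the partition-key prefix from the blob prefix with slashes stripped. A small sketch of that derivation; the fixed pieces come from constants.rb, while the prefix values are hypothetical examples already in the post-validation form (`-<value>` and `/<value>`).

```ruby
# How 0.1.4 derives storage names from the configured prefixes (config.rb).
AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX = "logstash"
AZURE_STORAGE_TABLE_LOGSTASH_PREFIX = "logstash"
STATE_TABLE_NAME = "BlobsState"
STORAGE_TEST_CONTAINER_NAME = "test-container"

container_prefix = "-myapp"   # stored as "-<user value>" after validation
table_prefix     = "myapp"
blob_prefix      = "/myhost"  # stored as "/<user value>" after validation

state_table_name       = "#{AZURE_STORAGE_TABLE_LOGSTASH_PREFIX}#{table_prefix}#{STATE_TABLE_NAME}"
test_storage_container = "#{AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX}#{container_prefix}-#{STORAGE_TEST_CONTAINER_NAME}"
partition_key_prefix   = blob_prefix.gsub( "/", "" )

p state_table_name        # => "logstashmyappBlobsState"
p test_storage_container  # => "logstash-myapp-test-container"
p partition_key_prefix    # => "myhost"
```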
@@ -220,8 +227,8 @@ class LogStash::Outputs::Application_insights
 
       case property_name.downcase
 
-      when :
-        properties[:
+      when :instrumentation_key
+        properties[:instrumentation_key] = validate_and_adjust_guid( info, property_value )
       when :blob_serialization
         property_value = property_value.downcase
         raise ConfigurationError, "#{info}, can be set to only one of the following values: #{VALID_EXT_EVENT_FORMAT}" unless VALID_EXT_EVENT_FORMAT.include?( property_value )
@@ -281,7 +288,7 @@ class LogStash::Outputs::Application_insights
   def self.validate_and_adjust_ext ( property, ext, prefix )
     ext = validate_and_adjust( property, ext, String )
     raise ConfigurationError, "#{property.to_s} must be a valid extension string, have only alphanumeric, dash and underline characters" unless Utils.ext?( ext )
-    len = 1024 - "#{
+    len = 1024 - "#{AZURE_STORAGE_BLOB_LOGSTASH_PREFIX}#{prefix}/ikey-#{INSTRUMENTATION_KEY_TEMPLATE}/table-#{TABLE_ID_TEMPLATE}/yyyy-mm-dd-HH-MM-SS-LLL_0000".length
     raise ConfigurationError, "#{property.to_s} length cannot be more than #{len} characters" unless ext.length <= len
     ext
   end
@@ -26,7 +26,8 @@ class LogStash::Outputs::Application_insights
       :notification_version => @notification_version || DEFAULT_NOTIFICATION_VERSION,
       :event_separator => @event_separator || DEFAULT_EVENT_SEPARATOR,
 
-      :
+      :azure_storage_host_suffix => @azure_storage_host_suffix || DEFAULT_AZURE_STORAGE_HOST_SUFFIX,
+      :application_insights_endpoint => @application_insights_endpoint || DEFAULT_APPLICATION_INSIGHTS_ENDPOINT,
       :azure_storage_blob_prefix => @azure_storage_blob_prefix || DEFAULT_AZURE_STORAGE_BLOB_PREFIX || Utils.to_storage_name( Socket.gethostname.strip ) || "",
       :azure_storage_container_prefix => @azure_storage_container_prefix || DEFAULT_AZURE_STORAGE_CONTAINER_PREFIX || Utils.to_storage_name( Socket.gethostname.strip ) || "",
       :azure_storage_table_prefix => @azure_storage_table_prefix || DEFAULT_AZURE_STORAGE_TABLE_PREFIX || Utils.to_storage_name( Socket.gethostname.strip ) || "",
@@ -43,6 +44,9 @@ class LogStash::Outputs::Application_insights
       :blob_retention_time => @blob_retention_time || DEFAULT_BLOB_RETENTION_TIME,
       :blob_access_expiry_time => @blob_access_expiry_time || DEFAULT_BLOB_ACCESS_EXPIRY_TIME,
 
+      :validate_notification => @validate_notification || DEFAULT_VALIDATE_NOTIFICATION,
+      :validate_storage => @validate_storage || DEFAULT_VALIDATE_STORAGE,
+
       :resurrect_delay => @resurrect_delay || DEFAULT_STORAGE_RESURRECT_DELAY,
       :io_retry_delay => @io_retry_delay || DEFAULT_IO_RETRY_DELAY,
       :io_max_retries => @io_max_retries || DEFAULT_IO_MAX_RETRIES,
@@ -61,7 +65,7 @@ class LogStash::Outputs::Application_insights
 
       :tables => @tables || { },
       :table_id => @table_id || DEFAULT_TABLE_ID,
-      :
+      :instrumentation_key => @instrumentation_key || DEFAULT_INSTRUMENTATION_KEY,
       :table_columns => @table_columns,
       :case_insensitive_columns => @case_insensitive_columns || DEFAULT_CASE_INSENSITIVE,
       :serialized_event_field => @serialized_event_field,
@@ -75,7 +79,8 @@ class LogStash::Outputs::Application_insights
 
   BOOLEAN_PROPERTIES = [ :disable_notification, :disable_blob_upload,
                          :stop_on_unknown_io_errors, :disable_telemetry,
-                         :disable_cleanup, :delete_not_notified_blobs,
+                         :disable_cleanup, :delete_not_notified_blobs,
+                         :validate_notification, :validate_storage,
                          :save_notified_blobs_records, :case_insensitive_columns,
                          :table_columns, :serialized_event_field ]
 
@@ -135,12 +140,13 @@ class LogStash::Outputs::Application_insights
   MIN_FLOW_CONTROL_DELAY = 0.1 # in seconds, 1 seconds, can be less than 1 seconds, like 0.5, 0.1
   MAX_FLOW_CONTROL_DELAY = 0 # in seconds, 1 seconds, can be less than 1 seconds, like 0.5, 0.1
 
-  METADATA_FIELD_INSTRUMENTATION_KEY = "[@metadata]
+  METADATA_FIELD_INSTRUMENTATION_KEY = "[@metadata]instrumentation_key"
   METADATA_FIELD_TABLE_ID = "[@metadata]table_id"
-  FIELD_INSTRUMENTATION_KEY = "
+  FIELD_INSTRUMENTATION_KEY = "instrumentation_key"
   FIELD_TABLE_ID = "table_id"
 
   STATE_TABLE_NAME = "BlobsState"
+  STORAGE_TEST_CONTAINER_NAME = "test-container"
 
   AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX = "logstash" # lower case only, dash allowed
   AZURE_STORAGE_BLOB_LOGSTASH_PREFIX = "logstash"
|
@@ -181,7 +187,8 @@ class LogStash::Outputs::Application_insights
|
|
181
187
|
DEFAULT_BLOB_RETENTION_TIME = 60 * 60 * 24 * 7 # in seconds - one week
|
182
188
|
DEFAULT_BLOB_ACCESS_EXPIRY_TIME = 60 * 60 * 24 * 1 # in seconds - one day
|
183
189
|
DEFAULT_STORAGE_RESURRECT_DELAY = 10
|
184
|
-
|
190
|
+
DEFAULT_APPLICATION_INSIGHTS_ENDPOINT = "https://dc.services.visualstudio.com/v2/track"
|
191
|
+
DEFAULT_AZURE_STORAGE_HOST_SUFFIX = "core.windows.net"
|
185
192
|
DEFAULT_NOTIFICATION_VERSION = 1
|
186
193
|
DEFAULT_DISABLE_NOTIFICATION = false
|
187
194
|
DEFAULT_DISABLE_BLOB_UPLOAD = false
|
@@ -193,6 +200,9 @@ class LogStash::Outputs::Application_insights
 
   DEFAULT_CASE_INSENSITIVE = false
 
+  DEFAULT_VALIDATE_NOTIFICATION = false
+  DEFAULT_VALIDATE_STORAGE = false
+
   DEFAULT_LOGGER_FILES = [ "logstash-output-application-insights.log" ]
   DEFAULT_LOG_LEVEL = "INFO"
   DEFAULT_LOGGER_PROGNAME = "AI"
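The constants above carry the defaults behind the new 0.1.4 options: an explicit Application Insights notification endpoint, a configurable Azure storage host suffix, and two opt-in validation flags, all resolved through the usual `@option || DEFAULT` pattern shown in the earlier hunks. A small sketch of that resolution; the instance-variable values below are hypothetical user settings, the constants are the defaults from this diff.

```ruby
# Default-resolution pattern used for the new 0.1.4 settings.
DEFAULT_APPLICATION_INSIGHTS_ENDPOINT = "https://dc.services.visualstudio.com/v2/track"
DEFAULT_AZURE_STORAGE_HOST_SUFFIX = "core.windows.net"
DEFAULT_VALIDATE_NOTIFICATION = false
DEFAULT_VALIDATE_STORAGE = false

@application_insights_endpoint = nil   # not set by the user
@azure_storage_host_suffix = nil       # not set by the user
@validate_notification = nil           # not set by the user
@validate_storage = true               # hypothetical user override

configuration = {
  :application_insights_endpoint => @application_insights_endpoint || DEFAULT_APPLICATION_INSIGHTS_ENDPOINT,
  :azure_storage_host_suffix     => @azure_storage_host_suffix || DEFAULT_AZURE_STORAGE_HOST_SUFFIX,
  :validate_notification         => @validate_notification || DEFAULT_VALIDATE_NOTIFICATION,
  :validate_storage              => @validate_storage || DEFAULT_VALIDATE_STORAGE
}

p configuration[:azure_storage_host_suffix]  # => "core.windows.net"
p configuration[:validate_storage]           # => true
```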
@@ -0,0 +1,101 @@
+# encoding: utf-8
+
+# ----------------------------------------------------------------------------------
+# Logstash Output Application Insights
+#
+# Copyright (c) Microsoft Corporation
+#
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#
+# See the Apache Version 2.0 License for specific language governing
+# permissions and limitations under the License.
+# ----------------------------------------------------------------------------------
+class LogStash::Outputs::Application_insights
+  class Notification_recovery
+
+    public
+
+    def initialize
+      configuration = Config.current
+      @logger = configuration[:logger]
+      @storage_account_name_key = configuration[:storage_account_name_key]
+      @queue = Queue.new
+
+      @closing = nil
+      @thread = nil
+    end
+
+    def start
+      @thread = recovery_thread
+    end
+
+    def recover_later ( tuple )
+      @notification_state_on = false
+      @queue << tuple
+    end
+
+    def enqueue ( tuple )
+      @queue << tuple
+    end
+
+    def close
+      @closing = true
+      # @thread.join
+    end
+
+    private
+
+    def stopped?
+      @closing
+    end
+
+    def init_queues ( storage_account_name_key, queues )
+      storage_account_name_key.each do |storage_account_name, storage_account_keys|
+        queues.each_key do |action|
+          queues[action][storage_account_name] = Queue.new
+        end
+      end
+    end
+
+    def recovery_thread
+      Thread.new do
+        blob = Blob.new
+        counter = Concurrent::AtomicFixnum.new(0)
+
+        loop do
+          tuple = @queue.pop
+          Stud.stoppable_sleep(Float::INFINITY, 1) { state_on?( blob ) && 10 > counter.value }
+
+          counter.increment
+          Thread.new( counter, tuple ) do |counter, tuple|
+            Blob.new.send( :notify, tuple )
+            counter.decrement
+          end
+          tuple = nil # release for GC
+        end
+      end
+    end
+
+    def state_on? ( blob )
+      @notification_state_on ||= blob.test_notification( @storage_account_name_key[0][0] )
+    end
+
+    public
+
+    @@instance = Notification_recovery.new
+
+    def self.instance
+      @@instance
+    end
+
+    private_class_method :new
+  end
+end
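Notification_recovery is a new process-wide singleton: `new` is made private and a single `@@instance` is created when the class loads, so callers only ever go through `Notification_recovery.instance`, start its background thread once, and hand it tuples to re-notify. The sketch below reproduces that singleton shape in a generic, self-contained class; it is not the plugin's code, and the queue-draining body is a placeholder rather than the real retry logic.

```ruby
# Generic version of the singleton shape used by Notification_recovery and
# Shutdown_recovery: an eagerly created class-level instance plus a private
# constructor, so Worker.instance is the only entry point.
require 'thread'

class RecoveryWorker
  def initialize
    @queue = Queue.new
    @thread = nil
  end

  def start
    @thread = Thread.new do
      loop do
        tuple = @queue.pop
        break if :shutdown == tuple
        # ... retry the failed work described by tuple (placeholder) ...
      end
    end
  end

  def enqueue ( tuple )
    @queue << tuple
  end

  def close
    @queue << :shutdown
    @thread && @thread.join
  end

  @@instance = new
  def self.instance
    @@instance
  end
  private_class_method :new
end

RecoveryWorker.instance.start
RecoveryWorker.instance.enqueue( [ "container-name", "blob-name" ] )  # hypothetical tuple
RecoveryWorker.instance.close
```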
@@ -48,7 +48,10 @@ class LogStash::Outputs::Application_insights
     end
     display_msg( "all events were uploaded to Azure storage" )
 
+    # close all blobs activity
     Blob.close
+
+    # close all channels activity
     @channels.close
 
     # wait for all uploads to commit
@@ -0,0 +1,140 @@
+# encoding: utf-8
+
+# ----------------------------------------------------------------------------------
+# Logstash Output Application Insights
+#
+# Copyright (c) Microsoft Corporation
+#
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the License);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#
+# See the Apache Version 2.0 License for specific language governing
+# permissions and limitations under the License.
+# ----------------------------------------------------------------------------------
+class LogStash::Outputs::Application_insights
+  class Shutdown_recovery
+
+    public
+
+    def initialize
+      configuration = Config.current
+      @logger = configuration[:logger]
+      @storage_account_name_key = configuration[:storage_account_name_key]
+      @partition_key_prefix =configuration[:azure_storage_blob_prefix].gsub( "/", "" )
+
+      @storage_recovery = Storage_recovery.instance
+      @notification_recovery = Notification_recovery.instance
+
+      @closing = nil
+      @threads = []
+    end
+
+    def start
+      @storage_account_name_key.each do |storage_account_name, storage_account_keys|
+        @threads << recovery_thread( storage_account_name, :uploading)
+        @threads << recovery_thread( storage_account_name, :committed)
+      end
+    end
+
+    def close
+      @closing = true
+      @threads.each do |thread|
+        thread.join
+      end
+    end
+
+    private
+
+    def stopped?
+      @closing
+    end
+
+    def recovery_thread( storage_account_name, state )
+      Thread.new( storage_account_name, state ) do |storage_account_name, state|
+
+        blob = Blob.new
+
+        committed_tuples = [ ]
+        uncommitted_tuples = [ ]
+        upload_empty_tuples = [ ]
+        token = nil
+        finished = false
+        filter = "#{:PartitionKey} eq '#{@partition_key_prefix}-#{state}'"
+
+        # should exit thread after fetching data from table, and submit recovery, the loop is only for case of failure
+        until finished || stopped? do
+          entities = blob.state_table_query( storage_account_name, filter, token )
+          if entities
+            token = entities.continuation_token
+
+            if :committed == state
+              entities.each do |entity|
+                State.instance.inc_pending_notifications
+                tuple = blob.table_entity_to_tuple( entity.properties )
+                @notification_recovery.enqueue( tuple )
+              end
+
+            elsif :uploading == state
+              # first tuples are collected, before send to queues, to make sure blob states don't change in between
+              entities.each do |entity|
+                typed_tuple = nil
+                until typed_tuple || stopped?
+                  typed_tuple = blob.update_commited_or_uncommited_list( entity.properties )
+                  Stud.stoppable_sleep(60, 1) { stopped? } unless typed_tuple
+                end
+
+                next if stopped?
+
+                if typed_tuple[:committed]
+                  committed_tuples << typed_tuple[:committed]
+                elsif typed_tuple[:uncommitted]
+                  uncommitted_tuples << typed_tuple[:uncommitted]
+                else
+                  upload_empty_tuples << typed_tuple[:upload_empty]
+                end
+              end
+            end
+
+            next if token
+
+            committed_tuples.each do |tuple|
+              State.instance.inc_pending_commits
+              @storage_recovery.recover_later( tuple, :state_table_update, storage_account_name )
+            end
+
+            uncommitted_tuples.each do |tuple|
+              State.instance.inc_pending_commits
+              @storage_recovery.recover_later( tuple, :commit, storage_account_name )
+            end
+
+            upload_empty_tuples.each do |tuple|
+              @storage_recovery.recover_later( tuple, :state_table_update, storage_account_name )
+            end
+
+            finished = true
+          else
+            Stud.stoppable_sleep(60, 1) { stopped? }
+          end
+        end
+        @logger.info { "exit table recovery thread, storage: #{storage_account_name}, state: #{state}, entities: #{entities ? entities.length : nil}" }
+      end
+    end
+
+    public
+
+    @@instance = Shutdown_recovery.new
+
+    def self.instance
+      @@instance
+    end
+
+    private_class_method :new
+  end
+end
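Shutdown_recovery, the other new singleton, replays work left in the `BlobsState` table after an unclean shutdown: per storage account it queries the rows whose PartitionKey marks them as `uploading` or `committed` and feeds them back into the storage and notification recovery queues. A hedged sketch of how such partition-key filter strings are formed; the prefix value is illustrative, and the actual table query through the Azure table API is omitted.

```ruby
# Filter construction as in Shutdown_recovery#recovery_thread: one query per
# (storage account, state) pair. The prefix below is a sample value derived
# from azure_storage_blob_prefix with "/" stripped.
partition_key_prefix = "myhost"

filters = [ :uploading, :committed ].map do |state|
  "#{:PartitionKey} eq '#{partition_key_prefix}-#{state}'"
end

p filters
# => ["PartitionKey eq 'myhost-uploading'", "PartitionKey eq 'myhost-committed'"]
```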