logstash-output-application_insights 0.1.6 → 0.2.0

Files changed (26)
  1. checksums.yaml +4 -4
  2. data/README.md +10 -2
  3. data/lib/logstash/outputs/application_insights.rb +13 -5
  4. data/lib/logstash/outputs/application_insights/blob.rb +27 -381
  5. data/lib/logstash/outputs/application_insights/block.rb +28 -21
  6. data/lib/logstash/outputs/application_insights/channel.rb +143 -48
  7. data/lib/logstash/outputs/application_insights/channels.rb +4 -3
  8. data/lib/logstash/outputs/application_insights/clients.rb +1 -1
  9. data/lib/logstash/outputs/application_insights/config.rb +3 -2
  10. data/lib/logstash/outputs/application_insights/constants.rb +9 -5
  11. data/lib/logstash/outputs/application_insights/context.rb +97 -0
  12. data/lib/logstash/outputs/application_insights/local_file.rb +113 -0
  13. data/lib/logstash/outputs/application_insights/notification.rb +116 -0
  14. data/lib/logstash/outputs/application_insights/notification_recovery.rb +5 -6
  15. data/lib/logstash/outputs/application_insights/shutdown_recovery.rb +3 -2
  16. data/lib/logstash/outputs/application_insights/state_table.rb +108 -0
  17. data/lib/logstash/outputs/application_insights/storage_cleanup.rb +4 -3
  18. data/lib/logstash/outputs/application_insights/storage_recovery.rb +10 -3
  19. data/lib/logstash/outputs/application_insights/test_notification.rb +3 -6
  20. data/lib/logstash/outputs/application_insights/test_storage.rb +1 -1
  21. data/lib/logstash/outputs/application_insights/upload_pipe.rb +285 -0
  22. data/lib/logstash/outputs/application_insights/validate_notification.rb +1 -1
  23. data/lib/logstash/outputs/application_insights/validate_storage.rb +1 -1
  24. data/lib/logstash/outputs/application_insights/version.rb +1 -1
  25. data/logstash-output-application-insights.gemspec +1 -1
  26. metadata +9 -4
@@ -73,6 +73,7 @@ class LogStash::Outputs::Application_insights
   :blob_serialization => @blob_serialization || DEFAULT_BLOB_SERIALIZATION,
   :csv_separator => @csv_separator || DEFAULT_CSV_SEPARATOR,
   :csv_default_value => @csv_default_value || DEFAULT_CSV_DEFAULT_VALUE,
+  :disable_compression => @disable_compression || DEFAULT_DISABLE_COMPRESSION,

   }
   end
@@ -82,6 +83,7 @@ class LogStash::Outputs::Application_insights
   :disable_cleanup, :delete_not_notified_blobs,
   :validate_notification, :validate_storage,
   :save_notified_blobs_records, :case_insensitive_columns,
+  :disable_compression,
   :table_columns, :serialized_event_field ]

   GUID_NULL = "00000000-0000-0000-0000-000000000000"
@@ -140,6 +142,9 @@ class LogStash::Outputs::Application_insights
   MIN_FLOW_CONTROL_DELAY = 0.1 # in seconds, 1 seconds, can be less than 1 seconds, like 0.5, 0.1
   MAX_FLOW_CONTROL_DELAY = 0 # in seconds, 1 seconds, can be less than 1 seconds, like 0.5, 0.1

+  MAX_CHANNEL_UPLOAD_PIPES = 40
+  CHANNEL_THRESHOLD_TO_ADD_UPLOAD_PIPE = 3 # not relevant for file upload mode
+
   METADATA_FIELD_INSTRUMENTATION_KEY = "[@metadata]instrumentation_key"
   METADATA_FIELD_TABLE_ID = "[@metadata]table_id"
   FIELD_INSTRUMENTATION_KEY = "instrumentation_key"
@@ -151,6 +156,7 @@ class LogStash::Outputs::Application_insights
   AZURE_STORAGE_CONTAINER_LOGSTASH_PREFIX = "logstash" # lower case only, dash allowed
   AZURE_STORAGE_BLOB_LOGSTASH_PREFIX = "logstash"
   AZURE_STORAGE_TABLE_LOGSTASH_PREFIX = "Logstash" # case sensitive, no dash
+  LOCAL_FS_FILE_PREFIX = "application_insights"

   AZURE_STORAGE_ORPHAN_BLOBS_CONTAINER_NAME = "orphan-blobs"
   AZURE_STORAGE_NOTIFIED_BLOBS_TABLE_NAME = "orphan-blobs"
@@ -174,14 +180,11 @@ class LogStash::Outputs::Application_insights
   DEFAULT_AZURE_STORAGE_BLOB_PREFIX = nil
   DEFAULT_AZURE_STORAGE_CONTAINER_PREFIX = nil
   DEFAULT_AZURE_STORAGE_TABLE_PREFIX = nil
-  DEFAULT_JSON_EXT = EXT_EVENT_FORMAT_JSON
-  DEFAULT_CSV_EXT = EXT_EVENT_FORMAT_CSV
-

   DEFAULT_BLOB_SERIALIZATION = EXT_EVENT_FORMAT_JSON

-  DEFAULT_BLOB_MAX_BYTESIZE = 1 * 1024 * 1024 * 1024
-  DEFAULT_BLOB_MAX_EVENTS = 256 * 1024 # 256 Kilo events
+  DEFAULT_BLOB_MAX_BYTESIZE = 4 * 1024 * 1024 * 1024 # 4 Giga bytes
+  DEFAULT_BLOB_MAX_EVENTS = 1000 * 1000 # 1,000,000

   DEFAULT_BLOB_MAX_DELAY = 60 # in seconds
   DEFAULT_BLOB_RETENTION_TIME = 60 * 60 * 24 * 7 # in seconds - one week
@@ -197,6 +200,7 @@ class LogStash::Outputs::Application_insights
   DEFAULT_DISABLE_CLEANUP = false
   DEFAULT_DELETE_NOT_NOTIFIED_BLOBS = false
   DEFAULT_SAVE_NOTIFIED_BLOBS_RECORDS = false
+  DEFAULT_DISABLE_COMPRESSION = false

   DEFAULT_CASE_INSENSITIVE = false
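Evaluated, the changed defaults above work out as follows. This is an illustrative plain-Ruby sketch, not code from the gem; the previous values are noted in comments for comparison.

  # Sketch only: the new defaults introduced in 0.2.0, evaluated.
  DEFAULT_BLOB_MAX_BYTESIZE   = 4 * 1024 * 1024 * 1024   # => 4_294_967_296 bytes (was 1 GiB)
  DEFAULT_BLOB_MAX_EVENTS     = 1000 * 1000               # => 1_000_000 events (was 256 * 1024 = 262_144)
  DEFAULT_DISABLE_COMPRESSION = false                      # compression stays on unless the new option is set

  # The plugin merges a user setting over its default with ||, as in the first hunk above:
  disable_compression = nil                                # the new option was left unset
  merged = { :disable_compression => disable_compression || DEFAULT_DISABLE_COMPRESSION }
  # merged => { :disable_compression => false }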
 
data/lib/logstash/outputs/application_insights/context.rb (new file)
@@ -0,0 +1,97 @@
+ # encoding: utf-8
+
+ # ----------------------------------------------------------------------------------
+ # Logstash Output Application Insights
+ #
+ # Copyright (c) Microsoft Corporation
+ #
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the License);
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ #
+ # See the Apache Version 2.0 License for specific language governing
+ # permissions and limitations under the License.
+ # ----------------------------------------------------------------------------------
+
+ class LogStash::Outputs::Application_insights
+   class Context
+
+     def initialize ( tuple )
+       if tuple
+         tuple_to_context( tuple )
+       else
+         clear_context
+       end
+     end
+
+     def clear_context
+       @start_time = Time.now.utc
+       @action = nil
+       @instrumentation_key = nil
+       @table_id = nil
+       @storage_account_name = nil
+       @container_name = nil
+       @blob_name = nil
+       @uploaded_block_ids = [ ]
+       @uploaded_block_numbers = [ ]
+       @uploaded_events_count = 0
+       @uploaded_bytesize = 0
+       @oldest_event_time = nil
+       @event_format = nil
+       @blob_max_delay = nil
+       @log_state = nil
+       @sub_state = :none
+       @file_size = 0
+     end
+
+     def table_entity_to_tuple( options = {} )
+       [ options[:start_time.to_s] || Time.now.utc, options[:action.to_s], options[:instrumentation_key.to_s], options[:table_id.to_s],
+         options[:storage_account_name.to_s], options[:container_name.to_s], options[:blob_name.to_s],
+         eval( options[:uploaded_block_ids.to_s] ), eval( options[:uploaded_block_numbers.to_s] ),
+         options[:uploaded_events_count.to_s] || 0, options[:uploaded_bytesize.to_s] || 0, options[:oldest_event_time.to_s] || Time.now.utc,
+         options[:event_format.to_s], options[:blob_max_delay.to_s] || 0,
+         options[:log_state.to_s].to_sym, (options[:sub_state.to_s] || :none).to_sym, options[:file_size.to_s] || 0
+       ]
+     end
+
+     def table_entity_to_context ( table_entity )
+       tuple_to_context( table_entity_to_tuple( table_entity ) )
+     end
+
+
+     def context_to_tuple
+       [ @start_time || Time.now.utc, @action, @instrumentation_key, @table_id,
+         @storage_account_name, @container_name, @blob_name,
+         @uploaded_block_ids, @uploaded_block_numbers,
+         @uploaded_events_count, @uploaded_bytesize, @oldest_event_time,
+         @event_format, @blob_max_delay,
+         @log_state, @sub_state, @file_size
+       ]
+     end
+
+     def tuple_to_context ( tuple )
+       ( @start_time, @action, @instrumentation_key, @table_id,
+         @storage_account_name, @container_name, @blob_name,
+         @uploaded_block_ids, @uploaded_block_numbers,
+         @uploaded_events_count, @uploaded_bytesize, @oldest_event_time,
+         @event_format, @blob_max_delay,
+         @log_state, @sub_state, @file_size ) = tuple
+     end
+
+     def context_to_table_entity
+       { :start_time => @start_time, :instrumentation_key => @instrumentation_key, :table_id => @table_id,
+         :storage_account_name => @storage_account_name, :container_name => @container_name, :blob_name => @blob_name,
+         :uploaded_block_ids => @uploaded_block_ids.to_s, :uploaded_block_numbers => @uploaded_block_numbers.to_s,
+         :uploaded_events_count => @uploaded_events_count, :uploaded_bytesize => @uploaded_bytesize, :oldest_event_time => @oldest_event_time,
+         :log_state => @log_state, :sub_state => @sub_state, :file_size => @file_size
+       }
+     end
+
+   end
+ end
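A hedged usage sketch of the new Context class (the require line and variable names are assumptions, not part of the diff). It shows the round trip the class exists for: in-memory upload state to a tuple that can be queued or persisted, back to a context, and finally to the hash used for an Azure table row.

  require "logstash/outputs/application_insights"   # assumed to load context.rb

  ctx    = LogStash::Outputs::Application_insights::Context.new( nil )    # nil tuple => clear_context
  tuple  = ctx.context_to_tuple                                           # 17-element array of upload state
  copy   = LogStash::Outputs::Application_insights::Context.new( tuple )  # rebuilt via tuple_to_context
  entity = copy.context_to_table_entity                                   # { :start_time => ..., :blob_name => ..., :log_state => ..., ... }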
data/lib/logstash/outputs/application_insights/local_file.rb (new file)
@@ -0,0 +1,113 @@
+ # encoding: utf-8
+
+ # ----------------------------------------------------------------------------------
+ # Logstash Output Application Insights
+ #
+ # Copyright (c) Microsoft Corporation
+ #
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the License);
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ #
+ # See the Apache Version 2.0 License for specific language governing
+ # permissions and limitations under the License.
+ # ----------------------------------------------------------------------------------
+
+ class LogStash::Outputs::Application_insights
+   class Local_file
+
+     attr_reader :bytesize
+     attr_reader :events_count
+     attr_reader :oldest_event_time
+
+     attr_reader :file_size
+
+     public
+
+     def initialize ( filename, is_gzip_file )
+       @file_name = filename
+       @writer = write_file = File.new( @file_name, File::RDWR|File::CREAT )
+       @writer = Zlib::GzipWriter.new( write_file ) if is_gzip_file
+       @read_file = nil
+       @bytesize = 0
+       @events_count = 0
+       @first_block_number = nil
+       @next_block_number = nil
+
+     end
+
+     def seal
+       if @writer
+         @writer.close
+         @writer = nil
+       end
+     end
+
+
+     def close_read
+       @read_file.close if @read_file
+       @read_file = nil
+     end
+
+
+     def get_next_block
+       block = Block.new
+       block.done_time = @done_time
+       block.oldest_event_time = @oldest_event_time
+
+       unless @read_file
+         @read_file = File.new( @file_name, File::RDWR )
+         @file_size = @read_file.size
+         @blocks_num = ( @file_size + BLOB_BLOCK_MAX_BYTESIZE - 1 ) / BLOB_BLOCK_MAX_BYTESIZE
+         @events_per_block = @events_count / @blocks_num
+         block.events_count = @events_per_block + ( @events_count % @blocks_num )
+         @next_block_number = @first_block_number ||= Block.generate_block_number
+         block.block_numbers = [ @first_block_number ]
+       else
+         block.block_numbers = [ @next_block_number ]
+         block.events_count = @events_per_block
+       end
+       @next_block_number += 1
+       block.bytes = @read_file.read(BLOB_BLOCK_MAX_BYTESIZE)
+       return nil if block.bytes.nil? || 0 == block.bytes.length
+       block.bytesize = block.bytes.length
+       State.instance.inc_upload_bytesize( block.bytesize )
+       block
+     end
+
+
+     def << ( block )
+       @bytesize += block.bytesize
+       @events_count += block.events_count
+
+       @writer.write( block.bytes )
+       State.instance.dec_upload_bytesize( block.bytesize )
+
+       @oldest_event_time = block.oldest_event_time if @oldest_event_time.nil? || block.oldest_event_time < @oldest_event_time
+       @done_time = block.done_time if @done_time.nil? || block.done_time > @done_time
+     end
+
+
+     def dispose
+       @bytesize = nil
+       @events_count = nil
+       @done_time = nil
+       @oldest_event_time = nil
+       seal
+       close_read
+       File.delete( @file_name ) if @file_name
+       @file_name = nil
+     end
+
+
+     private
+
+   end
+
+ end
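A hedged usage sketch of Local_file (the file name, the block variable, and the upload_block helper are placeholders for illustration). The class spools serialized blocks to a local, optionally gzip-compressed file, then streams the file back in BLOB_BLOCK_MAX_BYTESIZE chunks for upload.

  # Sketch only: `block` stands for a filled Block (bytes, bytesize, events_count, times).
  spool = LogStash::Outputs::Application_insights::Local_file.new(
            "/tmp/application_insights-spool.json.gz", true )   # true => wrap the file in Zlib::GzipWriter

  spool << block                # append block.bytes; bytesize and events_count accumulate
  spool.seal                    # close the writer before reading the file back

  while ( chunk = spool.get_next_block )
    upload_block( chunk )       # placeholder upload step, one Azure block per chunk
  end

  spool.dispose                 # close the reader and delete the local file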
data/lib/logstash/outputs/application_insights/notification.rb (new file)
@@ -0,0 +1,116 @@
+ # encoding: utf-8
+
+ # ----------------------------------------------------------------------------------
+ # Logstash Output Application Insights
+ #
+ # Copyright (c) Microsoft Corporation
+ #
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the License);
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ #
+ # See the Apache Version 2.0 License for specific language governing
+ # permissions and limitations under the License.
+ # ----------------------------------------------------------------------------------
+ class LogStash::Outputs::Application_insights
+   class Notification < Blob
+
+     def initialize ( tuple )
+       # super first parameter must be nil. blob first parameter is channel, otherwise it will pass storage_account_name as channel
+       super( tuple )
+     end
+
+     # must return whether notification was successful or failed
+     def notify
+       @action = :notify
+       @recoverable = [ :notify_failed_blob_not_accessible, :io_failure, :service_unavailable, :notification_process_down ]
+       @force_client = true # to enable get a client even if all storage_accounts marked dead
+       @info = "#{@action} #{@storage_account_name}/#{@container_name}/#{@blob_name}, events: #{@uploaded_events_count}, size: #{@uploaded_bytesize}, blocks: #{@uploaded_block_numbers}, delay: #{Time.now.utc - @oldest_event_time}"
+       success = storage_io_block { submit }
+       if success
+         Telemetry.instance.track_event { { :name => "notified", :properties => context_to_table_entity } }
+         state_table_update
+       else
+         notify_retry_later
+       end
+       success
+     end
+
+     private
+
+     def submit
+       set_blob_sas_url
+       @logger.debug { "blob_sas_url: #{@blob_sas_url}" }
+       payload = create_payload
+       @logger.debug { "notification payload: #{payload}" }
+
+       # assume that exceptions can be raised due to this method:
+       post_notification( @client.notifyClient, payload ) unless @configuration[:disable_notification]
+
+       @log_state = :notified
+     end
+
+
+     def set_blob_sas_url
+       blob_url ="https://#{@storage_account_name}.blob.#{@configuration[:azure_storage_host_suffix]}/#{@container_name}/#{@blob_name}"
+       options_and_constrains = {:permissions => "r", :resource => "b", :expiry => ( Time.now.utc + @configuration[:blob_access_expiry_time] ).iso8601 }
+       @blob_sas_url = @client.storage_auth_sas.signed_uri( URI( blob_url ), options_and_constrains )
+     end
+
+
+     def create_payload
+       notification_hash = {
+         :data => {
+           :baseType => DATA_BASE_TYPE,
+           :baseData => {
+             :ver => BASE_DATA_REQUIRED_VERSION,
+             :blobSasUri => @blob_sas_url.to_s,
+             :sourceName => @table_id,
+             :sourceVersion => @configuration[:notification_version].to_s
+           }
+         },
+         :ver => @configuration[:notification_version],
+         :name => REQUEST_NAME,
+         :time => Time.now.utc.iso8601,
+         :iKey => @instrumentation_key
+       }
+       notification_hash.to_json
+     end
+
+
+     def post_notification ( http_client, body )
+       request = Azure::Core::Http::HttpRequest.new( :post, @configuration[:application_insights_endpoint], { :body => body, :client => http_client } )
+       request.headers['Content-Type'] = 'application/json; charset=utf-8'
+       request.headers['Accept'] = 'application/json'
+       @logger.debug { "send notification : \n endpoint: #{@configuration[:application_insights_endpoint]}\n body : #{body}" }
+       response = request.call
+     end
+
+
+     def notify_retry_later
+       if :notify_failed_blob_not_accessible == @recovery
+         @sub_state = @recovery
+         @storage_recovery.recover_later( context_to_tuple, :notify, @storage_account_name )
+
+       elsif :invalid_instrumentation_key == @recovery || :invalid_table_id == @recovery
+         @sub_state = @recovery
+         Channels.instance.channel( @instrumentation_key, @table_id ).recover_later_notification( context_to_tuple )
+
+       else
+         if :notify_failed_blob_not_accessible == @sub_state
+           @storage_recovery.recover_later( context_to_tuple, :notify, @storage_account_name )
+         elsif :invalid_instrumentation_key == @sub_state || :invalid_table_id == @sub_state
+           Channels.instance.channel( @instrumentation_key, @table_id ).recover_later_notification( context_to_tuple )
+         else
+           @notification_recovery.recover_later( context_to_tuple )
+         end
+       end
+     end
+   end
+ end
@@ -66,19 +66,18 @@ class LogStash::Outputs::Application_insights
 
   def recovery_thread
     Thread.new do
-      blob = Blob.new
       counter = Concurrent::AtomicFixnum.new(0)
 
       loop do
        tuple = @queue.pop
-       Stud.stoppable_sleep(Float::INFINITY, 1) { ( state_on?( blob ) || stopped? ) && 10 > counter.value }
+       Stud.stoppable_sleep(Float::INFINITY, 1) { ( state_on? || stopped? ) && 10 > counter.value }
 
-       if stopped? && !state_on?( blob )
+       if stopped? && !state_on?
          recover_later( tuple )
        else
          counter.increment
          Thread.new( counter, tuple ) do |counter, tuple|
-           Blob.new.notify( tuple )
+           Notification.new( tuple ).notify
            counter.decrement
          end
        end
@@ -87,9 +86,9 @@ class LogStash::Outputs::Application_insights
      end
    end
 
-  def state_on? ( blob )
+  def state_on?
    return @notification_state_on if @notification_state_on
-   @notification_state_on = @test_notification.submit
+   @notification_state_on = @test_notification.test
    return @notification_state_on if @notification_state_on
    sleep( @resurrect_delay )
    @notification_state_on
@@ -27,7 +27,7 @@ class LogStash::Outputs::Application_insights
    configuration = Config.current
    @logger = configuration[:logger]
    @storage_account_name_key = configuration[:storage_account_name_key]
-   @partition_key_prefix =configuration[:azure_storage_blob_prefix].gsub( "/", "" )
+   @partition_key_prefix =configuration[:partition_key_prefix]
 
    @closing = nil
    @threads = []
@@ -85,7 +85,8 @@ class LogStash::Outputs::Application_insights
    entities.each do |entity|
      typed_tuple = nil
      until typed_tuple || stopped?
-       typed_tuple = blob.update_commited_or_uncommited_list( entity.properties )
+       blob.table_entity_to_context( entity.properties )
+       typed_tuple = blob.update_commited_or_uncommited_list
       Stud.stoppable_sleep(60, 1) { stopped? } unless typed_tuple
     end
 
data/lib/logstash/outputs/application_insights/state_table.rb (new file)
@@ -0,0 +1,108 @@
+ # encoding: utf-8
+
+ # ----------------------------------------------------------------------------------
+ # Logstash Output Application Insights
+ #
+ # Copyright (c) Microsoft Corporation
+ #
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the License);
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ #
+ # See the Apache Version 2.0 License for specific language governing
+ # permissions and limitations under the License.
+ # ----------------------------------------------------------------------------------
+ class LogStash::Outputs::Application_insights
+   class State_table < Blob
+
+     def initialize( tuple )
+       # super first parameter must be nil. blob first parameter is channel, otherwise it will pass storage_account_name as channel
+       super( nil )
+       tuple_to_state( tuple ) if tuple
+     end
+
+     # return true on success
+     def state_table_insert
+       @action = :state_table_insert
+       @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy, :entity_exist ]
+       @info = "#{@action} #{@log_state} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
+       success = storage_io_block {
+         create_table_exist_recovery
+         if :entity_exist == @recovery
+           raise NotRecoverableError if :uploading == @log_state
+         else
+           entity_values = state_to_table_entity
+           entity_values[:PartitionKey] = "#{@configuration[:partition_key_prefix]}-#{@log_state}"
+           entity_values[:RowKey] = @blob_name.gsub("/","_")
+           @client.tableClient.insert_entity( @configuration[:state_table_name], entity_values )
+         end
+       }
+       @storage_recovery.recover_later( state_to_tuple, :state_table_update, @storage_account_name ) unless success || :uploading == @log_state
+       success
+     end
+
+
+     def state_table_update
+       if :uploading == @log_state
+         state_table_delete
+       elsif :committed == @log_state
+         if state_table_insert && state_table_delete( :uploading )
+           State.instance.dec_pending_commits
+           State.instance.inc_pending_notifications
+           # this is not a recovery, it is actually enqueue to notify
+           @notification_recovery.enqueue( state_to_tuple )
+         end
+       elsif :notified == @log_state
+         if (!@configuration[:save_notified_blobs_records] || state_table_insert) && state_table_delete( :committed )
+           State.instance.dec_pending_notifications
+         end
+       end
+     end
+
+
+     # return true on success
+     def state_table_delete ( state = nil )
+       state ||= @log_state
+       @action = :state_table_delete
+       @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy, :create_resource ]
+       @info = "#{@action} #{state} #{@storage_account_name}/#{@container_name}/#{@blob_name}"
+
+       success = storage_io_block {
+         create_table_exist_recovery
+         if :create_resource == @recovery
+           @logger.info { "Note: delete entity failed, already deleted, #{@info}, state: #{state}, log_state: #{@log_state}" }
+         else
+           @client.tableClient.delete_entity( @configuration[:state_table_name], "#{@configuration[:partition_key_prefix]}-#{state}", @blob_name.gsub( "/", "_" ) )
+         end
+       }
+       @storage_recovery.recover_later( state_to_tuple, :state_table_update, @storage_account_name ) unless success
+       success
+     end
+
+
+     # return entities
+     def state_table_query ( storage_account_name, filter , token )
+       @storage_account_name = storage_account_name
+
+       @action = :state_table_query
+       @recoverable = [ :invalid_storage_key, :io_failure, :service_unavailable, :table_exist, :create_table, :table_busy ]
+       @info = "#{@action} #{@storage_account_name}/#{@configuration[:state_table_name]}"
+
+       entities = nil
+       success = storage_io_block {
+         create_table_exist_recovery
+         options = { :filter => filter }
+         options[:continuation_token] = token if token
+         entities = @client.tableClient.query_entities( @configuration[:state_table_name], options )
+       }
+       entities
+     end
+
+   end
+ end
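The state_table_update method above encodes the blob life cycle kept in the Azure state table. A hedged summary sketch follows; the state names and row operations come from the hunk, while the tuple and the driver call are assumptions.

  # State_table#state_table_update, depending on the current @log_state:
  #   :uploading  -> delete the :uploading row
  #   :committed  -> insert a :committed row, delete the :uploading row, enqueue the blob for notification
  #   :notified   -> insert a :notified row only if save_notified_blobs_records, then delete the :committed row
  st = LogStash::Outputs::Application_insights::State_table.new( tuple )  # tuple carries the blob context and @log_state
  st.state_table_update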