logstash-output-azure 0.3.0 → 1.0.0

This diff shows the changes between two publicly released versions of this package, as published to their public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
- SHA1:
-   metadata.gz: 570190e66cae98abbd38587c21422274d212ce6d
-   data.tar.gz: a40db86117fc7b8b53b47d45013df5899e21c8e8
+ SHA256:
+   metadata.gz: a0957c333ee69df3b98978bda9a84c26d0d2752f1904fbd104d7d4d58e249095
+   data.tar.gz: dc9b4e1a009d29a98183bcc984825e41680ba83bd51f5968e7933097f532619e
  SHA512:
-   metadata.gz: 9e652e68fc1b07aeb1cd9abed642bf3006eda3f13fbded985e37d5d2446cfdcbf9fec7369c39395d4528c4c3d02d9a516a35678bd9690e9a65ec54e88a154cc4
-   data.tar.gz: 2fe605a3752c850d43ea945fbae618ad1c96ae3ae10a9dc3838322f5bf4eef046a2ec01fa5fa5d6c4e38bb016eac977838f0a4b88cbd61203387575dbb137e90
+   metadata.gz: 65aa1e9eba3e573294306710165a91f06b4f5c304a8020f8f1abd5ce5dde5c3c45d6931665baad8a014a66eec52fe0a81b190e752ce23dedca26aa0d9f724f3b
+   data.tar.gz: 5fcdca862617607b619586f854331aa1eda6a47af66f508d3f04bcd9fb5f953d9a2e966a5cfb141763aab0e5d590f4e15d5b7c3678c71c505c9cc1ad2f902ac2
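The checksum manifest switches its primary digests from SHA1 to SHA256 (the SHA512 entries remain). A minimal Ruby sketch of checking a build against these entries, assuming metadata.gz and data.tar.gz have already been extracted from the downloaded .gem archive into the current directory:

    # Hedged sketch: verify extracted gem artifacts against checksums.yaml.
    require 'digest'
    require 'yaml'

    checksums = YAML.safe_load(File.read('checksums.yaml'))
    %w[metadata.gz data.tar.gz].each do |artifact|
      actual   = Digest::SHA256.file(artifact).hexdigest
      expected = checksums['SHA256'][artifact]
      abort "checksum mismatch for #{artifact}" unless actual == expected
    end
    puts 'SHA256 checksums match'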
CHANGELOG.md CHANGED
@@ -1,2 +1,5 @@
- ## 0.1.0
+ ## 0.3.0
  - Plugin created with the logstash plugin generator
+ ## 1.0.0
+ - updated specs
+ - Documented methods
CONTRIBUTORS CHANGED
@@ -3,6 +3,7 @@ reports, or in general have helped logstash along its way.

  Contributors:
  * Tuffk - tuffkmulhall@gmail.com
+ * BrunoLerner - bru.lerner@gmail.com

  Note: If you've sent us patches, bug reports, or otherwise contributed to
  Logstash, and you aren't on the list above and want to be, please let us know
lib/logstash/outputs/azure.rb CHANGED
@@ -1,10 +1,7 @@
- # encoding: utf-8
-
  require 'logstash/outputs/base'
  require 'logstash/namespace'
  require 'azure'
  require 'tmpdir'
- require 'pry'

  # Logstash outout plugin that uploads the logs to Azure blobs.
  # The logs are stored on local temporary file which is uploaded as a blob
@@ -32,7 +29,7 @@ require 'pry'
  # upload que size
  # @!attribute upload workers count
  # how much workers for uplaod
- # @!attribute rotation_strategy
+ # @!attribute rotation_strategy_val
  # what will be considered to do the tmeporary file rotation
  # @!attribute tags
  # tags for the files
@@ -40,10 +37,10 @@ require 'pry'
  # the encoding of the files
  # @example basic configuration
  # output {
- # logstash_output_azure {
- # storage_account_name => "my-azure-account" # requiered
- # storage_access_key => "my-super-secret-key" # requiered
- # contianer_name => "my-contianer" # requiered
+ # azure {
+ # storage_account_name => "my-azure-account" # required
+ # storage_access_key => "my-super-secret-key" # required
+ # contianer_name => "my-contianer" # required
  # size_file => 1024*1024*5 # optional
  # time_file => 10 # optional
  # restore => true # optional
@@ -51,15 +48,14 @@ require 'pry'
  # prefix => "a_prefix" # optional
  # upload_queue_size => 2 # optional
  # upload_workers_count => 1 # optional
- # rotation_strategy => "size_and_time" # optional
+ # rotation_strategy_val => "size_and_time" # optional
  # tags => [] # optional
  # encoding => "none" # optional
  # }
  # }
  class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  # name for the namespace under output for logstash configuration
- config_name "azure"
-
+ config_name 'azure'

  require 'logstash/outputs/blob/writable_directory_validator'
  require 'logstash/outputs/blob/path_validator'
@@ -71,14 +67,11 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  require 'logstash/outputs/blob/uploader'
  require 'logstash/outputs/blob/file_repository'

- PREFIX_KEY_NORMALIZE_CHARACTER = "_"
+ PREFIX_KEY_NORMALIZE_CHARACTER = '_'.freeze
  PERIODIC_CHECK_INTERVAL_IN_SECONDS = 15
- CRASH_RECOVERY_THREADPOOL = Concurrent::ThreadPoolExecutor.new({
- :min_threads => 1,
- :max_threads => 2,
- :fallback_policy => :caller_runs
- })
-
+ CRASH_RECOVERY_THREADPOOL = Concurrent::ThreadPoolExecutor.new(min_threads: 1,
+ max_threads: 2,
+ fallback_policy: :caller_runs)

  # azure contianer
  config :storage_account_name, validate: :string, required: false
@@ -97,32 +90,30 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  config :prefix, validate: :string, default: ''
  config :upload_queue_size, validate: :number, default: 2 * (Concurrent.processor_count * 0.25).ceil
  config :upload_workers_count, validate: :number, default: (Concurrent.processor_count * 0.5).ceil
- config :rotation_strategy, validate: %w[size_and_time size time], default: 'size_and_time'
- config :tags, :validate => :array, :default => []
- config :encoding, :validate => ["none", "gzip"], :default => "none"
+ config :rotation_strategy_val, validate: %w[size_and_time size time], default: 'size_and_time'
+ config :tags, validate: :array, default: []
+ config :encoding, validate: %w[none gzip], default: 'none'

- attr_accessor :storage_account_name, :storage_access_key,:container_name,
- :size_file,:time_file,:restore,:temporary_directory,:prefix,:upload_queue_size,
- :upload_workers_count,:rotation_strategy,:tags,:encoding
-
- public
+ attr_accessor :storage_account_name, :storage_access_key, :container_name,
+ :size_file, :time_file, :restore, :temporary_directory, :prefix, :upload_queue_size,
+ :upload_workers_count, :rotation_strategy_val, :tags, :encoding

  # initializes the +LogstashAzureBlobOutput+ instances
- # validates all canfig parameters
+ # validates all config parameters
  # initializes the uploader
  def register
  unless @prefix.empty?
  unless PathValidator.valid?(prefix)
- raise LogStash::ConfigurationError, "Prefix must not contains: #{PathValidator::INVALID_CHARACTERS}"
+ raise LogStash::ConfigurationError.new("Prefix must not contains: #{PathValidator::INVALID_CHARACTERS}")
  end
  end

  unless WritableDirectoryValidator.valid?(@temporary_directory)
- raise LogStash::ConfigurationError, "Logstash must have the permissions to write to the temporary directory: #{@temporary_directory}"
+ raise LogStash::ConfigurationError.new("Logstash must have the permissions to write to the temporary directory: #{@temporary_directory}")
  end

- if @time_file.nil? && @size_file.nil? || @size_file == 0 && @time_file == 0
- raise LogStash::ConfigurationError, 'at least one of time_file or size_file set to a value greater than 0'
+ if @time_file.nil? && @size_file.nil? || @size_file.zero? && @time_file.zero?
+ raise LogStash::ConfigurationError.new('at least one of time_file or size_file set to a value greater than 0')
  end

  @file_repository = FileRepository.new(@tags, @encoding, @temporary_directory)
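The rotation guard in register relies on Ruby operator precedence (&& binds tighter than ||), so it rejects a configuration only when both thresholds are unset or both are zero. A standalone sketch of the same predicate (the method name is a made-up stand-in):

    # Hedged sketch of the guard: (both nil) OR (both zero) leaves no
    # rotation trigger, so register raises a ConfigurationError.
    def rotation_unconfigured?(time_file, size_file)
      time_file.nil? && size_file.nil? || size_file.zero? && time_file.zero?
    end

    rotation_unconfigured?(0, 0)     # => true  (rejected)
    rotation_unconfigured?(nil, nil) # => true  (rejected)
    rotation_unconfigured?(10, 0)    # => false (time-based rotation still fires)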
@@ -138,8 +129,10 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base

  restore_from_crash if @restore
  start_periodic_check if @rotation.needs_periodic?
- end # def register
+ end

+ # Receives multiple events and check if there is space in temporary directory
+ # @param events_and_encoded [Object]
  def multi_receive_encoded(events_and_encoded)
  prefix_written_to = Set.new

@@ -152,7 +145,7 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  # The output should stop accepting new events coming in, since it cannot do anything with them anymore.
  # Log the error and rethrow it.
  rescue Errno::ENOSPC => e
- @logger.error('S3: No space left in temporary directory', temporary_directory: @temporary_directory)
+ @logger.error('Azure: No space left in temporary directory', temporary_directory: @temporary_directory)
  raise e
  end
  end
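The body of multi_receive_encoded is largely unchanged and mostly outside these hunks; for orientation, Logstash hands this method an array of [event, encoded] pairs produced by the configured codec. A hedged sketch of how such pairs are typically consumed, assuming the plugin context (normalize_key and get_file appear in this diff; the write call on the temporary file is an assumption):

    # Hedged sketch (plugin context assumed): each pair carries the event and
    # its codec-encoded payload; the event-derived prefix picks the temp file.
    def multi_receive_encoded_sketch(events_and_encoded)
      events_and_encoded.each do |event, encoded|
        prefix_key = normalize_key(event.sprintf(@prefix))
        @file_repository.get_file(prefix_key) { |file| file.write(encoded) }
      end
    end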
@@ -161,7 +154,7 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  rotate_if_needed(prefix_written_to)
  end

- # close the tmeporary file and uploads the content to Azure
+ # close the temporary file and uploads the content to Azure
  def close
  stop_periodic_check if @rotation.needs_periodic?

@@ -182,21 +175,18 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  @crash_uploader.stop if @restore # we might have still work to do for recovery so wait until we are done
  end

+ # Validates and normalize prefix key
+ # @param prefix_key [String]
  def normalize_key(prefix_key)
  prefix_key.gsub(PathValidator.matches_re, PREFIX_KEY_NORMALIZE_CHARACTER)
  end

- def upload_options
- {
- }
- end
-
  # checks periodically the tmeporary file if it needs to be rotated
  def start_periodic_check
- @logger.debug("Start periodic rotation check")
+ @logger.debug('Start periodic rotation check')

- @periodic_check = Concurrent::TimerTask.new(:execution_interval => PERIODIC_CHECK_INTERVAL_IN_SECONDS) do
- @logger.debug("Periodic check for stale files")
+ @periodic_check = Concurrent::TimerTask.new(execution_interval: PERIODIC_CHECK_INTERVAL_IN_SECONDS) do
+ @logger.debug('Periodic check for stale files')

  rotate_if_needed(@file_repository.keys)
  end
@@ -208,21 +198,23 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  @periodic_check.shutdown
  end

- # login to azure cloud using azure gem and get the contianer if exist or create
- # the continer if it doesn't
+ # login to azure cloud using azure gem and create the contianer if it doesn't exist
+ # @return [Object] the azure_blob_service object, which is the endpoint to azure gem
  def blob_container_resource
  Azure.config.storage_account_name = storage_account_name
  Azure.config.storage_access_key = storage_access_key
  azure_blob_service = Azure::Blob::BlobService.new
- list = azure_blob_service.list_containers()
+ list = azure_blob_service.list_containers
  list.each do |item|
  @container = item if item.name == container_name
  end

  azure_blob_service.create_container(container_name) unless @container
- return azure_blob_service
+ azure_blob_service
  end

+ # check if it needs to rotate according to rotation policy and rotates it if it needs
+ # @param prefixes [String]
  def rotate_if_needed(prefixes)
  prefixes.each do |prefix|
  # Each file access is thread safe,
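blob_container_resource wires up the legacy azure gem, and the same calls can be exercised standalone. A hedged sketch (the environment variables and container name are placeholders; in the plugin the credentials come from the config options) that ensures a container exists and uploads one blob with create_block_blob, the call the removed receive method further down also used:

    require 'azure'

    # Placeholder credentials, assumed to be set in the environment.
    Azure.config.storage_account_name = ENV['AZURE_STORAGE_ACCOUNT']
    Azure.config.storage_access_key   = ENV['AZURE_STORAGE_ACCESS_KEY']

    service = Azure::Blob::BlobService.new
    # Mirror the container lookup above: reuse it if present, else create it.
    exists = service.list_containers.any? { |c| c.name == 'my-container' }
    service.create_container('my-container') unless exists
    # Upload file contents as a single block blob.
    service.create_block_blob('my-container', 'logs/example.log', File.read('example.log'))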
@@ -232,10 +224,10 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base
  temp_file = factory.current

  if @rotation.rotate?(temp_file)
- @logger.debug("Rotate file",
- :strategy => @rotation.class.name,
- :key => temp_file.key,
- :path => temp_file.path)
+ @logger.debug('Rotate file',
+ strategy: @rotation.class.name,
+ key: temp_file.key,
+ path: temp_file.path)

  upload_file(temp_file)
  factory.rotate!
@@ -246,60 +238,46 @@ class LogStash::Outputs::LogstashAzureBlobOutput < LogStash::Outputs::Base

  # uploads the file using the +Uploader+
  def upload_file(temp_file)
- @logger.debug("Queue for upload", :path => temp_file.path)
+ @logger.debug('Queue for upload', path: temp_file.path)

  # if the queue is full the calling thread will be used to upload
  temp_file.close # make sure the content is on disk
- if temp_file.size > 0
+ unless temp_file.empty? # rubocop:disable GuardClause
  @uploader.upload_async(temp_file,
- :on_complete => method(:clean_temporary_file),
- :upload_options => upload_options )
+ on_complete: method(:clean_temporary_file),
+ upload_options: upload_options)
  end
  end

  # creates an instance for the rotation strategy
  def rotation_strategy
- case @rotation_strategy
- when "size"
+ case @rotation_strategy_val
+ when 'size'
  SizeRotationPolicy.new(size_file)
- when "time"
+ when 'time'
  TimeRotationPolicy.new(time_file)
- when "size_and_time"
+ when 'size_and_time'
  SizeAndTimeRotationPolicy.new(size_file, time_file)
  end
  end

+ # Cleans the temporary files after it is uploaded to azure blob
  def clean_temporary_file(file)
- @logger.debug("Removing temporary file", :file => file.path)
+ @logger.debug('Removing temporary file', file: file.path)
  file.delete!
  end

+ # uploads files if there was a crash before
  def restore_from_crash
  @crash_uploader = Uploader.new(blob_container_resource, container_name, @logger, CRASH_RECOVERY_THREADPOOL)

  temp_folder_path = Pathname.new(@temporary_directory)
- Dir.glob(::File.join(@temporary_directory, "**/*"))
- .select { |file| ::File.file?(file) }
- .each do |file|
+ Dir.glob(::File.join(@temporary_directory, '**/*'))
+ .select { |file| ::File.file?(file) }
+ .each do |file|
  temp_file = TemporaryFile.create_from_existing_file(file, temp_folder_path)
- @logger.debug("Recovering from crash and uploading", :file => temp_file.path)
- @crash_uploader.upload_async(temp_file, :on_complete => method(:clean_temporary_file), :upload_options => upload_options)
+ @logger.debug('Recovering from crash and uploading', file: temp_file.path)
+ @crash_uploader.upload_async(temp_file, on_complete: method(:clean_temporary_file), upload_options: upload_options)
  end
  end
-
-
- public
-
- def receive(event)
- azure_login
- azure_blob_service = Azure::Blob::BlobService.new
- containers = azure_blob_service.list_containers
- blob = azure_blob_service.create_block_blob(containers[0].name, event.timestamp.to_s, event.to_json)
- end # def event
-
- # inputs the credentials to the azure gem to log in and use azure API
- def azure_login
- Azure.config.storage_account_name ||= storage_account_name
- Azure.config.storage_access_key ||= storage_access_key
- end # def azure_login
- end # class LogStash::Outputs::LogstashAzureBlobOutput
+ end
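rotation_strategy above returns one of three policy objects whose implementations live under lib/logstash/outputs/blob and are not part of this diff; the caller only needs rotate?(temp_file) and needs_periodic?. An illustrative size-only policy under those assumptions (the class name is a made-up stand-in, not the gem's SizeRotationPolicy):

    # Illustrative stand-in for a size-based rotation policy: rotate once
    # the temporary file reaches size_file bytes.
    class ExampleSizeRotationPolicy
      attr_reader :size_file

      def initialize(size_file)
        raise ArgumentError, 'size_file must be greater than 0' if size_file <= 0
        @size_file = size_file
      end

      # rotate_if_needed asks this after every batch.
      def rotate?(file)
        file.size >= size_file
      end

      # Size-only rotation needs no timer; a time-based policy would return
      # true so start_periodic_check schedules the periodic sweep.
      def needs_periodic?
        false
      end
    end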
lib/logstash/outputs/blob/file_repository.rb CHANGED
@@ -1,8 +1,8 @@
- # encoding: utf-8
- require "java"
- require "concurrent"
- require "concurrent/timer_task"
- require "logstash/util"
+
+ require 'java'
+ require 'concurrent'
+ require 'concurrent/timer_task'
+ require 'logstash/util'

  ConcurrentHashMap = java.util.concurrent.ConcurrentHashMap

@@ -10,40 +10,46 @@ module LogStash
  module Outputs
  class LogstashAzureBlobOutput
  # sub class for +LogstashAzureBlobOutput+
- # this class manages the tmeporary directory for the temporary files
+ # this class manages the temporary directory for the temporary files
  class FileRepository
  DEFAULT_STATE_SWEEPER_INTERVAL_SECS = 60
  DEFAULT_STALE_TIME_SECS = 15 * 60
  # Ensure that all access or work done
  # on a factory is threadsafe
  class PrefixedValue
+ # initialize the factory
  def initialize(file_factory, stale_time)
  @file_factory = file_factory
  @lock = Mutex.new
  @stale_time = stale_time
  end

+ # activate the lock
  def with_lock
- @lock.synchronize {
+ @lock.synchronize do
  yield @file_factory
- }
+ end
  end

+ # boolean method
  def stale?
- with_lock { |factory| factory.current.size == 0 && (Time.now - factory.current.ctime > @stale_time) }
+ with_lock { |factory| factory.current.size.zero? && (Time.now - factory.current.ctime > @stale_time) }
  end

- def apply(prefix)
- return self
+ # return this class
+ def apply(_prefix)
+ self
  end

+ # delete the current factory
  def delete!
- with_lock{ |factory| factory.current.delete! }
+ with_lock { |factory| factory.current.delete! }
  end
  end

  # class for initializing the repo manager
  class FactoryInitializer
+ # initializes the class
  def initialize(tags, encoding, temporary_directory, stale_time)
  @tags = tags
  @encoding = encoding
@@ -51,17 +57,18 @@ module LogStash
  @stale_time = stale_time
  end

+ # applies the prefix key
  def apply(prefix_key)
  PrefixedValue.new(TemporaryFileFactory.new(prefix_key, @tags, @encoding, @temporary_directory), @stale_time)
  end
  end
-
+ # initializes the class with more variables
  def initialize(tags, encoding, temporary_directory,
  stale_time = DEFAULT_STALE_TIME_SECS,
  sweeper_interval = DEFAULT_STATE_SWEEPER_INTERVAL_SECS)
  # The path need to contains the prefix so when we start
  # logtash after a crash we keep the remote structure
- @prefixed_factories = ConcurrentHashMap.new
+ @prefixed_factories = ConcurrentHashMap.new

  @sweeper_interval = sweeper_interval

@@ -70,10 +77,12 @@ module LogStash
  start_stale_sweeper
  end

+ # gets the key set
  def keys
  @prefixed_factories.keySet
  end

+ # with lock for each file
  def each_files
  @prefixed_factories.elements.each do |prefixed_file|
  prefixed_file.with_lock { |factory| yield factory.current }
@@ -85,35 +94,41 @@ module LogStash
  @prefixed_factories.computeIfAbsent(prefix_key, @factory_initializer).with_lock { |factory| yield factory }
  end

+ # gets file from prefix_key
  def get_file(prefix_key)
  get_factory(prefix_key) { |factory| yield factory.current }
  end

+ # stops. shutdown
  def shutdown
  stop_stale_sweeper
  end

+ # gets factory's size
  def size
  @prefixed_factories.size
  end

+ # remove the stale given key and value
  def remove_stale(k, v)
- if v.stale?
+ if v.stale? # rubocop:disable Style/GuardClause
  @prefixed_factories.remove(k, v)
  v.delete!
  end
  end

+ # starts the stale sweeper
  def start_stale_sweeper
- @stale_sweeper = Concurrent::TimerTask.new(:execution_interval => @sweeper_interval) do
- LogStash::Util.set_thread_name("LogstashAzureBlobOutput, Stale factory sweeper")
+ @stale_sweeper = Concurrent::TimerTask.new(execution_interval: @sweeper_interval) do
+ LogStash::Util.set_thread_name('LogstashAzureBlobOutput, Stale factory sweeper')

- @prefixed_factories.forEach{|k,v| remove_stale(k,v)}
+ @prefixed_factories.forEach { |k, v| remove_stale(k, v) }
  end

  @stale_sweeper.execute
  end

+ # stops the stale sweeper
  def stop_stale_sweeper
  @stale_sweeper.shutdown
  end
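A closing note on the apply methods in FileRepository: get_factory passes @factory_initializer straight into ConcurrentHashMap#computeIfAbsent, a Java method that expects a java.util.function.Function, and under JRuby a plain Ruby object satisfies that interface simply by defining apply. A self-contained JRuby sketch of the same pattern (UpcaseFactory is a made-up stand-in):

    # Runs under JRuby only: java.util.concurrent is reached through the
    # 'java' bridge, exactly as file_repository.rb does.
    require 'java'

    class UpcaseFactory
      # Called by Java as java.util.function.Function#apply(key).
      def apply(key)
        "value-for-#{key.upcase}"
      end
    end

    map = java.util.concurrent.ConcurrentHashMap.new
    factory = UpcaseFactory.new
    puts map.computeIfAbsent('prefix_a', factory) # computes the value once
    puts map.computeIfAbsent('prefix_a', factory) # cached; apply is not called again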