logstash-output-azureblob 0.9.0
- checksums.yaml +7 -0
- data/CHANGELOG.md +2 -0
- data/CONTRIBUTORS +16 -0
- data/DEVELOPER.md +2 -0
- data/Gemfile +3 -0
- data/LICENSE +11 -0
- data/README.md +102 -0
- data/lib/logstash/outputs/azure_blob.rb +235 -0
- data/lib/logstash/outputs/blob/file_repository.rb +138 -0
- data/lib/logstash/outputs/blob/path_validator.rb +20 -0
- data/lib/logstash/outputs/blob/size_and_time_rotation_policy.rb +28 -0
- data/lib/logstash/outputs/blob/size_rotation_policy.rb +29 -0
- data/lib/logstash/outputs/blob/temporary_file.rb +81 -0
- data/lib/logstash/outputs/blob/temporary_file_factory.rb +135 -0
- data/lib/logstash/outputs/blob/time_rotation_policy.rb +29 -0
- data/lib/logstash/outputs/blob/uploader.rb +75 -0
- data/lib/logstash/outputs/blob/writable_directory_validator.rb +19 -0
- data/lib/logstash/outputs/template.rb +16 -0
- data/logstash-output-azureblob.gemspec +25 -0
- data/spec/outputs/azureblob_spec.rb +49 -0
- data/spec/outputs/blob/file_repository_spec.rb +140 -0
- data/spec/outputs/blob/size_and_time_rotation_policy_spec.rb +76 -0
- data/spec/outputs/blob/size_rotation_policy_spec.rb +39 -0
- data/spec/outputs/blob/temporary_file_factory_spec.rb +88 -0
- data/spec/outputs/blob/temporary_file_spec.rb +46 -0
- data/spec/outputs/blob/time_rotation_policy_spec.rb +59 -0
- data/spec/outputs/blob/uploader_spec.rb +61 -0
- data/spec/outputs/blob/writable_directory_validator_spec.rb +39 -0
- data/spec/spec_helper.rb +5 -0
- data/spec/supports/helpers.rb +27 -0
- metadata +141 -0
data/lib/logstash/outputs/blob/temporary_file_factory.rb
@@ -0,0 +1,135 @@

```ruby
require 'socket'
require 'securerandom'
require 'fileutils'
require 'zlib'
require 'forwardable'

module LogStash
  module Outputs
    class LogstashAzureBlobOutput
      # a sub class of +LogstashAzureBlobOutput+
      # creates the temporary files to write and later upload
      class TemporaryFileFactory
        FILE_MODE = 'a'.freeze
        GZIP_ENCODING = 'gzip'.freeze
        GZIP_EXTENSION = 'txt.gz'.freeze
        TXT_EXTENSION = 'txt'.freeze
        STRFTIME = '%Y-%m-%dT%H.%M'.freeze

        attr_accessor :counter, :tags, :prefix, :encoding, :temporary_directory, :current

        # initialize the class
        def initialize(prefix, tags, encoding, temporary_directory)
          @counter = 0
          @prefix = prefix

          @tags = tags
          @encoding = encoding
          @temporary_directory = temporary_directory
          @lock = Mutex.new

          rotate!
        end

        # do the rotation
        def rotate!
          @lock.synchronize do
            @current = new_file
            increment_counter
            @current
          end
        end

        private

        # if it is not gzip encoding, then it is the txt extension
        def extension
          gzip? ? GZIP_EXTENSION : TXT_EXTENSION
        end

        # boolean method to check if the encoding is gzip
        def gzip?
          encoding == GZIP_ENCODING
        end

        # increment the counter by one
        def increment_counter
          @counter += 1
        end

        # gets the current time
        def current_time
          Time.now.strftime(STRFTIME)
        end

        # generates the name of the file to be saved in blob storage
        def generate_name
          filename = "#{current_time}.#{SecureRandom.uuid}"

          if !tags.empty?
            "#{filename}.tag_#{tags.join('.')}.part#{counter}.#{extension}"
          else
            "#{filename}.part#{counter}.#{extension}"
          end
        end

        # creates the file to be saved in blob storage
        def new_file
          uuid = SecureRandom.uuid
          name = generate_name
          path = ::File.join(temporary_directory, uuid)
          key = ::File.join(prefix, name)

          FileUtils.mkdir_p(::File.join(path, prefix))

          io = if gzip?
                 # We have to use this wrapper because we cannot access the size of the
                 # file directly on the gzip writer.
                 IOWrappedGzip.new(::File.open(::File.join(path, key), FILE_MODE))
               else
                 ::File.open(::File.join(path, key), FILE_MODE)
               end

          TemporaryFile.new(key, io, path)
        end

        # class that wraps the gzip encoding
        class IOWrappedGzip
          extend Forwardable

          def_delegators :@gzip_writer, :write, :close
          attr_reader :file_io, :gzip_writer

          # initialize the class for encoding
          def initialize(file_io)
            @file_io = file_io
            @gzip_writer = Zlib::GzipWriter.open(file_io)
          end

          # gets the path
          def path
            @gzip_writer.to_io.path
          end

          # gets the current file size
          def size
            if @gzip_writer.pos.zero?
              # Ensure a zero file size is returned when nothing has
              # yet been written to the gzip file.
              0
            else
              @gzip_writer.flush
              @gzip_writer.to_io.size
            end
          end

          # flushes the underlying file to disk
          def fsync
            @gzip_writer.to_io.fsync
          end
        end
      end
    end
  end
end
```
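To make the lifecycle concrete, here is a minimal usage sketch of the factory above; the prefix, tags, and directory values are made-up examples, not plugin defaults:

```ruby
require 'logstash/outputs/blob/temporary_file_factory'

# Illustrative values; in the plugin these come from the output configuration.
factory = LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFileFactory.new(
  'my-prefix',         # prefix used to build the blob key
  %w[firewall syslog], # tags baked into the generated file name
  'gzip',              # 'gzip' or 'none'
  '/tmp/logstash'      # temporary_directory
)

file = factory.current # TemporaryFile created by the initializer
file.write("a log line\n")

# When a rotation policy fires, rotate! hands out a fresh file whose name
# carries the incremented part counter (part0, part1, ...).
rotated = factory.rotate!
```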
data/lib/logstash/outputs/blob/time_rotation_policy.rb
@@ -0,0 +1,29 @@

```ruby
module LogStash
  module Outputs
    class LogstashAzureBlobOutput
      # a sub class of +LogstashAzureBlobOutput+
      # sets the policy for time rotation
      class TimeRotationPolicy
        attr_reader :time_file
        # initialize the class and validate the time_file value
        def initialize(time_file)
          if time_file <= 0
            raise LogStash::ConfigurationError.new('`time_file` needs to be greater than 0')
          end

          # time_file is configured in minutes; keep it in seconds internally
          @time_file = time_file * 60
        end

        # rotates when the file is non-empty and older than the configured window
        def rotate?(file)
          !file.empty? && (Time.now - file.ctime) >= time_file
        end

        # this policy requires periodic checks
        def needs_periodic?
          true
        end
      end
    end
  end
end
```
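A short sketch of the policy in isolation; the stub stands in for a `TemporaryFile`, since `rotate?` only consults `#empty?` and `#ctime`:

```ruby
require 'logstash/outputs/blob/time_rotation_policy'

# Stand-in for a TemporaryFile; only #empty? and #ctime are consulted.
StubFile = Struct.new(:content_empty, :ctime) do
  def empty?
    content_empty
  end
end

policy = LogStash::Outputs::LogstashAzureBlobOutput::TimeRotationPolicy.new(15) # minutes

old_file = StubFile.new(false, Time.now - (16 * 60))
new_file = StubFile.new(false, Time.now)

policy.rotate?(old_file)  # => true  (non-empty and older than 15 minutes)
policy.rotate?(new_file)  # => false (not old enough yet)
policy.needs_periodic?    # => true, signalling the plugin to check on a timer
```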
data/lib/logstash/outputs/blob/uploader.rb
@@ -0,0 +1,75 @@

```ruby
require 'logstash/util'
require 'azure/storage/blob'
require 'azure/storage/common'
require 'concurrent'

module LogStash
  module Outputs
    class LogstashAzureBlobOutput
      # a sub class of +LogstashAzureBlobOutput+
      # this class uploads the files to Azure cloud
      class Uploader
        TIME_BEFORE_RETRYING_SECONDS = 1
        DEFAULT_THREADPOOL = Concurrent::ThreadPoolExecutor.new(min_threads: 1,
                                                                max_threads: 8,
                                                                max_queue: 1,
                                                                fallback_policy: :caller_runs)

        attr_accessor :logger, :container_name, :blob_account

        # Initializes the class
        # @param blob_account [Object] endpoint to the azure gem
        # @param container_name [String] name of the container in azure blob; at this point, if it didn't exist, it has already been created
        def initialize(blob_account, container_name, logger, threadpool = DEFAULT_THREADPOOL)
          @blob_account = blob_account
          @workers_pool = threadpool
          @logger = logger
          @container_name = container_name
        end

        # Posts the upload of the file onto the worker thread pool
        def upload_async(file, options = {})
          @workers_pool.post do
            LogStash::Util.set_thread_name("LogstashAzureBlobOutput output uploader, file: #{file.path}")
            upload(file, options)
          end
        end

        # Uploads the file to the container
        def upload(file, options = {})
          begin
            # read the whole file in binary mode; binread closes the handle for us
            content = ::File.binread(file.path)
            filename = ::File.basename(file.path)
            blob = blob_account.create_block_blob(container_name, filename, content)
          rescue => e
            # When we get here it usually means that LogstashAzureBlobOutput tried to do some retries by itself (default is 3)
            # When the retry limit is reached or another error happens we will wait and retry.
            #
            # Thread might be stuck here, but I think it's better than losing anything;
            # it's either a transient error or something bad really happened.
            logger.error('Uploading failed, retrying', exception: e.class, message: e.message, filename: filename, path: file.path, container: container_name, blobAccount: blob_account, backtrace: e.backtrace)
            sleep TIME_BEFORE_RETRYING_SECONDS
            retry
          end

          options[:on_complete].call(file) unless options[:on_complete].nil?
          blob
        rescue => e
          logger.error('An error occurred in the `on_complete` uploader',
                       exception: e.class,
                       message: e.message,
                       path: file.path,
                       backtrace: e.backtrace)
          raise e # reraise it since we don't deal with it now
        end

        # stop the upload threads
        def stop
          @workers_pool.shutdown
          @workers_pool.wait_for_termination(nil) # block until it's done
        end
      end
    end
  end
end
```
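A hedged sketch of wiring the uploader to a client from the `azure-storage-blob` gem; the credentials, paths, and container name are placeholders, and the `:on_complete` hook shows where a caller could clean up the local copy:

```ruby
require 'logger'
require 'fileutils'
require 'azure/storage/blob'
require 'logstash/outputs/blob/uploader'

# Placeholder credentials; the plugin reads these from its configuration.
blob_account = Azure::Storage::Blob::BlobService.create(
  storage_account_name: ENV['AZURE_STORAGE_ACCOUNT'],
  storage_access_key: ENV['AZURE_STORAGE_ACCESS_KEY']
)

uploader = LogStash::Outputs::LogstashAzureBlobOutput::Uploader.new(
  blob_account, 'my-container', Logger.new($stdout)
)

# Minimal stand-in for a TemporaryFile; the uploader only reads #path here.
FileUtils.mkdir_p('/tmp/logstash')
File.write('/tmp/logstash/example.txt', "a log line\n")
file = Struct.new(:path).new('/tmp/logstash/example.txt')

# Queue the upload on the worker pool and delete the local copy on success.
uploader.upload_async(file, on_complete: ->(f) { File.delete(f.path) })

uploader.stop # drain the pool before shutdown
```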
data/lib/logstash/outputs/blob/writable_directory_validator.rb
@@ -0,0 +1,19 @@

```ruby
require 'fileutils'

module LogStash
  module Outputs
    class LogstashAzureBlobOutput
      # a sub class of +LogstashAzureBlobOutput+
      # validates that the specified temporary directory can be accessed with
      # write permission
      class WritableDirectoryValidator
        # Checks if a path is valid
        # @param path [String] String that represents the path
        def self.valid?(path)
          FileUtils.mkdir_p(path) unless Dir.exist?(path)
          ::File.writable?(path)
        rescue
          false
        end
      end
    end
  end
end
```
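Usage is a single class-level check, e.g. at plugin registration time (a plausible pattern, not lifted from this gem's source; the path is an arbitrary example):

```ruby
require 'tmpdir'
require 'logstash/outputs/blob/writable_directory_validator'

path = File.join(Dir.tmpdir, 'logstash')

unless LogStash::Outputs::LogstashAzureBlobOutput::WritableDirectoryValidator.valid?(path)
  raise LogStash::ConfigurationError, "Directory #{path} isn't writable"
end
```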
data/lib/logstash/outputs/template.rb
@@ -0,0 +1,16 @@

```ruby
# encoding: utf-8
require "logstash/outputs/base"

# An azureblob output that does nothing.
class LogStash::Outputs::Azureblob < LogStash::Outputs::Base
  config_name "azureblob"

  public
  def register
  end # def register

  public
  def receive(event)
    return "Event received"
  end # def receive
end # class LogStash::Outputs::Azureblob
```
data/logstash-output-azureblob.gemspec
@@ -0,0 +1,25 @@

```ruby
Gem::Specification.new do |s|
  s.name = 'logstash-output-azureblob'
  s.version = '0.9.0'
  s.licenses = ['Apache License (2.0)']
  s.summary = 'Output plugin for logstash to send output to Azure Blob Storage'
  s.description = 'This output plugin will send logs to azure blob storage. This does not support ADLS2 endpoints'
  s.homepage = 'https://github.com/seanstark/logstash-output-azureblob'
  s.authors = ['Sean Stark']
  s.email = 'sean.stark@microsoft.com'
  s.require_paths = ['lib']

  # Files
  s.files = Dir['lib/**/*', 'spec/**/*', 'vendor/**/*', '*.gemspec', '*.md', 'CONTRIBUTORS', 'Gemfile', 'LICENSE', 'NOTICE.TXT']
  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'output' }

  # Gem dependencies
  s.add_runtime_dependency 'azure-storage-blob', '~> 2.0'
  s.add_runtime_dependency 'logstash-codec-plain'
  s.add_runtime_dependency 'logstash-core-plugin-api', '~> 2.1'
  s.add_development_dependency 'logstash-devutils'
end
```
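For local development, a Logstash checkout typically pulls a plugin like this in through its Gemfile; the path below is illustrative:

```ruby
# Gemfile of a Logstash checkout; the path is illustrative.
gem 'logstash-output-azureblob', path: '../logstash-output-azureblob'
```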
data/spec/outputs/azureblob_spec.rb
@@ -0,0 +1,49 @@

```ruby
require 'logstash/devutils/rspec/spec_helper'
require 'logstash/outputs/azure_blob'
require 'logstash/codecs/plain'
require 'logstash/event'
require 'tmpdir'
require 'pry'

describe LogStash::Outputs::LogstashAzureBlobOutput do
  let(:config_options) do
    {
      storage_account_name: ENV['AZURE_STORAGE_ACCOUNT'],
      storage_access_key: ENV['AZURE_STORAGE_ACCESS_KEY'],
      container_name: 'test',
      size_file: 5242880,
      time_file: 15,
      restore: true,
      temporary_directory: File.join(Dir.tmpdir, 'logstash'),
      prefix: '',
      upload_queue_size: 2 * (Concurrent.processor_count * 0.25).ceil,
      upload_workers_count: (Concurrent.processor_count * 0.5).ceil,
      rotation_strategy: 'size_and_time',
      tags: [],
      encoding: 'none'
    }
  end
  let(:sample_event) { LogStash::Event.new(source: 'alguna', tags: %w[tag1 tag2], fields: { field1: 1, field2: true }) }

  # let(:output) { described_class.new() }

  # before do
  #   output.register
  # end

  # it 'should create' do
  #   blober = described_class.new
  #   blober.register
  #   expect(blober.storage_account_name).not_to be_nil
  #   expect(blober.storage_access_key).not_to be_nil
  #   expect(blober.container_name).not_to be_nil
  # end

  describe 'receive message' do
    subject { output.receive(sample_event) }
    # xit 'should return the blob sent to Azure' do
    #   md5 = Digest::MD5.base64digest(sample_event.to_json)
    #   expect(subject.properties[:content_md5]).to eq(md5)
    # end
  end
end
```
data/spec/outputs/blob/file_repository_spec.rb
@@ -0,0 +1,140 @@

```ruby
require 'logstash/outputs/azure_blob'
require 'stud/temporary'
require 'fileutils'
require_relative '../../spec_helper'

describe LogStash::Outputs::LogstashAzureBlobOutput::FileRepository do
  let(:tags) { [] }
  let(:encoding) { 'none' }
  let(:temporary_directory) { Stud::Temporary.pathname }
  let(:prefix_key) { 'a-key' }

  before do
    FileUtils.mkdir_p(temporary_directory)
  end

  subject { described_class.new(tags, encoding, temporary_directory) }

  it 'returns a temporary file' do
    subject.get_file(prefix_key) do |file|
      expect(file).to be_kind_of(LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFile)
    end
  end

  it 'returns the same file for the same prefix key' do
    file_path = nil

    subject.get_file(prefix_key) do |file|
      file_path = file.path
    end

    subject.get_file(prefix_key) do |file|
      expect(file.path).to eq(file_path)
    end
  end

  it 'returns the same file for the same dynamic prefix key' do
    prefix = '%{type}/%{+YYYY}/%{+MM}/%{+dd}/'
    event = LogStash::Event.new('type' => 'syslog')
    key = event.sprintf(prefix)
    file_path = nil

    subject.get_file(key) do |file|
      file_path = file.path
    end

    subject.get_file(key) do |file|
      expect(file.path).to eq(file_path)
    end
  end

  it 'returns a different file for different prefix keys' do
    file_path = nil

    subject.get_file(prefix_key) do |file|
      file_path = file.path
    end

    subject.get_file('another_prefix_key') do |file|
      expect(file.path).not_to eq(file_path)
    end
  end

  it 'allows getting the file factory for a specific prefix' do
    subject.get_factory(prefix_key) do |factory|
      expect(factory).to be_kind_of(LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFileFactory)
    end
  end

  it 'returns a different file factory for different prefix keys' do
    factory = nil

    subject.get_factory(prefix_key) do |f|
      factory = f
    end

    subject.get_factory('another_prefix_key') do |f|
      expect(factory).not_to eq(f)
    end
  end

  it 'returns the number of prefix keys' do
    expect(subject.size).to eq(0)
    subject.get_file(prefix_key) { |file| file.write('something') }
    expect(subject.size).to eq(1)
  end

  it 'returns all available keys' do
    subject.get_file(prefix_key) { |file| file.write('something') }
    expect(subject.keys.toArray).to include(prefix_key)
    expect(subject.keys.toArray.size).to eq(1)
  end

  it 'cleans stale factories' do
    @file_repository = described_class.new(tags, encoding, temporary_directory, 1, 1)
    expect(@file_repository.size).to eq(0)
    path = ''
    @file_repository.get_factory(prefix_key) do |factory|
      factory.current.write('hello')
      # force a rotation so we get an empty file that will get stale.
      factory.rotate!
      path = factory.current.temp_path
    end

    @file_repository.get_file('another-prefix') { |file| file.write('hello') }
    expect(@file_repository.size).to eq(2)
    try(10) { expect(@file_repository.size).to eq(1) }
    expect(File.directory?(path)).to be_falsey
  end
end

describe LogStash::Outputs::LogstashAzureBlobOutput::FileRepository::PrefixedValue do
  let(:factory) { spy('factory', current: file) }
  subject { described_class.new(factory, 1) }

  context '#stale?' do
    context 'when the file is empty and older than the stale time' do
      let(:file) { double('file', size: 0, ctime: Time.now - 5) }

      it 'returns true' do
        expect(subject.stale?).to be_truthy
      end
    end

    context 'when the file has data in it' do
      let(:file) { double('file', size: 200, ctime: Time.now - 5) }

      it 'returns false' do
        expect(subject.stale?).to be_falsey
      end
    end

    context 'when the file is not old enough' do
      let(:file) { double('file', size: 0, ctime: Time.now + 100) }

      it 'returns false' do
        expect(subject.stale?).to be_falsey
      end
    end
  end
end
```
data/spec/outputs/blob/size_and_time_rotation_policy_spec.rb
@@ -0,0 +1,76 @@

```ruby
require 'logstash/devutils/rspec/spec_helper'
require 'logstash/outputs/blob/size_and_time_rotation_policy'
require 'logstash/outputs/blob/temporary_file'
require 'stud/temporary'

describe LogStash::Outputs::LogstashAzureBlobOutput::SizeAndTimeRotationPolicy do
  let(:file_size) { 10 }
  let(:time_file) { 1 }
  subject { described_class.new(file_size, time_file) }

  let(:temporary_directory) { Stud::Temporary.pathname }
  let(:temporary_file) { Stud::Temporary.file }
  let(:name) { 'foobar' }
  let(:content) { 'hello' * 1000 }
  let(:file) { LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFile.new(name, temporary_file, temporary_directory) }

  it 'raises an exception if the `time_file` is set to 0' do
    expect { described_class.new(100, 0) }.to raise_error(LogStash::ConfigurationError, /time_file/)
  end

  it 'raises an exception if the `time_file` is < 0' do
    expect { described_class.new(100, -100) }.to raise_error(LogStash::ConfigurationError, /time_file/)
  end

  it 'raises an exception if the `size_file` is 0' do
    expect { described_class.new(0, 100) }.to raise_error(LogStash::ConfigurationError, /size_file/)
  end

  it 'raises an exception if the `size_file` is < 0' do
    expect { described_class.new(-100, 100) }.to raise_error(LogStash::ConfigurationError, /size_file/)
  end

  it 'returns true if the size on disk is higher than the `file_size`' do
    file.write(content)
    file.fsync
    expect(subject.rotate?(file)).to be_truthy
  end

  it 'returns false if the size is smaller than the `file_size`' do
    expect(subject.rotate?(file)).to be_falsey
  end

  context 'when the size of the file is greater than 0' do
    let(:file_size) { 10000 }

    before :each do
      file.write(content)
      file.fsync
    end

    it 'returns true if the file is old enough' do
      allow(file).to receive(:ctime).and_return(Time.now - (time_file * 2 * 60))
      expect(subject.rotate?(file)).to be_truthy
    end

    it 'returns false if it is not old enough' do
      allow(file).to receive(:ctime).and_return(Time.now + time_file * 10)
      expect(subject.rotate?(file)).to be_falsey
    end
  end

  context 'when the size of the file is 0' do
    it 'returns false if the file is old enough' do
      expect(subject.rotate?(file)).to be_falsey
    end

    it 'returns false if it is not old enough' do
      expect(subject.rotate?(file)).to be_falsey
    end
  end

  context '#needs_periodic?' do
    it 'returns true' do
      expect(subject.needs_periodic?).to be_truthy
    end
  end
end
```
data/spec/outputs/blob/size_rotation_policy_spec.rb
@@ -0,0 +1,39 @@

```ruby
require 'logstash/devutils/rspec/spec_helper'
require 'logstash/outputs/blob/size_rotation_policy'
require 'logstash/outputs/blob/temporary_file'
require 'stud/temporary'
require 'fileutils'

describe LogStash::Outputs::LogstashAzureBlobOutput::SizeRotationPolicy do
  subject { described_class.new(size_file) }

  let(:temporary_directory) { Stud::Temporary.directory }
  let(:temporary_file) { Stud::Temporary.file }
  let(:name) { 'foobar' }
  let(:content) { 'hello' * 1000 }
  let(:size_file) { 10 } # in bytes
  let(:file) { LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFile.new(name, temporary_file, temporary_directory) }

  it 'returns true if the size on disk is higher than the `size_file`' do
    file.write(content)
    file.fsync
    expect(subject.rotate?(file)).to be_truthy
  end

  it 'returns false if the size is smaller than the `size_file`' do
    expect(subject.rotate?(file)).to be_falsey
  end

  it 'raises an exception if the `size_file` is 0' do
    expect { described_class.new(0) }.to raise_error(LogStash::ConfigurationError, /size_file/)
  end

  it 'raises an exception if the `size_file` is < 0' do
    expect { described_class.new(-100) }.to raise_error(LogStash::ConfigurationError, /size_file/)
  end

  context '#needs_periodic?' do
    it 'returns false' do
      expect(subject.needs_periodic?).to be_falsey
    end
  end
end
```
data/spec/outputs/blob/temporary_file_factory_spec.rb
@@ -0,0 +1,88 @@

```ruby
require 'logstash/outputs/blob/temporary_file_factory'
require 'logstash/outputs/blob/temporary_file'
require 'stud/temporary'
require 'fileutils'

describe LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFileFactory do
  let(:prefix) { 'foobar' }
  let(:tags) { [] }
  let(:temporary_directory) { Stud::Temporary.pathname }

  before do
    FileUtils.mkdir_p(temporary_directory)
  end

  subject { described_class.new(prefix, tags, encoding, temporary_directory) }

  shared_examples 'file factory' do
    it 'creates the file on disk' do
      expect(File.exist?(subject.current.path)).to be_truthy
    end

    it 'returns a size equal to zero after file creation' do
      expect(subject.current.size).to eq(0)
    end

    it 'creates a temporary file when initialized' do
      expect(subject.current).to be_kind_of(LogStash::Outputs::LogstashAzureBlobOutput::TemporaryFile)
    end

    it 'creates a file in the right format' do
      expect(subject.current.path).to match(extension)
    end

    it 'allows rotating the file' do
      file_path = subject.current.path
      expect(subject.rotate!.path).not_to eq(file_path)
    end

    it 'increments the part name on rotation' do
      expect(subject.current.path).to match(/part0/)
      expect(subject.rotate!.path).to match(/part1/)
    end

    it 'includes the date' do
      n = Time.now
      expect(subject.current.path).to include(n.strftime('%Y-%m-%dT'))
    end

    it 'includes the file key in the path' do
      file = subject.current
      expect(file.path).to match(/#{file.key}/)
    end

    it 'creates a unique directory in the temporary directory for each file' do
      uuid = 'hola'
      expect(SecureRandom).to receive(:uuid).and_return(uuid).twice
      expect(subject.current.path).to include(uuid)
    end

    context 'with tags supplied' do
      let(:tags) { %w[secret service] }

      it 'adds tags to the filename' do
        expect(subject.current.path).to match(/tag_#{tags.join('.')}.part/)
      end
    end

    context 'without tags' do
      it "doesn't add tags to the filename" do
        expect(subject.current.path).not_to match(/tag_/)
      end
    end
  end

  context 'when gzip' do
    let(:encoding) { 'gzip' }
    let(:extension) { /\.txt\.gz$/ }

    include_examples 'file factory'
  end

  context 'when encoding is set to `none`' do
    let(:encoding) { 'none' }
    let(:extension) { /\.txt$/ }

    include_examples 'file factory'
  end
end
```