logstash-output-qingstor 0.1.3 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile +0 -1
- data/README.md +1 -1
- data/lib/logstash/outputs/qingstor/file_repository.rb +64 -49
- data/lib/logstash/outputs/qingstor/qingstor_validator.rb +19 -18
- data/lib/logstash/outputs/qingstor/rotation_policy.rb +35 -28
- data/lib/logstash/outputs/qingstor/temporary_file.rb +38 -36
- data/lib/logstash/outputs/qingstor/temporary_file_factory.rb +61 -57
- data/lib/logstash/outputs/qingstor/uploader.rb +41 -36
- data/lib/logstash/outputs/qingstor.rb +144 -124
- data/logstash-output-qingstor.gemspec +12 -12
- data/spec/outputs/qingstor/file_repository_spec.rb +24 -22
- data/spec/outputs/qingstor/qingstor_validator_spec.rb +14 -12
- data/spec/outputs/qingstor/rotation_policy_spec.rb +58 -43
- data/spec/outputs/qingstor/temporary_file_factory_spec.rb +34 -32
- data/spec/outputs/qingstor/temporary_file_spec.rb +31 -30
- data/spec/outputs/qingstor/uploader_spec.rb +29 -30
- data/spec/outputs/qingstor_spec.rb +53 -44
- data/spec/outputs/qs_access_helper.rb +8 -8
- data/spec/outputs/spec_helper.rb +3 -3
- metadata +3 -5
- data/lib/logstash/outputs/qingstor/size_rotation_policy.rb +0 -26
- data/lib/logstash/outputs/qingstor/time_rotation_policy.rb +0 -26
data/lib/logstash/outputs/qingstor/uploader.rb
@@ -1,64 +1,69 @@
 # encoding: utf-8
-
-require
-require
-require
+
+require 'logstash/outputs/qingstor'
+require 'qingstor/sdk'
+require 'concurrent'
+require 'digest/md5'
+require 'base64'
+
 module LogStash
   module Outputs
     class Qingstor
-class Uploader
+      class Uploader
        TIME_BEFORE_RETRYING_SECONDS = 1
-DEFAULT_THREADPOOL = Concurrent::ThreadPoolExecutor.new(
-
-
-
-
-
+        DEFAULT_THREADPOOL = Concurrent::ThreadPoolExecutor.new(
+          :min_thread => 1,
+          :max_thread => 8,
+          :max_queue => 1,
+          :fallback_policy => :caller_runs
+        )
+
+        attr_reader :bucket, :upload_options, :logger

         def initialize(bucket, logger, threadpool = DEFAULT_THREADPOOL)
          @bucket = bucket
          @logger = logger
          @workers_pool = threadpool
-end
+        end

         def upload_async(file, options = {})
          @workers_pool.post do
            upload(file, options)
-end
-end
+          end
+        end

         def upload(file, options = {})
          upload_options = options.fetch(:upload_options, {})

          file_md5 = Digest::MD5.file(file.path).to_s
-
+
          upload_headers = {
-
-
+            'content_md5' => file_md5,
+            'body' => ::File.open(file.path)
          }

-
-base64_key = Base64.strict_encode64
-key_md5 = Digest::MD5.hexdigest
-base64_key_md5 = Base64.strict_encode64
-upload_headers.merge!(
-
-
-
-
-
-
+          unless upload_options[:server_side_encryption_algorithm].nil?
+            base64_key = Base64.strict_encode64(upload_options[:customer_key])
+            key_md5 = Digest::MD5.hexdigest(upload_options[:customer_key])
+            base64_key_md5 = Base64.strict_encode64(key_md5)
+            upload_headers.merge!(
+              'x_qs_encryption_customer_algorithm' =>
+                upload_options[:server_side_encryption_algorithm],
+              'x_qs_encryption_customer_key' => base64_key,
+              'x_qs_encryption_customer_key_md5' => base64_key_md5
+            )
+          end
+          @logger.debug('uploading file', :file => file.key)
          bucket.put_object(file.key, upload_headers)

          options[:on_complete].call(file) unless options[:on_complete].nil?
-end
+        end

-def stop
+        def stop
          @workers_pool.shutdown
          @workers_pool.wait_for_termination(nil)
-end
-end
-end
-end
-end
-
+        end
+      end
+    end
+  end
+end
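
The reworked Uploader above now builds the content_md5 and optional customer-encryption headers itself and signals completion through an :on_complete callback. Below is a minimal sketch, not taken from the package, of driving the class directly in a plugin development environment; NullLogger, SampleFile, the bucket name and the environment variables are illustrative stand-ins for the plugin's own logger, TemporaryFile objects and credentials.

require 'logstash/outputs/qingstor/uploader'
require 'qingstor/sdk'

# Stand-in for the plugin's @logger; it only needs #debug/#error accepting (message, fields).
class NullLogger
  def debug(*); end
  def error(*); end
end

# Build a bucket handle the same way the plugin's getbucket method does.
config   = QingStor::SDK::Config.init(ENV['QS_ACCESS_KEY_ID'], ENV['QS_SECRET_ACCESS_KEY'])
service  = QingStor::SDK::Service.new(config)
bucket   = service.bucket('my-bucket', 'pek3a')

uploader = LogStash::Outputs::Qingstor::Uploader.new(bucket, NullLogger.new)

# Anything handed to upload_async only needs #path and #key, as used in Uploader#upload;
# the path must point to an existing file so its MD5 can be computed.
SampleFile = Struct.new(:path, :key)
file = SampleFile.new('/tmp/logstash2qingstor/aprefix/ls.s.log', 'aprefix/ls.s.log')

uploader.upload_async(file,
                      :on_complete => ->(f) { puts "uploaded #{f.key}" },
                      :upload_options => {})
uploader.stop # shuts the worker pool down and waits for pending uploads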
data/lib/logstash/outputs/qingstor.rb
@@ -1,37 +1,41 @@
 # encoding: utf-8
-
-require
-require
-require
-require
-require
+
+require 'logstash-core'
+require 'logstash/outputs/base'
+require 'logstash/namespace'
+require 'tmpdir'
+require 'qingstor/sdk'
+require 'concurrent'

 class LogStash::Outputs::Qingstor < LogStash::Outputs::Base
-require
-require
-require
-require
-require
-require
+  require 'logstash/outputs/qingstor/temporary_file'
+  require 'logstash/outputs/qingstor/temporary_file_factory'
+  require 'logstash/outputs/qingstor/file_repository'
+  require 'logstash/outputs/qingstor/rotation_policy'
+  require 'logstash/outputs/qingstor/uploader'
+  require 'logstash/outputs/qingstor/qingstor_validator'

   PERIODIC_CHECK_INTERVAL_IN_SECONDS = 15
-CRASH_RECOVERY_THREADPOOL = Concurrent::ThreadPoolExecutor.new(
-
-
-
-
-
-
-
-#
-#
+  CRASH_RECOVERY_THREADPOOL = Concurrent::ThreadPoolExecutor.new(
+    :min_threads => 1,
+    :max_threads => 2,
+    :fallback_policy => :caller_runs
+  )
+
+  config_name 'qingstor'
+
+  # When configured as :single a single instance of the Output will be shared
+  # among the pipeline worker threads. Access to the `#multi_receive/
+  # #multi_receive_encoded/#receive` method will be synchronized i.e. only one
+  # thread will be active at a time making threadsafety much simpler.
   #
-# You can set this to :shared if your output is threadsafe. This will
-# concurrency but you will need to make appropriate uses of mutexes
+  # You can set this to :shared if your output is threadsafe. This will
+  # maximize concurrency but you will need to make appropriate uses of mutexes
+  # in `#multi_receive/#receive`.
   #
-# Only the `#multi_receive/#multi_receive_encoded` methods need to actually
-# will only be executed in a single thread
-concurrency :
+  # Only the `#multi_receive/#multi_receive_encoded` methods need to actually
+  # be threadsafe, the other methods will only be executed in a single thread
+  concurrency :single

   # The key id to access your QingStor
   config :access_key_id, :validate => :string, :required => true
@@ -49,76 +53,85 @@ class LogStash::Outputs::Qingstor < LogStash::Outputs::Base
   config :bucket, :validate => :string, :required => true

   # The region of the QingStor bucket
-config :region, :validate => [
+  config :region, :validate => %w[pek3a sh1a], :default => 'pek3a'

-# The prefix of filenames which will work as directory in qingstor
+  # The prefix of filenames which will work as directory in qingstor
   config :prefix, :validate => :string, :default => ''

-# Set the directory where logstash store the tmp files before
-# sending it to
-config :tmpdir, :validate => :string,
+  # Set the directory where logstash store the tmp files before
+  # sending it to QingStor, default directory in linux /tmp/logstash2qingstor
+  config :tmpdir, :validate => :string,
+         :default => File.join(Dir.tmpdir, 'logstash2qingstor')

-# Define tags to append to the file on the
+  # Define tags to append to the file on the QingStor bucket
   config :tags, :validate => :array, :default => []

   # Specify the content encoding. Supports ("gzip"), defaults to "none"
-config :encoding, :validate => [
+  config :encoding, :validate => %w[gzip none], :default => 'none'

-# Define the strategy to use to decide when we need to rotate the file
-#
-
+  # Define the strategy to use to decide when we need to rotate the file
+  # and push it to QingStor.
+  # The default strategy is to check for both size and time, the first one to
+  # match will rotate the file.
+  config :rotation_strategy, :validate => %w[size_and_time size time],
+         :default => 'size_and_time'

-# Define the size requirement for each file to upload to
+  # Define the size requirement for each file to upload to QingStor. In byte.
   config :file_size, :validate => :number, :default => 1024 * 1024 * 5

-# Define the time interval for each file to upload to
-config :file_time, :validate => :number, :default => 15
+  # Define the time interval for each file to upload to QingStor. In minutes.
+  config :file_time, :validate => :number, :default => 15

-# Specify maximum number of workers to use to upload the files to
-config :upload_workers_count,
+  # Specify maximum number of workers to use to upload the files to QingStor
+  config :upload_workers_count,
+         :validate => :number,
+         :default => (Concurrent.processor_count * 0.5).ceil

   # Number of items we can keep in the local queue before uploading them
-config :upload_queue_size,
+  config :upload_queue_size,
+         :validate => :number,
+         :default => 2 * (Concurrent.processor_count * 0.25).ceil

   # Specifies what type of encryption to use when SSE is enabled.
-config :server_side_encryption_algorithm, :validate => [
+  config :server_side_encryption_algorithm, :validate => %w[AES256 none],
+         :default => 'none'

-# Specifies the encryption customer key that would be used in server side
+  # Specifies the encryption customer key that would be used in server side
   config :customer_key, :validate => :string

-# Specifies if set to true, it would upload existing file in targeting folder
-
+  # Specifies if set to true, it would upload existing file in targeting folder
+  # when the plugin is launched
+  config :restore, :validate => :boolean, :default => false

-public
   def register
     QingstorValidator.prefix_valid?(@prefix) unless @prefix.nil?

-
-raise LogStash::ConfigurationError,
+    unless directory_valid?(@tmpdir)
+      raise LogStash::ConfigurationError,
+            "Logstash must have the permissions to write to: #{@tmpdir}"
     end
-
+
     @file_repository = FileRepository.new(@tags, @encoding, @tmpdir)

     @rotation = RotationPolicy.new(@rotation_strategy, @file_size, @file_time)

-executor = Concurrent::ThreadPoolExecutor.new(
+    executor = Concurrent::ThreadPoolExecutor.new(
       :min_threads => 1,
       :max_threads => @upload_workers_count,
       :max_queue => @upload_queue_size,
-:fallback_policy => :caller_runs
-
+      :fallback_policy => :caller_runs
+    )

-@qs_bucket =
+    @qs_bucket = getbucket
     QingstorValidator.bucket_valid?(@qs_bucket)

     @uploader = Uploader.new(@qs_bucket, @logger, executor)

     start_periodic_check if @rotation.needs_periodic?

-restore_from_crash if @restore
+    restore_from_crash if @restore
   end # def register

-public
   def multi_receive_encoded(events_and_encoded)
     prefix_written_to = Set.new

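
The upload_workers_count and upload_queue_size defaults introduced in this hunk are now derived from the host's core count instead of being fixed numbers. Rough arithmetic, assuming a machine where Concurrent.processor_count returns 8:

require 'concurrent'

puts((Concurrent.processor_count * 0.5).ceil)       # upload_workers_count: 4 on an 8-core host
puts(2 * (Concurrent.processor_count * 0.25).ceil)  # upload_queue_size:    4 on an 8-core host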
@@ -127,18 +140,21 @@ class LogStash::Outputs::Qingstor < LogStash::Outputs::Base
       prefix_written_to << prefix_key

       begin
-@file_repository.get_file(prefix_key)
+        @file_repository.get_file(prefix_key) do |f|
+          content = encoded.strip + "\n"
+          f.write(content)
+        end
       rescue Errno::ENOSPC => e
-@logger.error(
-
-
-
-
-
-#
+        @logger.error('QingStor: Nospace left in temporary directory',
+                      :tmpdir => @tmpdir)
+        raise e
+      end
+    end # end of each method
+
+    # check the file after file writing
+    # Groups IO calls to optimize fstat checks
     rotate_if_needed(prefix_written_to)
-end
-
+  end # def multi_receive_encoded

   def rotate_if_needed(prefixs)
     prefixs.each do |prefix|
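
The block form of FileRepository#get_file introduced above yields the current temporary file for a prefix, and each encoded event is appended with a trailing newline. A small illustrative sketch of that write path outside a pipeline (the sample event, prefix and directory are made up; the calls themselves are the ones this hunk and the specs use):

require 'logstash/outputs/qingstor/file_repository'
require 'tmpdir'
require 'fileutils'

tmpdir = File.join(Dir.tmpdir, 'lg2qs')
FileUtils.mkdir_p(tmpdir) # mirrors the directory_valid? check done in register

repo = LogStash::Outputs::Qingstor::FileRepository.new(
  %w[tag1 tag2],  # tags
  'none',         # encoding: 'none' or 'gzip'
  tmpdir
)

encoded = '{"message":"hello qingstor"}'
repo.get_file('aprefix') { |f| f.write(encoded.strip + "\n") }

# The same iteration that close uses to flush whatever is still buffered.
repo.each_files { |f| puts "pending upload: #{f.path}" }
repo.shutdown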
@@ -146,103 +162,107 @@ class LogStash::Outputs::Qingstor < LogStash::Outputs::Base
         tmp_file = factory.current

         if @rotation.rotate?(tmp_file)
-@logger.debug(
+          @logger.debug('Rotate file',
                         :strategy => tmp_file.key,
                         :path => tmp_file.path)
           upload_file(tmp_file)
           factory.rotate!
-end
-end
-end
+        end
+      end
+    end
   end # def rotate_if_needed

   def upload_file(file)
-@logger.debug(
-file.close
-@logger.debug(
+    @logger.debug('Add file to uploading queue', :key => file.key)
+    file.close
+    @logger.debug('upload options', :upload_options => upload_options)
     @uploader.upload_async(file,
-
-
-end
+                           :on_complete => method(:clean_temporary_file),
+                           :upload_options => upload_options)
+  end

-def
+  def getbucket
     @qs_config = QingStor::SDK::Config.init @access_key_id, @secret_access_key
-@qs_config.update(
+    @qs_config.update(:host => @host, :port => @port) unless @host.nil?
     @qs_service = QingStor::SDK::Service.new @qs_config
     @qs_service.bucket @bucket, @region
-end
+  end

-def close
+  def close
     stop_periodic_check if @rotation.needs_periodic?

-@logger.debug(
+    @logger.debug('uploading current workspace')
     @file_repository.each_files do |file|
       upload_file(file)
-end
+    end

     @file_repository.shutdown

-@uploader.stop
+    @uploader.stop

     @crash_uploader.stop if @restore
-end
+  end

   def upload_options
     options = {
-:content_encoding => @encoding ==
+      :content_encoding => @encoding == 'gzip' ? 'gzip' : nil
     }

-if
-options
-
-
-
-
-
-options
-end
+    if @server_side_encryption_algorithm == 'AES256' && !@customer_key.nil?
+      options[:server_side_encryption_algorithm] = @server_side_encryption_algorithm
+      options[:customer_key] = @customer_key
+    end
+
+    options
+  end

   def clean_temporary_file(file)
-@logger.debug(
+    @logger.debug('Removing temporary file', :file => file.path)
     file.delete!
-end
+  end

   def start_periodic_check
-@logger.debug(
+    @logger.debug('Start periodic rotation check')

-@periodic_check = Concurrent::TimerTask.new(
-
+    @periodic_check = Concurrent::TimerTask.new(
+      :execution_interval => PERIODIC_CHECK_INTERVAL_IN_SECONDS
+    ) do
+      @logger.debug('Periodic check for stale files')

       rotate_if_needed(@file_repository.keys)
     end

-@periodic_check.execute
-end
+    @periodic_check.execute
+  end

-def stop_periodic_check
-@periodic_check.shutdown
-end
+  def stop_periodic_check
+    @periodic_check.shutdown
+  end

   def directory_valid?(path)
-
-
-
-
-
-end
-end
+    FileUtils.mkdir_p(path) unless Dir.exist?(path)
+    ::File.writable?(path)
+  rescue
+    false
+  end

-def restore_from_crash
-@crash_uploader = Uploader.new(@qs_bucket, @logger,
+  def restore_from_crash
+    @crash_uploader = Uploader.new(@qs_bucket, @logger,
+                                   CRASH_RECOVERY_THREADPOOL)

     temp_folder_path = Pathname.new(@tmpdir)
-Dir.glob(::File.join(@tmpdir,
-
-
-temp_file = TemporaryFile.create_from_existing_file(file,
-
-@
-
-
-
-
+    Dir.glob(::File.join(@tmpdir, '**/*'))
+       .select { |file| ::File.file?(file) }
+       .each do |file|
+      temp_file = TemporaryFile.create_from_existing_file(file,
+                                                          temp_folder_path)
+      @logger.debug('Recoving from crash and uploading',
+                    :file => temp_file.path)
+      @crash_uploader.upload_async(
+        temp_file,
+        :on_complete => method(:clean_temporary_file),
+        :upload_options => upload_options
+      )
+    end
+  end
+end # class LogStash::Outputs::Qingstor
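
restore_from_crash in the last hunk re-uploads whatever temporary files an unclean shutdown left under tmpdir. A self-contained sketch of the same glob, using the plugin's default tmpdir and only reporting the leftovers rather than wrapping them in TemporaryFile objects:

require 'tmpdir'

tmpdir = File.join(Dir.tmpdir, 'logstash2qingstor')

leftovers = Dir.glob(::File.join(tmpdir, '**/*'))
               .select { |file| ::File.file?(file) }

leftovers.each do |file|
  # The plugin wraps each path with TemporaryFile.create_from_existing_file and
  # hands it to the crash-recovery uploader; here we only report it.
  puts "would re-upload: #{file}"
end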
data/logstash-output-qingstor.gemspec
@@ -1,28 +1,28 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-output-qingstor'
-s.version = '0.
+  s.version = '0.2.0'
   s.licenses = ['Apache License (2.0)']
   s.summary = 'logstash output plugin for qingstor'
   s.description = 'Collect the outputs of logstash and store into Qingstor'
-s.homepage = 'https://github.com/
+  s.homepage = 'https://github.com/yunify/logstash-output-qingstor'
   s.authors = ['Evan Zhao']
   s.email = 'tacingiht@gmail.com'
   s.require_paths = ['lib']

   # Files
-s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
-
+  s.files = Dir['lib/**/*', 'spec/**/*', 'vendor/**/*', '*.gemspec', '*.md', 'CONTRIBUTORS', 'Gemfile', 'LICENSE', 'NOTICE.TXT']
+  # Tests
   s.test_files = s.files.grep(%r{^(test|spec|features)/})

   # Special flag to let us know this is actually a logstash plugin
-s.metadata = {
+  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'output' }

   # Gem dependencies
-s.add_runtime_dependency
-s.add_runtime_dependency
-s.add_runtime_dependency
-s.add_runtime_dependency
-
-s.add_development_dependency
-s.add_development_dependency
+  s.add_runtime_dependency 'logstash-core-plugin-api', '~> 2.0'
+  s.add_runtime_dependency 'logstash-codec-plain'
+  s.add_runtime_dependency 'qingstor-sdk', '>=1.9.2'
+  s.add_runtime_dependency 'concurrent-ruby'
+
+  s.add_development_dependency 'stud', '~> 0.0.22'
+  s.add_development_dependency 'logstash-devutils'
 end
data/spec/outputs/qingstor/file_repository_spec.rb
@@ -1,26 +1,28 @@
-require
-require
-require
-require
-require
-
-describe LogStash::Outputs::Qingstor::FileRepository do
-let(:tags) { ["tag1", "tag2", "tag3"]}
-let(:encoding) { "none" }
-let(:tmpdir) { File.join(Dir.tmpdir, "lg2qs") }
-let(:prefix) { "aprefix" }
+require 'logstash/devutils/rspec/spec_helper'
+require 'logstash/outputs/qingstor/temporary_file'
+require 'logstash/outputs/qingstor/temporary_file_factory'
+require 'logstash/outputs/qingstor/file_repository'
+require 'tmpdir'

+describe LogStash::Outputs::Qingstor::FileRepository do
   subject { described_class.new(tags, encoding, tmpdir) }

-
-
-
-
-
+  let(:tags) { %w[tag1 tag2 tag3] }
+  let(:encoding) { 'none' }
+  let(:tmpdir) { File.join(Dir.tmpdir, 'lg2qs') }
+  let(:prefix) { 'aprefix' }
+
+  it 'can get current file io' do
+    subject.get_file(prefix) do |file|
+      expect(file).to be_kind_of(LogStash::Outputs::Qingstor::TemporaryFile)
+    end
+  end

-it
-subject.get_factory(prefix) do |factory|
-expect(factory).to be_kind_of(
-
-
-end
+  it 'can get current file factory' do
+    subject.get_factory(prefix) do |factory|
+      expect(factory).to be_kind_of(
+        LogStash::Outputs::Qingstor::TemporaryFileFactory
+      )
+    end
+  end
+end
data/spec/outputs/qingstor/qingstor_validator_spec.rb
@@ -1,21 +1,23 @@
 # encoding: utf-8
-
-require
-
+
+require 'logstash/devutils/rspec/spec_helper'
+require 'logstash/outputs/qingstor/qingstor_validator'
+require_relative '../qs_access_helper'

 describe LogStash::Outputs::Qingstor::QingstorValidator do
-let(:normal_prefix) {
-let(:wrong_prefix1) {
+  let(:normal_prefix) { 'super/bucket' }
+  let(:wrong_prefix1) { '/wrong/prefix' }
   let(:wrong_prefix2) { normal_prefix * 100 }
   let(:bucket) { qs_bucket_init }

-it
-expect{ described_class.prefix_valid?(wrong_prefix1) }
-
+  it 'raise error if the prefix is not valid' do
+    expect { described_class.prefix_valid?(wrong_prefix1) }
+      .to raise_error(LogStash::ConfigurationError)
+    expect { described_class.prefix_valid?(wrong_prefix2) }
+      .to raise_error(LogStash::ConfigurationError)
   end

-it
+  it 'return true if the prefix is valid' do
     expect(described_class.prefix_valid?(normal_prefix)).to be_truthy
-end
-
-end
+  end
+end