backup 3.6.0 → 3.7.0
This diff reflects the changes between these publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +2 -0
- data/lib/backup.rb +14 -4
- data/lib/backup/archive.rb +3 -2
- data/lib/backup/cleaner.rb +4 -2
- data/lib/backup/cli.rb +7 -5
- data/lib/backup/cloud_io/base.rb +41 -0
- data/lib/backup/cloud_io/cloud_files.rb +296 -0
- data/lib/backup/cloud_io/s3.rb +252 -0
- data/lib/backup/compressor/gzip.rb +2 -1
- data/lib/backup/config.rb +13 -5
- data/lib/backup/configuration.rb +1 -1
- data/lib/backup/configuration/helpers.rb +3 -1
- data/lib/backup/database/base.rb +3 -1
- data/lib/backup/database/mongodb.rb +2 -2
- data/lib/backup/database/mysql.rb +2 -2
- data/lib/backup/database/postgresql.rb +12 -2
- data/lib/backup/database/redis.rb +3 -2
- data/lib/backup/encryptor/gpg.rb +8 -10
- data/lib/backup/errors.rb +39 -70
- data/lib/backup/logger.rb +7 -2
- data/lib/backup/logger/fog_adapter.rb +30 -0
- data/lib/backup/model.rb +32 -14
- data/lib/backup/notifier/base.rb +4 -3
- data/lib/backup/notifier/campfire.rb +0 -1
- data/lib/backup/notifier/http_post.rb +122 -0
- data/lib/backup/notifier/mail.rb +38 -0
- data/lib/backup/notifier/nagios.rb +69 -0
- data/lib/backup/notifier/prowl.rb +0 -1
- data/lib/backup/notifier/pushover.rb +0 -1
- data/lib/backup/package.rb +5 -0
- data/lib/backup/packager.rb +3 -2
- data/lib/backup/pipeline.rb +4 -2
- data/lib/backup/storage/base.rb +2 -1
- data/lib/backup/storage/cloud_files.rb +151 -0
- data/lib/backup/storage/cycler.rb +4 -2
- data/lib/backup/storage/dropbox.rb +20 -16
- data/lib/backup/storage/ftp.rb +1 -2
- data/lib/backup/storage/local.rb +3 -3
- data/lib/backup/storage/ninefold.rb +3 -4
- data/lib/backup/storage/rsync.rb +1 -2
- data/lib/backup/storage/s3.rb +49 -158
- data/lib/backup/storage/scp.rb +3 -4
- data/lib/backup/storage/sftp.rb +1 -2
- data/lib/backup/syncer/base.rb +0 -1
- data/lib/backup/syncer/cloud/base.rb +129 -208
- data/lib/backup/syncer/cloud/cloud_files.rb +56 -41
- data/lib/backup/syncer/cloud/local_file.rb +93 -0
- data/lib/backup/syncer/cloud/s3.rb +78 -31
- data/lib/backup/syncer/rsync/base.rb +7 -0
- data/lib/backup/syncer/rsync/local.rb +0 -5
- data/lib/backup/syncer/rsync/push.rb +1 -2
- data/lib/backup/utilities.rb +18 -15
- data/lib/backup/version.rb +1 -1
- data/templates/cli/notifier/http_post +35 -0
- data/templates/cli/notifier/nagios +13 -0
- data/templates/cli/storage/cloud_files +8 -17
- data/templates/cli/storage/s3 +3 -10
- data/templates/cli/syncer/cloud_files +3 -31
- data/templates/cli/syncer/s3 +3 -27
- data/templates/notifier/mail/failure.erb +6 -1
- data/templates/notifier/mail/success.erb +6 -1
- data/templates/notifier/mail/warning.erb +6 -1
- metadata +37 -42
- data/lib/backup/storage/cloudfiles.rb +0 -68
data/lib/backup/storage/scp.rb
CHANGED
@@ -4,6 +4,7 @@ require 'net/scp'
 module Backup
   module Storage
     class SCP < Base
+      class Error < Backup::Error; end

       ##
       # Server credentials
@@ -13,9 +14,8 @@ module Backup
       # Server IP Address and SCP port
       attr_accessor :ip, :port

-      def initialize(model, storage_id = nil
+      def initialize(model, storage_id = nil)
         super
-        instance_eval(&block) if block_given?

         @port ||= 22
         @path ||= 'backups'
@@ -55,8 +55,7 @@ module Backup
           end
         end
         unless errors.empty?
-          raise
-            "Net::SSH reported the following errors:\n" +
+          raise Error, "Net::SSH reported the following errors:\n" +
             errors.join("\n")
         end
       end
data/lib/backup/storage/sftp.rb
CHANGED
@@ -13,9 +13,8 @@ module Backup
       # Server IP Address and SFTP port
       attr_accessor :ip, :port

-      def initialize(model, storage_id = nil
+      def initialize(model, storage_id = nil)
         super
-        instance_eval(&block) if block_given?

         @port ||= 22
         @path ||= 'backups'
data/lib/backup/syncer/cloud/base.rb
CHANGED
@@ -1,257 +1,178 @@
 # encoding: utf-8

-##
-# Only load the Fog gem, along with the Parallel gem, when the
-# Backup::Syncer::Cloud class is loaded
-# Backup::Dependency.load('fog')
-# Backup::Dependency.load('parallel')
-require 'fog'
-require 'parallel'
-
 module Backup
   module Syncer
     module Cloud
-      class
+      class Error < Backup::Error; end

-
-      # Create a Mutex to synchronize certain parts of the code
-      # in order to prevent race conditions or broken STDOUT.
+      class Base < Syncer::Base
        MUTEX = Mutex.new

        ##
-        #
-        #
-        #
-        attr_accessor :
+        # Number of threads to use for concurrency.
+        #
+        # Default: 0 (no concurrency)
+        attr_accessor :thread_count

        ##
-        #
-        #
-
+        # Number of times to retry failed operations.
+        #
+        # Default: 10
+        attr_accessor :max_retries

        ##
-        #
-        # the Cloud::S3 or Cloud::CloudFiles Syncer.
+        # Time in seconds to pause before each retry.
        #
-        #
-
-
-
-        #
-        # If not specified in the pre-configured defaults,
-        # the Cloud specific defaults are set here before evaluating
-        # any block provided in the user's configuration file.
-        def initialize(syncer_id = nil)
+        # Default: 30
+        attr_accessor :retry_waitsec
+
+        def initialize(syncer_id = nil, &block)
          super
+          instance_eval(&block) if block_given?
+
+          @thread_count ||= 0
+          @max_retries ||= 10
+          @retry_waitsec ||= 30

-          @path
-          @
-          @concurrency_level ||= 2
+          @path ||= 'backups'
+          @path = path.sub(/^\//, '')
        end

-        ##
-        # Performs the Sync operation
        def perform!
          log!(:started)
-
-
-
-
-
-
-
-
-
-
+          @transfer_count = 0
+          @unchanged_count = 0
+          @skipped_count = 0
+          @orphans = thread_count > 0 ? Queue.new : []
+
+          directories.each {|dir| sync_directory(dir) }
+          orphans_result = process_orphans
+
+          Logger.info "\nSummary:"
+          Logger.info "\s\sTransferred Files: #{ @transfer_count }"
+          Logger.info "\s\s#{ orphans_result }"
+          Logger.info "\s\sUnchanged Files: #{ @unchanged_count }"
+          Logger.warn "\s\sSkipped Files: #{ @skipped_count }" if @skipped_count > 0
          log!(:finished)
        end

        private

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        def sync_directory(dir)
+          remote_base = File.join(path, File.basename(dir))
+          Logger.info "Gathering remote data for '#{ remote_base }'..."
+          remote_files = get_remote_files(remote_base)
+
+          Logger.info("Gathering local data for '#{ File.expand_path(dir) }'...")
+          local_files = LocalFile.find(dir)
+
+          relative_paths = (local_files.keys | remote_files.keys).sort
+          if relative_paths.empty?
+            Logger.info 'No local or remote files found'
+          else
+            Logger.info 'Syncing...'
+            sync_block = Proc.new do |relative_path|
+              local_file = local_files[relative_path]
+              remote_md5 = remote_files[relative_path]
+              remote_path = File.join(remote_base, relative_path)
+              sync_file(local_file, remote_path, remote_md5)
+            end

-
-
-            all_file_names.each(&block)
-          when :threads
-            Parallel.each all_file_names,
-              :in_threads => concurrency_level, &block
-          when :processes
-            Parallel.each all_file_names,
-              :in_processes => concurrency_level, &block
+            if thread_count > 0
+              sync_in_threads(relative_paths, sync_block)
            else
-
-              "Unknown concurrency_type setting: #{ concurrency_type.inspect }"
+              relative_paths.each(&sync_block)
            end
          end
+        end

-
-
-
-
-
-
-
-
-
-
-        ##
-        # Returns a Hash of local files, validated to ensure the path
-        # does not contain invalid UTF-8 byte sequences.
-        # The keys are the filesystem paths, relative to @directory.
-        # The values are the LocalFile objects for that given file.
-        def local_files
-          @local_files ||= begin
-            hash = {}
-            local_hashes.lines.map do |line|
-              LocalFile.new(@directory, line)
-            end.compact.each do |file|
-              hash.merge!(file.relative_path => file)
+        def sync_in_threads(relative_paths, sync_block)
+          queue = Queue.new
+          queue << relative_paths.shift until relative_paths.empty?
+          num_threads = [thread_count, queue.size].min
+          Logger.info "\s\sUsing #{ num_threads } Threads"
+          threads = num_threads.times.map do
+            Thread.new do
+              loop do
+                path = queue.shift(true) rescue nil
+                path ? sync_block.call(path) : break
              end
-            hash
            end
          end

-
-
-
-
-
-
-          Logger.info("\s\sGenerating checksums for '#{ @directory }'")
-          cmd = "#{ utility(:find) } -L '#{ @directory }' -type f -print0 | " +
-            "#{ utility(:xargs) } -0 #{ utility(:openssl) } md5 2> /dev/null"
-          %x[#{ cmd }]
-        end
-
-        ##
-        # Returns a Hash of remote files
-        # The keys are the remote paths, relative to @remote_base
-        # The values are the Fog file objects for that given file
-        def remote_files
-          @remote_files ||= begin
-            hash = {}
-            @bucket.files.all(:prefix => @remote_base).each do |file|
-              hash.merge!(file.key.sub("#{ @remote_base }/", '') => file)
-            end
-            hash
+          # abort if any thread raises an exception
+          while threads.any?(&:alive?)
+            if threads.any? {|thr| thr.status.nil? }
+              threads.each(&:kill)
+              Thread.pass while threads.any?(&:alive?)
+              break
            end
+            sleep num_threads * 0.1
          end
+          threads.each(&:join)
+        end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          MUTEX.synchronize {
-
-
-
-
-
-
-
-
-          else
-            MUTEX.synchronize {
-              Logger.info("\s\s[skipping] '#{ remote_path }'")
-            }
-          end
-        elsif remote_file
-          if mirror
-            MUTEX.synchronize {
-              Logger.info("\s\s[removing] '#{ remote_path }'")
-            }
-            remote_file.destroy
-          else
-            MUTEX.synchronize {
-              Logger.info("\s\s[leaving] '#{ remote_path }'")
-            }
+        # If an exception is raised in multiple threads, only the exception
+        # raised in the first thread that Thread#join is called on will be
+        # handled. So all exceptions are logged first with their details,
+        # then a generic exception is raised.
+        def sync_file(local_file, remote_path, remote_md5)
+          if local_file && File.exist?(local_file.path)
+            if local_file.md5 == remote_md5
+              MUTEX.synchronize { @unchanged_count += 1 }
+            else
+              Logger.info("\s\s[transferring] '#{ remote_path }'")
+              begin
+                cloud_io.upload(local_file.path, remote_path)
+                MUTEX.synchronize { @transfer_count += 1 }
+              rescue CloudIO::FileSizeError => err
+                MUTEX.synchronize { @skipped_count += 1 }
+                Logger.warn Error.wrap(err, "Skipping '#{ remote_path }'")
+              rescue => err
+                Logger.error(err)
+                raise Error, <<-EOS
+                  Syncer Failed!
+                  See the Retry [info] and [error] messages (if any)
+                  for details on each failed operation.
+                EOS
              end
            end
+          elsif remote_md5
+            @orphans << remote_path
          end
-        end
-
-      class LocalFile
-        attr_reader :path, :relative_path, :md5
-
-        ##
-        # Return a new LocalFile object if it's valid.
-        # Otherwise, log a warning and return nil.
-        def self.new(*args)
-          local_file = super(*args)
-          if local_file.invalid?
-            Logger.warn(
-              "\s\s[skipping] #{ local_file.path }\n" +
-              "\s\sPath Contains Invalid UTF-8 byte sequences"
-            )
-            return nil
-          end
-          local_file
-        end
+        end

-
-
-
-        # relative_path and md5 hash for the file.
-        def initialize(directory, line)
-          @invalid = false
-          @directory = sanitize(directory)
-          line = sanitize(line).chomp
-          @path = line.slice(4..-36)
-          @md5 = line.slice(-32..-1)
-          @relative_path = @path.sub(@directory + '/', '')
+        def process_orphans
+          if @orphans.empty?
+            return mirror ? 'Deleted Files: 0' : 'Orphaned Files: 0'
          end

-
-          @
+          if @orphans.is_a?(Queue)
+            @orphans = @orphans.size.times.map { @orphans.shift }
          end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          if mirror
+            Logger.info @orphans.map {|path|
+              "\s\s[removing] '#{ path }'"
+            }.join("\n")
+
+            begin
+              cloud_io.delete(@orphans)
+              "Deleted Files: #{ @orphans.count }"
+            rescue => err
+              Logger.warn Error.wrap(err, 'Delete Operation Failed')
+              "Attempted to Delete: #{ @orphans.count } " +
+              "(See log messages for actual results)"
+            end
+          else
+            Logger.info @orphans.map {|path|
+              "\s\s[orphaned] '#{ path }'"
+            }.join("\n")
+            "Orphaned Files: #{ @orphans.count }"
          end
+        end

-
-
-      end # class Base < Syncer::Base
-    end # module Cloud
+      end
+      end
    end
  end
data/lib/backup/syncer/cloud/cloud_files.rb
CHANGED
@@ -1,77 +1,92 @@
 # encoding: utf-8
+require 'backup/cloud_io/cloud_files'

 module Backup
   module Syncer
     module Cloud
       class CloudFiles < Base
+        class Error < Backup::Error; end

         ##
         # Rackspace CloudFiles Credentials
-        attr_accessor :
+        attr_accessor :username, :api_key

         ##
         # Rackspace CloudFiles Container
         attr_accessor :container

         ##
-        # Rackspace AuthURL
-        # to a different Rackspace datacenter
-        # - https://auth.api.rackspacecloud.com (Default: US)
-        # - https://lon.auth.api.rackspacecloud.com (UK)
+        # Rackspace AuthURL (optional)
         attr_accessor :auth_url

         ##
-        #
-
-        # This only works if Backup runs on a Rackspace server
-        attr_accessor :servicenet
+        # Rackspace Region (optional)
+        attr_accessor :region

         ##
-        #
-        #
-
-
-
-        # which in turn will invoke Syncer::Base.
-        #
-        # Once pre-configured defaults and Cloud specific defaults are set,
-        # the block from the user's configuration file is evaluated.
-        def initialize(syncer_id = nil, &block)
+        # Rackspace Service Net
+        # (LAN-based transfers to avoid charges and improve performance)
+        attr_accessor :servicenet
+
+        def initialize(syncer_id = nil)
           super

-
-
+          @servicenet ||= false
+
+          check_configuration
         end

         private

-
-
-
-
-          :
-          :
-          :
-          :
-          :
+        def cloud_io
+          @cloud_io ||= CloudIO::CloudFiles.new(
+            :username => username,
+            :api_key => api_key,
+            :auth_url => auth_url,
+            :region => region,
+            :servicenet => servicenet,
+            :container => container,
+            :max_retries => max_retries,
+            :retry_waitsec => retry_waitsec,
+            # Syncer can not use SLOs.
+            :segments_container => nil,
+            :segment_size => 0
          )
        end

-
-
-
-
-
-
-
+        def get_remote_files(remote_base)
+          hash = {}
+          cloud_io.objects(remote_base).each do |object|
+            relative_path = object.name.sub(remote_base + '/', '')
+            hash[relative_path] = object.hash
+          end
+          hash
        end

-
-
-
-
+        def check_configuration
+          required = %w{ username api_key container }
+          raise Error, <<-EOS if required.map {|name| send(name) }.any?(&:nil?)
+            Configuration Error
+            #{ required.map {|name| "##{ name }"}.join(', ') } are all required
+          EOS
        end

+        attr_deprecate :concurrency_type, :version => '3.7.0',
+          :message => 'Use #thread_count instead.',
+          :action => lambda {|klass, val|
+            if val == :threads
+              klass.thread_count = 2 unless klass.thread_count
+            else
+              klass.thread_count = 0
+            end
+          }
+
+        attr_deprecate :concurrency_level, :version => '3.7.0',
+          :message => 'Use #thread_count instead.',
+          :action => lambda {|klass, val|
+            klass.thread_count = val unless klass.thread_count == 0
+          }
+
      end # class Cloudfiles < Base
    end # module Cloud
  end