backup 4.4.1 → 5.0.0.beta.1
- checksums.yaml +5 -5
- data/LICENSE +19 -0
- data/README.md +1 -1
- data/lib/backup.rb +74 -78
- data/lib/backup/archive.rb +31 -32
- data/lib/backup/binder.rb +2 -6
- data/lib/backup/cleaner.rb +14 -18
- data/lib/backup/cli.rb +104 -108
- data/lib/backup/cloud_io/base.rb +4 -7
- data/lib/backup/cloud_io/cloud_files.rb +60 -62
- data/lib/backup/cloud_io/s3.rb +69 -76
- data/lib/backup/compressor/base.rb +4 -7
- data/lib/backup/compressor/bzip2.rb +3 -7
- data/lib/backup/compressor/custom.rb +2 -6
- data/lib/backup/compressor/gzip.rb +16 -17
- data/lib/backup/config.rb +17 -18
- data/lib/backup/config/dsl.rb +16 -17
- data/lib/backup/config/helpers.rb +10 -16
- data/lib/backup/database/base.rb +22 -21
- data/lib/backup/database/mongodb.rb +36 -37
- data/lib/backup/database/mysql.rb +40 -41
- data/lib/backup/database/openldap.rb +8 -10
- data/lib/backup/database/postgresql.rb +29 -30
- data/lib/backup/database/redis.rb +27 -30
- data/lib/backup/database/riak.rb +15 -18
- data/lib/backup/database/sqlite.rb +4 -6
- data/lib/backup/encryptor/base.rb +2 -4
- data/lib/backup/encryptor/gpg.rb +49 -59
- data/lib/backup/encryptor/open_ssl.rb +11 -14
- data/lib/backup/errors.rb +7 -12
- data/lib/backup/logger.rb +16 -18
- data/lib/backup/logger/console.rb +5 -8
- data/lib/backup/logger/fog_adapter.rb +2 -6
- data/lib/backup/logger/logfile.rb +10 -12
- data/lib/backup/logger/syslog.rb +2 -4
- data/lib/backup/model.rb +75 -40
- data/lib/backup/notifier/base.rb +24 -26
- data/lib/backup/notifier/campfire.rb +9 -11
- data/lib/backup/notifier/command.rb +0 -3
- data/lib/backup/notifier/datadog.rb +9 -12
- data/lib/backup/notifier/flowdock.rb +13 -17
- data/lib/backup/notifier/hipchat.rb +11 -13
- data/lib/backup/notifier/http_post.rb +11 -14
- data/lib/backup/notifier/mail.rb +44 -47
- data/lib/backup/notifier/nagios.rb +5 -9
- data/lib/backup/notifier/pagerduty.rb +10 -12
- data/lib/backup/notifier/prowl.rb +15 -15
- data/lib/backup/notifier/pushover.rb +7 -10
- data/lib/backup/notifier/ses.rb +34 -16
- data/lib/backup/notifier/slack.rb +39 -40
- data/lib/backup/notifier/twitter.rb +2 -5
- data/lib/backup/notifier/zabbix.rb +11 -14
- data/lib/backup/package.rb +5 -9
- data/lib/backup/packager.rb +16 -17
- data/lib/backup/pipeline.rb +17 -21
- data/lib/backup/splitter.rb +8 -11
- data/lib/backup/storage/base.rb +5 -8
- data/lib/backup/storage/cloud_files.rb +21 -23
- data/lib/backup/storage/cycler.rb +10 -15
- data/lib/backup/storage/dropbox.rb +15 -21
- data/lib/backup/storage/ftp.rb +8 -10
- data/lib/backup/storage/local.rb +5 -8
- data/lib/backup/storage/qiniu.rb +8 -8
- data/lib/backup/storage/rsync.rb +24 -26
- data/lib/backup/storage/s3.rb +27 -28
- data/lib/backup/storage/scp.rb +10 -12
- data/lib/backup/storage/sftp.rb +10 -12
- data/lib/backup/syncer/base.rb +5 -8
- data/lib/backup/syncer/cloud/base.rb +27 -30
- data/lib/backup/syncer/cloud/cloud_files.rb +16 -18
- data/lib/backup/syncer/cloud/local_file.rb +5 -8
- data/lib/backup/syncer/cloud/s3.rb +23 -24
- data/lib/backup/syncer/rsync/base.rb +6 -10
- data/lib/backup/syncer/rsync/local.rb +1 -5
- data/lib/backup/syncer/rsync/pull.rb +6 -10
- data/lib/backup/syncer/rsync/push.rb +18 -22
- data/lib/backup/template.rb +9 -14
- data/lib/backup/utilities.rb +82 -69
- data/lib/backup/version.rb +1 -3
- metadata +100 -660
data/lib/backup/cloud_io/s3.rb
CHANGED
@@ -1,9 +1,8 @@
-# encoding: utf-8
-require 'backup/cloud_io/base'
-require 'fog'
-require 'digest/md5'
-require 'base64'
-require 'stringio'
+require "backup/cloud_io/base"
+require "fog"
+require "digest/md5"
+require "base64"
+require "stringio"
 
 module Backup
   module CloudIO
@@ -14,8 +13,8 @@ module Backup
       MAX_MULTIPART_SIZE = 1024**4 * 5 # 5 TiB
 
       attr_reader :access_key_id, :secret_access_key, :use_iam_profile,
-                  :region, :bucket, :chunk_size, :encryption, :storage_class,
-                  :fog_options
+        :region, :bucket, :chunk_size, :encryption, :storage_class,
+        :fog_options
 
       def initialize(options = {})
         super
@@ -39,9 +38,9 @@ module Backup
         if chunk_bytes > 0 && file_size > chunk_bytes
           raise FileSizeError, <<-EOS if file_size > MAX_MULTIPART_SIZE
             File Too Large
-            File: #{ src }
-            Size: #{ file_size }
-            Max Multipart Upload Size is #{ MAX_MULTIPART_SIZE } (5 TiB)
+            File: #{src}
+            Size: #{file_size}
+            Max Multipart Upload Size is #{MAX_MULTIPART_SIZE} (5 TiB)
           EOS
 
           chunk_bytes = adjusted_chunk_bytes(chunk_bytes, file_size)
@@ -51,9 +50,9 @@ module Backup
         else
           raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
             File Too Large
-            File: #{ src }
-            Size: #{ file_size }
-            Max File Size is #{ MAX_FILE_SIZE } (5 GiB)
+            File: #{src}
+            Size: #{file_size}
+            Max File Size is #{MAX_FILE_SIZE} (5 GiB)
           EOS
 
           put_object(src, dest)
@@ -68,15 +67,15 @@ module Backup
       def objects(prefix)
         objects = []
         resp = nil
-        prefix = prefix.chomp('/')
-        opts = { 'prefix' => prefix + '/' }
+        prefix = prefix.chomp("/")
+        opts = { "prefix" => prefix + "/" }
 
-        while resp.nil? || resp.body['IsTruncated']
-          opts['marker'] = objects.last.key unless objects.empty?
-          with_retries("GET '#{ bucket }/#{ prefix }/*'") do
+        while resp.nil? || resp.body["IsTruncated"]
+          opts["marker"] = objects.last.key unless objects.empty?
+          with_retries("GET '#{bucket}/#{prefix}/*'") do
             resp = connection.get_bucket(bucket, opts)
           end
-          resp.body['Contents'].each do |obj_data|
+          resp.body["Contents"].each do |obj_data|
             objects << Object.new(self, obj_data)
           end
         end
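The objects listing above pages through the bucket with S3's marker-based pagination: while the response reports "IsTruncated", the key of the last object seen becomes the "marker" for the next request. A minimal sketch of the same loop, assuming a hypothetical `client` that responds to get_bucket the way the Fog::Storage connection does:

    # Marker-based listing sketch. `client`, `bucket` and `prefix` are
    # placeholders; the response body mirrors S3 ListObjects:
    # "IsTruncated" and "Contents" => [{ "Key" => ... }, ...].
    def list_all(client, bucket, prefix)
      contents = []
      resp = nil
      opts = { "prefix" => prefix.chomp("/") + "/" }
      while resp.nil? || resp.body["IsTruncated"]
        opts["marker"] = contents.last["Key"] unless contents.empty?
        resp = client.get_bucket(bucket, opts)
        contents.concat(resp.body["Contents"])
      end
      contents
    end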
@@ -87,7 +86,7 @@ module Backup
       # Used by Object to fetch metadata if needed.
       def head_object(object)
         resp = nil
-        with_retries("HEAD '#{ bucket }/#{ object.key }'") do
+        with_retries("HEAD '#{bucket}/#{object.key}'") do
           resp = connection.head_object(bucket, object.key)
         end
         resp
@@ -102,18 +101,18 @@ module Backup
         keys = Array(objects_or_keys).dup
         keys.map!(&:key) if keys.first.is_a?(Object)
 
-        opts = { :quiet => true } # only report Errors in DeleteResult
+        opts = { quiet: true } # only report Errors in DeleteResult
         until keys.empty?
-          _keys = keys.slice!(0, 1000)
-          with_retries('DELETE Multiple Objects') do
-            resp = connection.delete_multiple_objects(bucket, _keys, opts.dup)
-            unless resp.body['DeleteResult'].empty?
-              errors = resp.body['DeleteResult'].map do |result|
-                error = result['Error']
-                "Failed to delete: #{ error['Key'] }\n" +
-                "Reason: #{ error['Code'] }: #{ error['Message'] }"
+          keys_partial = keys.slice!(0, 1000)
+          with_retries("DELETE Multiple Objects") do
+            resp = connection.delete_multiple_objects(bucket, keys_partial, opts.dup)
+            unless resp.body["DeleteResult"].empty?
+              errors = resp.body["DeleteResult"].map do |result|
+                error = result["Error"]
+                "Failed to delete: #{error["Key"]}\n" \
+                "Reason: #{error["Code"]}: #{error["Message"]}"
               end.join("\n")
-              raise Error, "The server returned the following:\n#{ errors }"
+              raise Error, "The server returned the following:\n#{errors}"
             end
           end
         end
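S3's DeleteObjects call (delete_multiple_objects in fog) accepts at most 1,000 keys per request, which is why the hunk above consumes the key list in slices. The batching itself is plain Ruby and can be tried standalone; the key names here are made up for illustration:

    keys = (1..2500).map { |i| "backups/archive.#{i}.tar" }
    until keys.empty?
      batch = keys.slice!(0, 1000) # destructively takes up to 1,000 keys from the front
      puts "DELETE Multiple Objects: #{batch.size} keys"
    end
    # => batches of 1000, 1000, then 500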
@@ -122,41 +121,40 @@ module Backup
       private
 
       def connection
-        @connection ||= begin
-          opts = { :provider => 'AWS', :region => region }
-          if use_iam_profile
-            opts.merge!(:use_iam_profile => true)
-          else
-            opts.merge!(
-              :aws_access_key_id => access_key_id,
-              :aws_secret_access_key => secret_access_key
-            )
+        @connection ||=
+          begin
+            opts = { provider: "AWS", region: region }
+            if use_iam_profile
+              opts[:use_iam_profile] = true
+            else
+              opts[:aws_access_key_id] = access_key_id
+              opts[:aws_secret_access_key] = secret_access_key
+            end
+            opts.merge!(fog_options || {})
+            conn = Fog::Storage.new(opts)
+            conn.sync_clock
+            conn
           end
-          opts.merge!(fog_options || {})
-          conn = Fog::Storage.new(opts)
-          conn.sync_clock
-          conn
-        end
       end
 
       def put_object(src, dest)
         md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
-        options = headers.merge('Content-MD5' => md5)
-        with_retries("PUT '#{ bucket }/#{ dest }'") do
-          File.open(src, 'r') do |file|
+        options = headers.merge("Content-MD5" => md5)
+        with_retries("PUT '#{bucket}/#{dest}'") do
+          File.open(src, "r") do |file|
             connection.put_object(bucket, dest, file, options)
           end
         end
       end
 
       def initiate_multipart(dest)
-        Logger.info "\s\sInitiate Multipart '#{ bucket }/#{ dest }'"
+        Logger.info "\s\sInitiate Multipart '#{bucket}/#{dest}'"
 
         resp = nil
-        with_retries("POST '#{ bucket }/#{ dest }' (Initiate)") do
+        with_retries("POST '#{bucket}/#{dest}' (Initiate)") do
           resp = connection.initiate_multipart_upload(bucket, dest, headers)
         end
-        resp.body['UploadId']
+        resp.body["UploadId"]
       end
 
       # Each part's MD5 is sent to verify the transfer.
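The rewritten connection method builds one options hash for Fog::Storage: with use_iam_profile set, no static credentials are sent; otherwise both keys are required, and user-supplied fog_options are merged last so they win. A sketch of just the option assembly (pure Ruby; the fog gem is only needed to actually open the connection, and `extra` stands in for fog_options):

    def s3_connection_options(region:, use_iam_profile: false,
                              access_key_id: nil, secret_access_key: nil,
                              extra: {})
      opts = { provider: "AWS", region: region }
      if use_iam_profile
        opts[:use_iam_profile] = true
      else
        opts[:aws_access_key_id] = access_key_id
        opts[:aws_secret_access_key] = secret_access_key
      end
      opts.merge(extra) # fog_options applied last, overriding the rest
    end

    s3_connection_options(region: "eu-west-1", use_iam_profile: true)
    # => hash with only provider, region and use_iam_profile set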
@@ -164,26 +162,26 @@ module Backup
       # once the multipart upload is completed.
       def upload_parts(src, dest, upload_id, chunk_bytes, file_size)
         total_parts = (file_size / chunk_bytes.to_f).ceil
-        progress = (0.1..0.9).step(0.1).map {|n| (total_parts * n).floor }
-        Logger.info "\s\sUploading #{ total_parts } Parts..."
+        progress = (0.1..0.9).step(0.1).map { |n| (total_parts * n).floor }
+        Logger.info "\s\sUploading #{total_parts} Parts..."
 
         parts = []
-        File.open(src, 'r') do |file|
+        File.open(src, "r") do |file|
           part_number = 0
           while data = file.read(chunk_bytes)
             part_number += 1
             md5 = Base64.encode64(Digest::MD5.digest(data)).chomp
 
-            with_retries("PUT '#{ bucket }/#{ dest }' Part ##{ part_number }") do
+            with_retries("PUT '#{bucket}/#{dest}' Part ##{part_number}") do
               resp = connection.upload_part(
                 bucket, dest, upload_id, part_number, StringIO.new(data),
-                { 'Content-MD5' => md5 }
+                "Content-MD5" => md5
               )
-              parts << resp.headers['ETag']
+              parts << resp.headers["ETag"]
             end
 
             if i = progress.rindex(part_number)
-              Logger.info "\s\s...#{ i + 1 }0% Complete..."
+              Logger.info "\s\s...#{i + 1}0% Complete..."
             end
           end
         end
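Each uploaded part carries a Content-MD5 header, the Base64 of the raw 16-byte digest rather than the 32-character hex form, so S3 can reject a corrupted part immediately; the returned ETags are collected for the completion call. The encoding uses only the standard library:

    require "digest/md5"
    require "base64"

    data = "example chunk data"
    md5 = Base64.encode64(Digest::MD5.digest(data)).chomp
    # => a 24-character Base64 string; .chomp strips the trailing
    #    newline that Base64.encode64 always appends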
@@ -191,13 +189,13 @@ module Backup
       end
 
       def complete_multipart(dest, upload_id, parts)
-        Logger.info "\s\sComplete Multipart '#{ bucket }/#{ dest }'"
+        Logger.info "\s\sComplete Multipart '#{bucket}/#{dest}'"
 
-        with_retries("POST '#{ bucket }/#{ dest }' (Complete)") do
+        with_retries("POST '#{bucket}/#{dest}' (Complete)") do
           resp = connection.complete_multipart_upload(bucket, dest, upload_id, parts)
-          raise Error, <<-EOS if resp.body['Code']
+          raise Error, <<-EOS if resp.body["Code"]
             The server returned the following error:
-            #{ resp.body['Code'] }: #{ resp.body['Message'] }
+            #{resp.body["Code"]}: #{resp.body["Message"]}
           EOS
         end
       end
@@ -206,14 +204,10 @@ module Backup
         headers = {}
 
         enc = encryption.to_s.upcase
-        headers.merge!(
-          { 'x-amz-server-side-encryption' => enc}
-        ) unless enc.empty?
+        headers["x-amz-server-side-encryption"] = enc unless enc.empty?
 
         sc = storage_class.to_s.upcase
-        headers.merge!(
-          { 'x-amz-storage-class' => sc }
-        ) unless sc.empty? || sc == 'STANDARD'
+        headers["x-amz-storage-class"] = sc unless sc.empty? || sc == "STANDARD"
 
         headers
       end
@@ -225,10 +219,10 @@ module Backup
         mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
         Logger.warn Error.new(<<-EOS)
           Chunk Size Adjusted
-          Your original #chunk_size of #{ orig_mb } MiB has been adjusted
-          to #{ mb } MiB in order to satisfy the limit of 10,000 chunks.
+          Your original #chunk_size of #{orig_mb} MiB has been adjusted
+          to #{mb} MiB in order to satisfy the limit of 10,000 chunks.
           To enforce your chosen #chunk_size, you should use the Splitter.
-          e.g. split_into_chunks_of #{ mb * 10_000 } (#chunk_size * 10_000)
+          e.g. split_into_chunks_of #{mb * 10_000} (#chunk_size * 10_000)
         EOS
         1024**2 * mb
       end
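adjusted_chunk_bytes grows the chunk size one MiB at a time until the part count fits S3's 10,000-part limit per multipart upload. A worked example with the same arithmetic:

    file_size = 1024**4 # a 1 TiB upload
    mb = 5              # requested #chunk_size of 5 MiB => ~209,716 parts
    mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
    mb                                      # => 105 (MiB per chunk)
    (file_size / (1024**2 * mb).to_f).ceil  # => 9987 parts, under the limit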
@@ -238,14 +232,14 @@ module Backup
 
       def initialize(cloud_io, data)
         @cloud_io = cloud_io
-        @key = data['Key']
-        @etag = data['ETag']
-        @storage_class = data['StorageClass']
+        @key = data["Key"]
+        @etag = data["ETag"]
+        @storage_class = data["StorageClass"]
       end
 
       # currently 'AES256' or nil
       def encryption
-        metadata['x-amz-server-side-encryption']
+        metadata["x-amz-server-side-encryption"]
       end
 
       private
@@ -254,7 +248,6 @@ module Backup
           @metadata ||= @cloud_io.head_object(self).headers
         end
       end
-
     end
   end
 end
data/lib/backup/compressor/base.rb
CHANGED
@@ -1,5 +1,3 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Base
@@ -18,18 +16,17 @@ module Backup
       ##
       # Return the compressor name, with Backup namespace removed
       def compressor_name
-        self.class.to_s.sub('Backup::', '')
+        self.class.to_s.sub("Backup::", "")
       end
 
       ##
       # Logs a message to the console and log file to inform
       # the client that Backup is using the compressor
       def log!
-        Logger.info "Using #{ compressor_name } for compression.\n" +
-            " Command: '#{ @cmd }'\n" +
-            " Ext: '#{ @ext }'"
+        Logger.info "Using #{compressor_name} for compression.\n" \
+          " Command: '#{@cmd}'\n" \
+          " Ext: '#{@ext}'"
       end
-
     end
   end
 end
data/lib/backup/compressor/bzip2.rb
CHANGED
@@ -1,9 +1,6 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Bzip2 < Base
-
       ##
       # Specify the level of compression to use.
       #
@@ -24,16 +21,15 @@ module Backup
 
         instance_eval(&block) if block_given?
 
-        @cmd = "#{ utility(:bzip2) }#{ options }"
-        @ext = '.bz2'
+        @cmd = "#{utility(:bzip2)}#{options}"
+        @ext = ".bz2"
       end
 
       private
 
       def options
-        " -#{ @level }" if @level
+        " -#{@level}" if @level
       end
-
     end
   end
 end
data/lib/backup/compressor/custom.rb
CHANGED
@@ -1,9 +1,6 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Custom < Base
-
       ##
       # Specify the system command to invoke a compressor,
       # including any command-line arguments.
@@ -36,9 +33,9 @@ module Backup
       # Return the command line using the full path.
       # Ensures the command exists and is executable.
       def set_cmd
-        parts = @command.to_s.split(' ')
+        parts = @command.to_s.split(" ")
         parts[0] = utility(parts[0])
-        parts.join(' ')
+        parts.join(" ")
       end
 
       ##
@@ -47,7 +44,6 @@ module Backup
       def set_ext
         @extension.to_s.strip
       end
-
     end
   end
 end
data/lib/backup/compressor/gzip.rb
CHANGED
@@ -1,5 +1,3 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Gzip < Base
@@ -35,8 +33,8 @@ module Backup
       # Determine if +--rsyncable+ is supported and cache the result.
       def self.has_rsyncable?
         return @has_rsyncable unless @has_rsyncable.nil?
-        cmd = "#{ utility(:gzip) } --rsyncable --version >/dev/null 2>&1; echo $?"
-        @has_rsyncable = %x[#{ cmd }].chomp == '0'
+        cmd = "#{utility(:gzip)} --rsyncable --version >/dev/null 2>&1; echo $?"
+        @has_rsyncable = `#{cmd}`.chomp == "0"
       end
 
       ##
@@ -49,26 +47,27 @@ module Backup
 
         instance_eval(&block) if block_given?
 
-        @cmd = "#{ utility(:gzip) }#{ options }"
-        @ext = '.gz'
+        @cmd = "#{utility(:gzip)}#{options}"
+        @ext = ".gz"
       end
 
       private
 
       def options
-        opts = ''
-        opts << " -#{ @level }" if @level
-        if …
-        …
-        …
-        …
-        …
-        …
-        …
+        opts = ""
+        opts << " -#{@level}" if @level
+        if @rsyncable
+          if self.class.has_rsyncable?
+            opts << " --rsyncable"
+          else
+            Logger.warn Error.new(<<-EOS)
+              'rsyncable' option ignored.
+              Your system's 'gzip' does not support the `--rsyncable` option.
+            EOS
+          end
+        end
         opts
       end
-
     end
   end
 end
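Gzip's feature test survives the restyle: run gzip once with --rsyncable and read back the exit status, caching the answer on the class so the shell-out happens at most once. The detection line stripped of the utility() path lookup (assumes a gzip binary on PATH):

    # `echo $?` prints the exit status of the preceding command, so the
    # backtick capture is "0" exactly when gzip accepted --rsyncable.
    cmd = "gzip --rsyncable --version >/dev/null 2>&1; echo $?"
    supported = `#{cmd}`.chomp == "0"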
data/lib/backup/config.rb
CHANGED
@@ -1,15 +1,14 @@
-# encoding: utf-8
-require 'backup/config/dsl'
-require 'backup/config/helpers'
+require "backup/config/dsl"
+require "backup/config/helpers"
 
 module Backup
   module Config
     class Error < Backup::Error; end
 
     DEFAULTS = {
-      :config_file => 'config.rb',
-      :data_path   => '.data',
-      :tmp_path    => '.tmp'
+      config_file: "config.rb",
+      data_path: ".data",
+      tmp_path: ".tmp"
     }
 
     class << self
@@ -19,20 +18,20 @@ module Backup
 
       # Loads the user's +config.rb+ and all model files.
       def load(options = {})
-        update(options)
+        update(options) # from the command line
 
         unless File.exist?(config_file)
           raise Error, "Could not find configuration file: '#{config_file}'."
         end
 
         config = File.read(config_file)
-        version = Backup::VERSION.split('.').first
+        version = Backup::VERSION.split(".").first
         unless config =~ /^# Backup v#{ version }\.x Configuration$/
           raise Error, <<-EOS
             Invalid Configuration File
-            The configuration file at '#{ config_file }'
-            does not appear to be a Backup v#{ version }.x configuration file.
-            If you have upgraded to v#{ version }.x from a previous version,
+            The configuration file at '#{config_file}'
+            does not appear to be a Backup v#{version}.x configuration file.
+            If you have upgraded to v#{version}.x from a previous version,
             you need to upgrade your configuration file.
             Please see the instructions for upgrading in the Backup documentation.
           EOS
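Config.load still gates on a version comment: config.rb must contain a line naming the installed major version, which is derived from Backup::VERSION. The check in isolation:

    version = "5.0.0.beta.1".split(".").first # => "5"
    config  = "# Backup v5.x Configuration\n# ...rest of config.rb..."
    config =~ /^# Backup v#{version}\.x Configuration$/ # => 0 (matched)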
@@ -44,7 +43,7 @@ module Backup
         update(dsl._config_options) # from config.rb
         update(options) # command line takes precedence
 
-        Dir[File.join(File.dirname(config_file), 'models', '*.rb')].each do |model|
+        Dir[File.join(File.dirname(config_file), "models", "*.rb")].each do |model|
           dsl.instance_eval(File.read(model), model)
         end
       end
@@ -79,7 +78,7 @@ module Backup
           raise Error, <<-EOS
             Root Path Not Found
             When specifying a --root-path, the path must exist.
-            Path was: #{ path }
+            Path was: #{path}
           EOS
         end
         @root_path = path
@@ -88,7 +87,7 @@ module Backup
       def set_path_variable(name, path, ending, root_path)
         # strip any trailing '/' in case the user supplied this as part of
         # an absolute path, so we can match it against File.expand_path()
-        path = path.to_s.sub(/\/\s*$/, '').lstrip
+        path = path.to_s.sub(/\/\s*$/, "").lstrip
         new_path = false
         # If no path is given, the variable will not be set/updated
         # unless a root_path was given. In which case the value will
@@ -108,12 +107,12 @@ module Backup
       end
 
       def reset!
-        @user = ENV['USER'] || Etc.getpwuid.name
-        @root_path = File.join(File.expand_path(ENV['HOME'] || ''), 'Backup')
-        update(:root_path => @root_path)
+        @user = ENV["USER"] || Etc.getpwuid.name
+        @root_path = File.join(File.expand_path(ENV["HOME"] || ""), "Backup")
+        update(root_path: @root_path)
       end
     end
 
-    reset!
+    reset! # set defaults on load
   end
 end