backup 4.4.1 → 5.0.0.beta.3
- checksums.yaml +4 -4
- data/LICENSE +19 -0
- data/README.md +13 -9
- data/bin/docker_test +24 -0
- data/lib/backup/archive.rb +31 -32
- data/lib/backup/binder.rb +2 -6
- data/lib/backup/cleaner.rb +14 -18
- data/lib/backup/cli.rb +104 -108
- data/lib/backup/cloud_io/base.rb +4 -7
- data/lib/backup/cloud_io/cloud_files.rb +60 -62
- data/lib/backup/cloud_io/s3.rb +69 -76
- data/lib/backup/compressor/base.rb +4 -7
- data/lib/backup/compressor/bzip2.rb +3 -7
- data/lib/backup/compressor/custom.rb +2 -6
- data/lib/backup/compressor/gzip.rb +16 -17
- data/lib/backup/config/dsl.rb +16 -17
- data/lib/backup/config/helpers.rb +10 -16
- data/lib/backup/config.rb +17 -18
- data/lib/backup/database/base.rb +22 -21
- data/lib/backup/database/mongodb.rb +36 -37
- data/lib/backup/database/mysql.rb +40 -41
- data/lib/backup/database/openldap.rb +8 -10
- data/lib/backup/database/postgresql.rb +29 -30
- data/lib/backup/database/redis.rb +27 -30
- data/lib/backup/database/riak.rb +15 -18
- data/lib/backup/database/sqlite.rb +4 -6
- data/lib/backup/encryptor/base.rb +2 -4
- data/lib/backup/encryptor/gpg.rb +49 -59
- data/lib/backup/encryptor/open_ssl.rb +11 -14
- data/lib/backup/errors.rb +7 -12
- data/lib/backup/logger/console.rb +5 -8
- data/lib/backup/logger/fog_adapter.rb +2 -6
- data/lib/backup/logger/logfile.rb +10 -12
- data/lib/backup/logger/syslog.rb +2 -4
- data/lib/backup/logger.rb +16 -18
- data/lib/backup/model.rb +33 -40
- data/lib/backup/notifier/base.rb +24 -26
- data/lib/backup/notifier/campfire.rb +9 -11
- data/lib/backup/notifier/command.rb +0 -3
- data/lib/backup/notifier/datadog.rb +9 -12
- data/lib/backup/notifier/flowdock.rb +13 -17
- data/lib/backup/notifier/hipchat.rb +11 -13
- data/lib/backup/notifier/http_post.rb +11 -14
- data/lib/backup/notifier/mail.rb +42 -59
- data/lib/backup/notifier/nagios.rb +5 -9
- data/lib/backup/notifier/pagerduty.rb +10 -12
- data/lib/backup/notifier/prowl.rb +15 -15
- data/lib/backup/notifier/pushover.rb +7 -10
- data/lib/backup/notifier/ses.rb +34 -16
- data/lib/backup/notifier/slack.rb +39 -40
- data/lib/backup/notifier/twitter.rb +2 -5
- data/lib/backup/notifier/zabbix.rb +11 -14
- data/lib/backup/package.rb +5 -9
- data/lib/backup/packager.rb +16 -17
- data/lib/backup/pipeline.rb +17 -21
- data/lib/backup/splitter.rb +8 -11
- data/lib/backup/storage/base.rb +5 -8
- data/lib/backup/storage/cloud_files.rb +21 -23
- data/lib/backup/storage/cycler.rb +10 -15
- data/lib/backup/storage/dropbox.rb +15 -21
- data/lib/backup/storage/ftp.rb +14 -10
- data/lib/backup/storage/local.rb +5 -8
- data/lib/backup/storage/qiniu.rb +8 -8
- data/lib/backup/storage/rsync.rb +24 -26
- data/lib/backup/storage/s3.rb +27 -28
- data/lib/backup/storage/scp.rb +10 -12
- data/lib/backup/storage/sftp.rb +10 -12
- data/lib/backup/syncer/base.rb +5 -8
- data/lib/backup/syncer/cloud/base.rb +27 -30
- data/lib/backup/syncer/cloud/cloud_files.rb +16 -18
- data/lib/backup/syncer/cloud/local_file.rb +5 -8
- data/lib/backup/syncer/cloud/s3.rb +23 -24
- data/lib/backup/syncer/rsync/base.rb +6 -10
- data/lib/backup/syncer/rsync/local.rb +1 -5
- data/lib/backup/syncer/rsync/pull.rb +6 -10
- data/lib/backup/syncer/rsync/push.rb +18 -22
- data/lib/backup/template.rb +9 -14
- data/lib/backup/utilities.rb +78 -69
- data/lib/backup/version.rb +1 -3
- data/lib/backup.rb +74 -78
- metadata +107 -676
data/lib/backup/cloud_io/cloud_files.rb CHANGED

@@ -1,7 +1,6 @@
-# encoding: utf-8
-require 'backup/cloud_io/base'
-require 'fog'
-require 'digest/md5'
+require "backup/cloud_io/base"
+require "fog"
+require "digest/md5"
 
 module Backup
   module CloudIO
@@ -13,8 +12,8 @@ module Backup
       SEGMENT_BUFFER = 1024**2 # 1 MiB
 
       attr_reader :username, :api_key, :auth_url, :region, :servicenet,
-                  :container, :segments_container, :segment_size, :days_to_keep,
-                  :fog_options
+        :container, :segments_container, :segment_size, :days_to_keep,
+        :fog_options
 
       def initialize(options = {})
         super
@@ -41,9 +40,9 @@ module Backup
         if segment_bytes > 0 && file_size > segment_bytes
           raise FileSizeError, <<-EOS if file_size > MAX_SLO_SIZE
             File Too Large
-            File: #{ src }
-            Size: #{ file_size }
-            Max SLO Size is #{ MAX_SLO_SIZE } (5 GiB * 1000 segments)
+            File: #{src}
+            Size: #{file_size}
+            Max SLO Size is #{MAX_SLO_SIZE} (5 GiB * 1000 segments)
           EOS
 
           segment_bytes = adjusted_segment_bytes(segment_bytes, file_size)
@@ -52,9 +51,9 @@ module Backup
         else
           raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
             File Too Large
-            File: #{ src }
-            Size: #{ file_size }
-            Max File Size is #{ MAX_FILE_SIZE } (5 GiB)
+            File: #{src}
+            Size: #{file_size}
+            Max File Size is #{MAX_FILE_SIZE} (5 GiB)
           EOS
 
           put_object(src, dest)
@@ -69,14 +68,14 @@ module Backup
       def objects(prefix)
         objects = []
         resp = nil
-        prefix = prefix.chomp('/')
-        opts = { :prefix => prefix + '/' }
+        prefix = prefix.chomp("/")
+        opts = { prefix: prefix + "/" }
 
         create_containers
 
-        while resp.nil? || resp.body.count == 10000
-          opts.merge!(:marker => objects.last.name) unless objects.empty?
-          with_retries("GET '#{ container }/#{ prefix }/*'") do
+        while resp.nil? || resp.body.count == 10_000
+          opts[:marker] = objects.last.name unless objects.empty?
+          with_retries("GET '#{container}/#{prefix}/*'") do
             resp = connection.get_container(container, opts)
           end
           resp.body.each do |obj_data|
@@ -90,7 +89,7 @@ module Backup
       # Used by Object to fetch metadata if needed.
       def head_object(object)
         resp = nil
-        with_retries("HEAD '#{ container }/#{ object.name }'") do
+        with_retries("HEAD '#{container}/#{object.name}'") do
           resp = connection.head_object(container, object.name)
         end
         resp
@@ -106,14 +105,14 @@ module Backup
         names.map!(&:name) if names.first.is_a?(Object)
 
         until names.empty?
-          _names = names.slice!(0, 10000)
-          with_retries('DELETE Multiple Objects') do
-            resp = connection.delete_multiple_objects(container, _names)
-            resp_status = resp.body['Response Status']
-            raise Error, <<-EOS unless resp_status == '200 OK'
-              #{ resp_status }
+          names_partial = names.slice!(0, 10_000)
+          with_retries("DELETE Multiple Objects") do
+            resp = connection.delete_multiple_objects(container, names_partial)
+            resp_status = resp.body["Response Status"]
+            raise Error, <<-EOS unless resp_status == "200 OK"
+              #{resp_status}
               The server returned the following:
-              #{ resp.body.inspect }
+              #{resp.body.inspect}
             EOS
           end
         end
@@ -126,13 +125,13 @@ module Backup
       # - Missing segments will be ignored.
       def delete_slo(objects)
         Array(objects).each do |object|
-          with_retries("DELETE SLO Manifest '#{ container }/#{ object.name }'") do
+          with_retries("DELETE SLO Manifest '#{container}/#{object.name}'") do
             resp = connection.delete_static_large_object(container, object.name)
-            resp_status = resp.body['Response Status']
-            raise Error, <<-EOS unless resp_status == '200 OK'
-              #{ resp_status }
+            resp_status = resp.body["Response Status"]
+            raise Error, <<-EOS unless resp_status == "200 OK"
+              #{resp_status}
               The server returned the following:
-              #{ resp.body.inspect }
+              #{resp.body.inspect}
             EOS
           end
         end
@@ -142,12 +141,12 @@ module Backup
 
       def connection
         @connection ||= Fog::Storage.new({
-          :provider             => 'Rackspace',
-          :rackspace_username   => username,
-          :rackspace_api_key    => api_key,
-          :rackspace_auth_url   => auth_url,
-          :rackspace_region     => region,
-          :rackspace_servicenet => servicenet
+          provider: "Rackspace",
+          rackspace_username: username,
+          rackspace_api_key: api_key,
+          rackspace_auth_url: auth_url,
+          rackspace_region: region,
+          rackspace_servicenet: servicenet
         }.merge(fog_options || {}))
       end
 
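The connection options move to Ruby 1.9 hash syntax, and `fog_options` is still merged last, so any user-supplied key overrides the generated Rackspace defaults. A minimal sketch of how that surfaces in a Backup model (the DSL attributes are the storage's standard ones; the timeout value is hypothetical):

```ruby
Backup::Model.new(:my_backup, "My Backup") do
  store_with CloudFiles do |cf|
    cf.username  = "my_username"
    cf.api_key   = "my_api_key"
    cf.container = "my_container"
    # Passed straight through to Fog::Storage.new via .merge(fog_options):
    cf.fog_options = { connection_options: { connect_timeout: 30 } }
  end
end
```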
@@ -155,16 +154,16 @@ module Backup
         return if @containers_created
         @containers_created = true
 
-        with_retries('Create Containers') do
+        with_retries("Create Containers") do
           connection.put_container(container)
           connection.put_container(segments_container) if segments_container
         end
       end
 
       def put_object(src, dest)
-        opts = headers.merge('ETag' => Digest::MD5.file(src).hexdigest)
-        with_retries("PUT '#{ container }/#{ dest }'") do
-          File.open(src, 'r') do |file|
+        opts = headers.merge("ETag" => Digest::MD5.file(src).hexdigest)
+        with_retries("PUT '#{container}/#{dest}'") do
+          File.open(src, "r") do |file|
             connection.put_object(container, dest, file, opts)
           end
         end
@@ -176,25 +175,25 @@ module Backup
       # SLO manifest object is uploaded.
       def upload_segments(src, dest, segment_bytes, file_size)
         total_segments = (file_size / segment_bytes.to_f).ceil
-        progress = (0.1..0.9).step(0.1).map {|n| (total_segments * n).floor }
-        Logger.info "\s\sUploading #{ total_segments } SLO Segments..."
+        progress = (0.1..0.9).step(0.1).map { |n| (total_segments * n).floor }
+        Logger.info "\s\sUploading #{total_segments} SLO Segments..."
 
         segments = []
-        File.open(src, 'r') do |file|
+        File.open(src, "r") do |file|
           segment_number = 0
           until file.eof?
             segment_number += 1
-            object = "#{ dest }/#{ segment_number.to_s.rjust(4, '0') }"
+            object = "#{dest}/#{segment_number.to_s.rjust(4, "0")}"
             pos = file.pos
             md5 = segment_md5(file, segment_bytes)
-            opts = headers.merge('ETag' => md5)
+            opts = headers.merge("ETag" => md5)
 
-            with_retries("PUT '#{ segments_container }/#{ object }'") do
+            with_retries("PUT '#{segments_container}/#{object}'") do
               file.seek(pos)
               offset = 0
               connection.put_object(segments_container, object, nil, opts) do
                 # block is called to stream data until it returns ''
-                data = ''
+                data = ""
                 if offset <= segment_bytes - SEGMENT_BUFFER
                   data = file.read(SEGMENT_BUFFER).to_s # nil => ''
                   offset += data.size
@@ -204,13 +203,13 @@ module Backup
             end
 
             segments << {
-              :path => "#{ segments_container }/#{ object }",
-              :etag => md5,
-              :size_bytes => file.pos - pos
+              path: "#{segments_container}/#{object}",
+              etag: md5,
+              size_bytes: file.pos - pos
             }
 
             if i = progress.rindex(segment_number)
-              Logger.info "\s\s...#{ i + 1 }0% Complete..."
+              Logger.info "\s\s...#{i + 1}0% Complete..."
             end
           end
         end
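Segment objects are keyed by zero-padding the segment number to four digits beneath the destination key, as the `rjust(4, "0")` line above shows. A quick illustration (the `dest` path is hypothetical):

```ruby
dest = "my_backup/2016.05.10.11.00.02/my_backup.tar"
1.upto(3) { |n| puts "#{dest}/#{n.to_s.rjust(4, "0")}" }
# my_backup/2016.05.10.11.00.02/my_backup.tar/0001
# my_backup/2016.05.10.11.00.02/my_backup.tar/0002
# my_backup/2016.05.10.11.00.02/my_backup.tar/0003
```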
@@ -234,9 +233,9 @@ module Backup
       # are not found. However, each segment's ETag was verified when we
       # uploaded the segments, so this should only retry failed requests.
       def upload_manifest(dest, segments)
-        Logger.info "\s\sStoring SLO Manifest '#{ container }/#{ dest }'"
+        Logger.info "\s\sStoring SLO Manifest '#{container}/#{dest}'"
 
-        with_retries("PUT SLO Manifest '#{ container }/#{ dest }'") do
+        with_retries("PUT SLO Manifest '#{container}/#{dest}'") do
           connection.put_static_obj_manifest(container, dest, segments, headers)
         end
       end
@@ -245,7 +244,7 @@ module Backup
       # This includes non-SLO objects, the SLO manifest and all segments.
       def headers
         headers = {}
-        headers.merge!('X-Delete-At' => delete_at) if delete_at
+        headers["X-Delete-At"] = delete_at if delete_at
         headers
       end
 
@@ -261,10 +260,10 @@ module Backup
         mb += 1 until file_size / (1024**2 * mb).to_f <= 1000
         Logger.warn Error.new(<<-EOS)
           Segment Size Adjusted
-          Your original #segment_size of #{ orig_mb } MiB has been adjusted
-          to #{ mb } MiB in order to satisfy the limit of 1000 segments.
+          Your original #segment_size of #{orig_mb} MiB has been adjusted
+          to #{mb} MiB in order to satisfy the limit of 1000 segments.
           To enforce your chosen #segment_size, you should use the Splitter.
-          e.g. split_into_chunks_of #{ mb * 1000 } (#segment_size * 1000)
+          e.g. split_into_chunks_of #{mb * 1000} (#segment_size * 1000)
         EOS
         1024**2 * mb
       end
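`adjusted_segment_bytes` grows the segment size one MiB at a time until the file fits within Swift's 1000-segment limit. A worked example with assumed numbers:

```ruby
file_size = 1024**3 * 12 # a 12 GiB file (hypothetical)
mb = 10                  # user asked for 10 MiB segments => 1229 segments, too many
mb += 1 until file_size / (1024**2 * mb).to_f <= 1000
puts mb # => 13 (12 GiB / 13 MiB ~= 946 segments, within the limit)
```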
@@ -274,16 +273,16 @@ module Backup
 
         def initialize(cloud_io, data)
           @cloud_io = cloud_io
-          @name = data['name']
-          @hash = data['hash']
+          @name = data["name"]
+          @hash = data["hash"]
         end
 
         def slo?
-          !!metadata['X-Static-Large-Object']
+          !!metadata["X-Static-Large-Object"]
         end
 
         def marked_for_deletion?
-          !!metadata['X-Delete-At']
+          !!metadata["X-Delete-At"]
         end
 
         private
@@ -292,7 +291,6 @@ module Backup
           @metadata ||= @cloud_io.head_object(self).headers
         end
       end
-
     end
   end
 end
data/lib/backup/cloud_io/s3.rb CHANGED

@@ -1,9 +1,8 @@
-# encoding: utf-8
-require 'backup/cloud_io/base'
-require 'fog'
-require 'digest/md5'
-require 'base64'
-require 'stringio'
+require "backup/cloud_io/base"
+require "fog"
+require "digest/md5"
+require "base64"
+require "stringio"
 
 module Backup
   module CloudIO
@@ -14,8 +13,8 @@ module Backup
       MAX_MULTIPART_SIZE = 1024**4 * 5 # 5 TiB
 
       attr_reader :access_key_id, :secret_access_key, :use_iam_profile,
-                  :region, :bucket, :chunk_size, :encryption, :storage_class,
-                  :fog_options
+        :region, :bucket, :chunk_size, :encryption, :storage_class,
+        :fog_options
 
       def initialize(options = {})
         super
|
|
39
38
|
if chunk_bytes > 0 && file_size > chunk_bytes
|
40
39
|
raise FileSizeError, <<-EOS if file_size > MAX_MULTIPART_SIZE
|
41
40
|
File Too Large
|
42
|
-
File: #{
|
43
|
-
Size: #{
|
44
|
-
Max Multipart Upload Size is #{
|
41
|
+
File: #{src}
|
42
|
+
Size: #{file_size}
|
43
|
+
Max Multipart Upload Size is #{MAX_MULTIPART_SIZE} (5 TiB)
|
45
44
|
EOS
|
46
45
|
|
47
46
|
chunk_bytes = adjusted_chunk_bytes(chunk_bytes, file_size)
|
@@ -51,9 +50,9 @@ module Backup
|
|
51
50
|
else
|
52
51
|
raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
|
53
52
|
File Too Large
|
54
|
-
File: #{
|
55
|
-
Size: #{
|
56
|
-
Max File Size is #{
|
53
|
+
File: #{src}
|
54
|
+
Size: #{file_size}
|
55
|
+
Max File Size is #{MAX_FILE_SIZE} (5 GiB)
|
57
56
|
EOS
|
58
57
|
|
59
58
|
put_object(src, dest)
|
@@ -68,15 +67,15 @@ module Backup
       def objects(prefix)
         objects = []
         resp = nil
-        prefix = prefix.chomp('/')
-        opts = { 'prefix' => prefix + '/' }
+        prefix = prefix.chomp("/")
+        opts = { "prefix" => prefix + "/" }
 
-        while resp.nil? || resp.body['IsTruncated']
-          opts.merge!('marker' => objects.last.key) unless objects.empty?
-          with_retries("GET '#{ bucket }/#{ prefix }/*'") do
+        while resp.nil? || resp.body["IsTruncated"]
+          opts["marker"] = objects.last.key unless objects.empty?
+          with_retries("GET '#{bucket}/#{prefix}/*'") do
             resp = connection.get_bucket(bucket, opts)
           end
-          resp.body['Contents'].each do |obj_data|
+          resp.body["Contents"].each do |obj_data|
             objects << Object.new(self, obj_data)
           end
         end
@@ -87,7 +86,7 @@ module Backup
       # Used by Object to fetch metadata if needed.
       def head_object(object)
         resp = nil
-        with_retries("HEAD '#{ bucket }/#{ object.key }'") do
+        with_retries("HEAD '#{bucket}/#{object.key}'") do
           resp = connection.head_object(bucket, object.key)
         end
         resp
@@ -102,18 +101,18 @@ module Backup
         keys = Array(objects_or_keys).dup
         keys.map!(&:key) if keys.first.is_a?(Object)
 
-        opts = { :quiet => true } # only report Errors in DeleteResult
+        opts = { quiet: true } # only report Errors in DeleteResult
         until keys.empty?
-          _keys = keys.slice!(0, 1000)
-          with_retries('DELETE Multiple Objects') do
-            resp = connection.delete_multiple_objects(bucket, _keys, opts.dup)
-            unless resp.body['DeleteResult'].empty?
-              errors = resp.body['DeleteResult'].map do |result|
-                error = result['Error']
-                "Failed to delete: #{ error['Key'] }\n" +
-                "Reason: #{ error['Code'] }: #{ error['Message'] }"
+          keys_partial = keys.slice!(0, 1000)
+          with_retries("DELETE Multiple Objects") do
+            resp = connection.delete_multiple_objects(bucket, keys_partial, opts.dup)
+            unless resp.body["DeleteResult"].empty?
+              errors = resp.body["DeleteResult"].map do |result|
+                error = result["Error"]
+                "Failed to delete: #{error["Key"]}\n" \
+                "Reason: #{error["Code"]}: #{error["Message"]}"
              end.join("\n")
-              raise Error, "The server returned the following:\n#{ errors }"
+              raise Error, "The server returned the following:\n#{errors}"
            end
          end
        end
@@ -122,41 +121,40 @@ module Backup
       private
 
       def connection
-        @connection ||= begin
-          opts = { :provider => 'AWS', :region => region }
-          if use_iam_profile
-            opts.merge!(:use_iam_profile => true)
-          else
-            opts.merge!(
-              :aws_access_key_id     => access_key_id,
-              :aws_secret_access_key => secret_access_key
-            )
+        @connection ||=
+          begin
+            opts = { provider: "AWS", region: region }
+            if use_iam_profile
+              opts[:use_iam_profile] = true
+            else
+              opts[:aws_access_key_id] = access_key_id
+              opts[:aws_secret_access_key] = secret_access_key
+            end
+            opts.merge!(fog_options || {})
+            conn = Fog::Storage.new(opts)
+            conn.sync_clock
+            conn
           end
-          opts.merge!(fog_options || {})
-          conn = Fog::Storage.new(opts)
-          conn.sync_clock
-          conn
-        end
       end
 
       def put_object(src, dest)
         md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
-        options = headers.merge('Content-MD5' => md5)
-        with_retries("PUT '#{ bucket }/#{ dest }'") do
-          File.open(src, 'r') do |file|
+        options = headers.merge("Content-MD5" => md5)
+        with_retries("PUT '#{bucket}/#{dest}'") do
+          File.open(src, "r") do |file|
             connection.put_object(bucket, dest, file, options)
           end
         end
       end
 
       def initiate_multipart(dest)
-        Logger.info "\s\sInitiate Multipart '#{ bucket }/#{ dest }'"
+        Logger.info "\s\sInitiate Multipart '#{bucket}/#{dest}'"
 
         resp = nil
-        with_retries("POST '#{ bucket }/#{ dest }' (Initiate)") do
+        with_retries("POST '#{bucket}/#{dest}' (Initiate)") do
           resp = connection.initiate_multipart_upload(bucket, dest, headers)
         end
-        resp.body['UploadId']
+        resp.body["UploadId"]
       end
 
       # Each part's MD5 is sent to verify the transfer.
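After the refactor, static credentials are only set when `use_iam_profile` is false, and `fog_options` still wins any key collision since it is merged last. A sketch of the two option shapes this produces (region and credentials are hypothetical):

```ruby
# With use_iam_profile = true, credentials come from the EC2 instance profile:
Fog::Storage.new(provider: "AWS", region: "us-east-1", use_iam_profile: true)

# With static credentials:
Fog::Storage.new(
  provider: "AWS",
  region: "us-east-1",
  aws_access_key_id: "AKIA...",   # hypothetical
  aws_secret_access_key: "secret" # hypothetical
)
```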
@@ -164,26 +162,26 @@ module Backup
       # once the multipart upload is completed.
       def upload_parts(src, dest, upload_id, chunk_bytes, file_size)
         total_parts = (file_size / chunk_bytes.to_f).ceil
-        progress = (0.1..0.9).step(0.1).map {|n| (total_parts * n).floor }
-        Logger.info "\s\sUploading #{ total_parts } Parts..."
+        progress = (0.1..0.9).step(0.1).map { |n| (total_parts * n).floor }
+        Logger.info "\s\sUploading #{total_parts} Parts..."
 
         parts = []
-        File.open(src, 'r') do |file|
+        File.open(src, "r") do |file|
           part_number = 0
           while data = file.read(chunk_bytes)
             part_number += 1
             md5 = Base64.encode64(Digest::MD5.digest(data)).chomp
 
-            with_retries("PUT '#{ bucket }/#{ dest }' Part ##{ part_number }") do
+            with_retries("PUT '#{bucket}/#{dest}' Part ##{part_number}") do
               resp = connection.upload_part(
                 bucket, dest, upload_id, part_number, StringIO.new(data),
-                { 'Content-MD5' => md5 }
+                "Content-MD5" => md5
               )
-              parts << resp.headers['ETag']
+              parts << resp.headers["ETag"]
             end
 
             if i = progress.rindex(part_number)
-              Logger.info "\s\s...#{ i + 1 }0% Complete..."
+              Logger.info "\s\s...#{i + 1}0% Complete..."
             end
           end
         end
@@ -191,13 +189,13 @@ module Backup
       end
 
       def complete_multipart(dest, upload_id, parts)
-        Logger.info "\s\sComplete Multipart '#{ bucket }/#{ dest }'"
+        Logger.info "\s\sComplete Multipart '#{bucket}/#{dest}'"
 
-        with_retries("POST '#{ bucket }/#{ dest }' (Complete)") do
+        with_retries("POST '#{bucket}/#{dest}' (Complete)") do
           resp = connection.complete_multipart_upload(bucket, dest, upload_id, parts)
-          raise Error, <<-EOS if resp.body['Code']
+          raise Error, <<-EOS if resp.body["Code"]
             The server returned the following error:
-            #{ resp.body['Code'] }: #{ resp.body['Message'] }
+            #{resp.body["Code"]}: #{resp.body["Message"]}
           EOS
         end
       end
@@ -206,14 +204,10 @@ module Backup
         headers = {}
 
         enc = encryption.to_s.upcase
-        headers.merge!(
-          { 'x-amz-server-side-encryption' => enc}
-        ) unless enc.empty?
+        headers["x-amz-server-side-encryption"] = enc unless enc.empty?
 
         sc = storage_class.to_s.upcase
-        headers.merge!(
-          { 'x-amz-storage-class' => sc }
-        ) unless sc.empty? || sc == 'STANDARD'
+        headers["x-amz-storage-class"] = sc unless sc.empty? || sc == "STANDARD"
 
         headers
       end
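The header logic collapses to direct assignment with the same guards: an empty encryption setting is skipped, and `STANDARD` is omitted since it is S3's default storage class. A standalone sketch of that logic with assumed inputs:

```ruby
def s3_headers(encryption: nil, storage_class: nil)
  headers = {}
  enc = encryption.to_s.upcase
  headers["x-amz-server-side-encryption"] = enc unless enc.empty?
  sc = storage_class.to_s.upcase
  headers["x-amz-storage-class"] = sc unless sc.empty? || sc == "STANDARD"
  headers
end

s3_headers(encryption: :aes256, storage_class: :reduced_redundancy)
# => {"x-amz-server-side-encryption"=>"AES256",
#     "x-amz-storage-class"=>"REDUCED_REDUNDANCY"}
```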
@@ -225,10 +219,10 @@ module Backup
         mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
         Logger.warn Error.new(<<-EOS)
           Chunk Size Adjusted
-          Your original #chunk_size of #{ orig_mb } MiB has been adjusted
-          to #{ mb } MiB in order to satisfy the limit of 10,000 chunks.
+          Your original #chunk_size of #{orig_mb} MiB has been adjusted
+          to #{mb} MiB in order to satisfy the limit of 10,000 chunks.
           To enforce your chosen #chunk_size, you should use the Splitter.
-          e.g. split_into_chunks_of #{ mb * 10_000 } (#chunk_size * 10_000)
+          e.g. split_into_chunks_of #{mb * 10_000} (#chunk_size * 10_000)
         EOS
         1024**2 * mb
       end
@@ -238,14 +232,14 @@ module Backup
 
         def initialize(cloud_io, data)
           @cloud_io = cloud_io
-          @key = data['Key']
-          @etag = data['ETag']
-          @storage_class = data['StorageClass']
+          @key = data["Key"]
+          @etag = data["ETag"]
+          @storage_class = data["StorageClass"]
         end
 
         # currently 'AES256' or nil
         def encryption
-          metadata['x-amz-server-side-encryption']
+          metadata["x-amz-server-side-encryption"]
         end
 
         private
@@ -254,7 +248,6 @@ module Backup
           @metadata ||= @cloud_io.head_object(self).headers
         end
       end
-
     end
   end
 end
data/lib/backup/compressor/base.rb CHANGED

@@ -1,5 +1,3 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Base
@@ -18,18 +16,17 @@ module Backup
       ##
       # Return the compressor name, with Backup namespace removed
       def compressor_name
-        self.class.to_s.sub('Backup::', '')
+        self.class.to_s.sub("Backup::", "")
       end
 
       ##
       # Logs a message to the console and log file to inform
       # the client that Backup is using the compressor
       def log!
-        Logger.info "Using #{ compressor_name } for compression.\n" +
-            "  Command: '#{ @cmd }'\n" +
-            "  Ext: '#{ @ext }'"
+        Logger.info "Using #{compressor_name} for compression.\n" \
+          "  Command: '#{@cmd}'\n" \
+          "  Ext: '#{@ext}'"
       end
-
     end
   end
 end
data/lib/backup/compressor/bzip2.rb CHANGED

@@ -1,9 +1,6 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Bzip2 < Base
-
       ##
       # Specify the level of compression to use.
       #
@@ -24,16 +21,15 @@ module Backup
 
         instance_eval(&block) if block_given?
 
-        @cmd = "#{ utility(:bzip2) }#{ options }"
-        @ext = '.bz2'
+        @cmd = "#{utility(:bzip2)}#{options}"
+        @ext = ".bz2"
       end
 
       private
 
       def options
-        " -#{ @level }" if @level
+        " -#{@level}" if @level
       end
-
     end
   end
 end
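Behavior is unchanged here: `#options` still appends ` -<level>` to the bzip2 command line. Typical usage via the model DSL (a sketch; the level value is arbitrary):

```ruby
Backup::Model.new(:my_backup, "My Backup") do
  compress_with Bzip2 do |compression|
    compression.level = 9 # appended to the command as " -9" by #options
  end
end
```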
data/lib/backup/compressor/custom.rb CHANGED

@@ -1,9 +1,6 @@
-# encoding: utf-8
-
 module Backup
   module Compressor
     class Custom < Base
-
       ##
       # Specify the system command to invoke a compressor,
       # including any command-line arguments.
@@ -36,9 +33,9 @@ module Backup
       # Return the command line using the full path.
       # Ensures the command exists and is executable.
       def set_cmd
-        parts = @command.to_s.split(' ')
+        parts = @command.to_s.split(" ")
         parts[0] = utility(parts[0])
-        parts.join(' ')
+        parts.join(" ")
       end
 
       ##
@@ -47,7 +44,6 @@ module Backup
       def set_ext
         @extension.to_s.strip
       end
-
     end
   end
 end
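`set_cmd` resolves only the first word of `command` through `utility`, leaving any arguments intact, so the compressor binary must be locatable on the PATH. A sketch of configuring it (the pigz command is hypothetical):

```ruby
Backup::Model.new(:my_backup, "My Backup") do
  compress_with Custom do |compression|
    compression.command   = "pigz -p 2" # first word resolved via utility("pigz")
    compression.extension = ".gz"
  end
end
```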