backup 3.6.0 → 3.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +2 -0
- data/lib/backup.rb +14 -4
- data/lib/backup/archive.rb +3 -2
- data/lib/backup/cleaner.rb +4 -2
- data/lib/backup/cli.rb +7 -5
- data/lib/backup/cloud_io/base.rb +41 -0
- data/lib/backup/cloud_io/cloud_files.rb +296 -0
- data/lib/backup/cloud_io/s3.rb +252 -0
- data/lib/backup/compressor/gzip.rb +2 -1
- data/lib/backup/config.rb +13 -5
- data/lib/backup/configuration.rb +1 -1
- data/lib/backup/configuration/helpers.rb +3 -1
- data/lib/backup/database/base.rb +3 -1
- data/lib/backup/database/mongodb.rb +2 -2
- data/lib/backup/database/mysql.rb +2 -2
- data/lib/backup/database/postgresql.rb +12 -2
- data/lib/backup/database/redis.rb +3 -2
- data/lib/backup/encryptor/gpg.rb +8 -10
- data/lib/backup/errors.rb +39 -70
- data/lib/backup/logger.rb +7 -2
- data/lib/backup/logger/fog_adapter.rb +30 -0
- data/lib/backup/model.rb +32 -14
- data/lib/backup/notifier/base.rb +4 -3
- data/lib/backup/notifier/campfire.rb +0 -1
- data/lib/backup/notifier/http_post.rb +122 -0
- data/lib/backup/notifier/mail.rb +38 -0
- data/lib/backup/notifier/nagios.rb +69 -0
- data/lib/backup/notifier/prowl.rb +0 -1
- data/lib/backup/notifier/pushover.rb +0 -1
- data/lib/backup/package.rb +5 -0
- data/lib/backup/packager.rb +3 -2
- data/lib/backup/pipeline.rb +4 -2
- data/lib/backup/storage/base.rb +2 -1
- data/lib/backup/storage/cloud_files.rb +151 -0
- data/lib/backup/storage/cycler.rb +4 -2
- data/lib/backup/storage/dropbox.rb +20 -16
- data/lib/backup/storage/ftp.rb +1 -2
- data/lib/backup/storage/local.rb +3 -3
- data/lib/backup/storage/ninefold.rb +3 -4
- data/lib/backup/storage/rsync.rb +1 -2
- data/lib/backup/storage/s3.rb +49 -158
- data/lib/backup/storage/scp.rb +3 -4
- data/lib/backup/storage/sftp.rb +1 -2
- data/lib/backup/syncer/base.rb +0 -1
- data/lib/backup/syncer/cloud/base.rb +129 -208
- data/lib/backup/syncer/cloud/cloud_files.rb +56 -41
- data/lib/backup/syncer/cloud/local_file.rb +93 -0
- data/lib/backup/syncer/cloud/s3.rb +78 -31
- data/lib/backup/syncer/rsync/base.rb +7 -0
- data/lib/backup/syncer/rsync/local.rb +0 -5
- data/lib/backup/syncer/rsync/push.rb +1 -2
- data/lib/backup/utilities.rb +18 -15
- data/lib/backup/version.rb +1 -1
- data/templates/cli/notifier/http_post +35 -0
- data/templates/cli/notifier/nagios +13 -0
- data/templates/cli/storage/cloud_files +8 -17
- data/templates/cli/storage/s3 +3 -10
- data/templates/cli/syncer/cloud_files +3 -31
- data/templates/cli/syncer/s3 +3 -27
- data/templates/notifier/mail/failure.erb +6 -1
- data/templates/notifier/mail/success.erb +6 -1
- data/templates/notifier/mail/warning.erb +6 -1
- metadata +37 -42
- data/lib/backup/storage/cloudfiles.rb +0 -68
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: eee6bab50e1788fc4d4095ee5dee8fad3f1d5b0c
+  data.tar.gz: 4c6c6f7cab298371726c8c33faab6e46f0aaa675
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f401dc3a3f4a50747c3f13cc819292a6196f6d0c3913e31d079b2384f7185f7f6348f2202489a0f2341739dc96c7544478ad45ab96c1b2afaee72ddb0a0ca1c4
+  data.tar.gz: a867d269512d8ca5eb4b19bba111e13555a1a9167d689d7d16da730ddddeaf8d4e7b125571d65298adee9eb56548ac58c79be5b320ee870f69e6233bf05bc841
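Note: the 3.7.0 gem records real SHA1/SHA512 digests in checksums.yaml. A minimal verification sketch (not part of the gem; assumes you have extracted metadata.gz and data.tar.gz from the downloaded backup-3.7.0.gem archive):

    require 'digest'

    # Compare against the values recorded in checksums.yaml above.
    puts Digest::SHA1.file('metadata.gz').hexdigest
    puts Digest::SHA512.file('data.tar.gz').hexdigest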
data/README.md
CHANGED
data/lib/backup.rb
CHANGED
@@ -7,10 +7,17 @@ require 'syslog'
 require 'yaml'
 require 'etc'
 require 'forwardable'
+require 'thread'
 
 require 'open4'
 require 'thor'
 
+require 'excon'
+# Include response.inspect in error messages.
+Excon.defaults[:debug_response] = true
+# Excon should not retry failed requests. We handle that.
+Excon.defaults[:middlewares].delete(Excon::Middleware::Idempotent)
+
 ##
 # The Backup Ruby Gem
 module Backup
@@ -19,11 +26,11 @@ module Backup
   # Backup's internal paths
   LIBRARY_PATH = File.join(File.dirname(__FILE__), 'backup')
   STORAGE_PATH = File.join(LIBRARY_PATH, 'storage')
+  SYNCER_PATH = File.join(LIBRARY_PATH, 'syncer')
   DATABASE_PATH = File.join(LIBRARY_PATH, 'database')
   COMPRESSOR_PATH = File.join(LIBRARY_PATH, 'compressor')
   ENCRYPTOR_PATH = File.join(LIBRARY_PATH, 'encryptor')
   NOTIFIER_PATH = File.join(LIBRARY_PATH, 'notifier')
-  SYNCER_PATH = File.join(LIBRARY_PATH, 'syncer')
   TEMPLATE_PATH = File.expand_path('../../templates', __FILE__)
 
   ##
@@ -32,7 +39,7 @@ module Backup
     autoload :Base, File.join(STORAGE_PATH, 'base')
     autoload :Cycler, File.join(STORAGE_PATH, 'cycler')
     autoload :S3, File.join(STORAGE_PATH, 's3')
-    autoload :CloudFiles, File.join(STORAGE_PATH, 'cloudfiles')
+    autoload :CloudFiles, File.join(STORAGE_PATH, 'cloud_files')
     autoload :Ninefold, File.join(STORAGE_PATH, 'ninefold')
     autoload :Dropbox, File.join(STORAGE_PATH, 'dropbox')
     autoload :FTP, File.join(STORAGE_PATH, 'ftp')
@@ -48,6 +55,7 @@ module Backup
     autoload :Base, File.join(SYNCER_PATH, 'base')
     module Cloud
       autoload :Base, File.join(SYNCER_PATH, 'cloud', 'base')
+      autoload :LocalFile, File.join(SYNCER_PATH, 'cloud', 'local_file')
       autoload :CloudFiles, File.join(SYNCER_PATH, 'cloud', 'cloud_files')
       autoload :S3, File.join(SYNCER_PATH, 'cloud', 's3')
     end
@@ -99,11 +107,15 @@ module Backup
     autoload :Prowl, File.join(NOTIFIER_PATH, 'prowl')
     autoload :Hipchat, File.join(NOTIFIER_PATH, 'hipchat')
     autoload :Pushover, File.join(NOTIFIER_PATH, 'pushover')
+    autoload :HttpPost, File.join(NOTIFIER_PATH, 'http_post')
+    autoload :Nagios, File.join(NOTIFIER_PATH, 'nagios')
   end
 
   ##
   # Require Backup base files
   %w{
+    errors
+    logger
     utilities
     archive
     binder
@@ -111,8 +123,6 @@ module Backup
     config
     cli
     configuration
-    errors
-    logger
     model
     package
     packager
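Note on the Excon changes above: Backup now sets Excon defaults at load time so that error messages include the response and so that retries are handled by Backup itself rather than by Excon. Illustration only (not gem code), showing why setting Excon.defaults is enough to affect all of Backup's cloud requests:

    require 'excon'

    # Excon.defaults applies to every connection created afterwards, so all of
    # Backup's cloud requests (made via fog) inherit these settings. With the
    # Idempotent middleware removed, Excon no longer re-issues failed requests
    # on its own -- Backup's CloudIO#with_retries (added below) does that.
    Excon.defaults[:debug_response] = true
    Excon.defaults[:middlewares].delete(Excon::Middleware::Idempotent)
    Excon.defaults[:middlewares].include?(Excon::Middleware::Idempotent) #=> false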
data/lib/backup/archive.rb
CHANGED
@@ -2,6 +2,8 @@
 
 module Backup
   class Archive
+    class Error < Backup::Error; end
+
     include Backup::Utilities::Helpers
     attr_reader :name, :options
 
@@ -88,8 +90,7 @@ module Backup
       if pipeline.success?
         Logger.info "Archive '#{ name }' Complete!"
       else
-        raise
-          "Failed to Create Archive '#{ name }'\n" +
+        raise Error, "Failed to Create Archive '#{ name }'\n" +
           pipeline.error_messages
       end
     end
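Note: the per-component `class Error < Backup::Error; end` pattern seen here recurs throughout this release. A rough sketch, not taken from the gem, of how the hierarchy lets callers rescue narrowly or broadly (only the class names come from the diff):

    begin
      # ... an archive step that raises Backup::Archive::Error ...
    rescue Backup::Archive::Error => err
      warn "archive failed: #{ err.message }"            # only archive failures
    rescue Backup::Error => err
      warn "backup component failed: #{ err.message }"   # any other component error
    end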
data/lib/backup/cleaner.rb
CHANGED
@@ -2,6 +2,8 @@
 
 module Backup
   module Cleaner
+    class Error < Backup::Error; end
+
     class << self
 
       ##
@@ -37,7 +39,7 @@ module Backup
         end
 
         unless messages.empty?
-          Logger.warn
+          Logger.warn Error.new(<<-EOS)
            Cleanup Warning
            #{ messages.join("\n") }
            Please check the log for messages and/or your notifications
@@ -93,7 +95,7 @@ module Backup
         end
 
         unless messages.empty?
-          Logger.warn
+          Logger.warn Error.new(<<-EOS)
            Cleanup Warning
            #{ messages.join("\n") }
            Make sure you check these files before the next scheduled backup for
data/lib/backup/cli.rb
CHANGED
@@ -4,6 +4,8 @@
 # Build the Backup Command Line Interface using Thor
 module Backup
   class CLI < Thor
+    class Error < Backup::Error; end
+    class FatalError < Backup::FatalError; end
 
     ##
     # [Perform]
@@ -142,14 +144,14 @@ module Backup
        Model.find_by_trigger(trigger)
      }.flatten.uniq
 
-      raise
+      raise Error, "No Models found for trigger(s) " +
        "'#{ triggers.join(',') }'." if models.empty?
 
      # Finalize Logger and begin real-time logging.
      Logger.start!
 
    rescue Exception => err
-      Logger.error
+      Logger.error Error.wrap(err)
      # Logger configuration will be ignored
      # and messages will be output to the console only.
      Logger.abort!
@@ -166,7 +168,7 @@ module Backup
        when 2
          errors = true
          unless models.empty?
-            Logger.info
+            Logger.info Error.new(<<-EOS)
              Backup will now continue...
              The following triggers will now be processed:
              (#{ models.map {|m| m.trigger }.join(', ') })
@@ -175,7 +177,7 @@ module Backup
        when 3
          fatal = true
          unless models.empty?
-            Logger.error
+            Logger.error FatalError.new(<<-EOS)
              Backup will now exit.
              The following triggers will not be processed:
              (#{ models.map {|m| m.trigger }.join(', ') })
@@ -227,7 +229,7 @@ module Backup
      Config.update(options)
      Config.load_config!
    rescue Exception => err
-      Logger.error
+      Logger.error Error.wrap(err)
    end
 
    if Logger.has_warnings? || Logger.has_errors?
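Note: the CLI hunks above replace bare Logger calls with wrapped, classed errors. As used throughout this release, Error.wrap(err) and Error.wrap(err, message) appear to build a new component-specific error around the rescued exception so the Logger records a classed error carrying the original exception's details. Hypothetical illustration (caller code is not from the gem):

    begin
      Backup::Config.load_config!                  # may raise any exception
    rescue Exception => err
      # Wraps err in a Backup::CLI::Error and logs it.
      Backup::Logger.error Backup::CLI::Error.wrap(err)
    end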
data/lib/backup/cloud_io/base.rb
ADDED
@@ -0,0 +1,41 @@
+# encoding: utf-8
+
+module Backup
+  module CloudIO
+    class Error < Backup::Error; end
+    class FileSizeError < Backup::Error; end
+
+    class Base
+      attr_reader :max_retries, :retry_waitsec
+
+      def initialize(options = {})
+        @max_retries = options[:max_retries]
+        @retry_waitsec = options[:retry_waitsec]
+      end
+
+      private
+
+      def with_retries(operation)
+        retries = 0
+        begin
+          yield
+        rescue => err
+          retries += 1
+          raise Error.wrap(err, <<-EOS) if retries > max_retries
+            Max Retries (#{ max_retries }) Exceeded!
+            Operation: #{ operation }
+            Be sure to check the log messages for each retry attempt.
+          EOS
+
+          Logger.info Error.wrap(err, <<-EOS)
+            Retry ##{ retries } of #{ max_retries }
+            Operation: #{ operation }
+          EOS
+          sleep(retry_waitsec)
+          retry
+        end
+      end
+
+    end
+  end
+end
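Note: a minimal usage sketch for the new CloudIO::Base#with_retries helper shown above. The subclass and the network call are hypothetical; :max_retries and :retry_waitsec come from the initializer in the diff:

    class ExampleIO < Backup::CloudIO::Base     # hypothetical subclass
      def fetch(path)
        with_retries("GET '#{ path }'") do
          # Any block that may raise: on failure it is logged via Error.wrap,
          # retried every retry_waitsec seconds, and re-raised once
          # max_retries is exceeded.
          http_get(path)                        # hypothetical network call
        end
      end
    end

    io = ExampleIO.new(:max_retries => 10, :retry_waitsec => 30)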
data/lib/backup/cloud_io/cloud_files.rb
ADDED
@@ -0,0 +1,296 @@
+# encoding: utf-8
+require 'backup/cloud_io/base'
+require 'fog'
+require 'digest/md5'
+
+module Backup
+  module CloudIO
+    class CloudFiles < Base
+      class Error < Backup::Error; end
+
+      MAX_FILE_SIZE = 1024**3 * 5     # 5 GiB
+      MAX_SLO_SIZE = 1024**3 * 5000   # 1000 segments @ 5 GiB
+      SEGMENT_BUFFER = 1024**2        # 1 MiB
+
+      attr_reader :username, :api_key, :auth_url, :region, :servicenet,
+                  :container, :segments_container, :segment_size, :days_to_keep
+
+      def initialize(options = {})
+        super
+
+        @username = options[:username]
+        @api_key = options[:api_key]
+        @auth_url = options[:auth_url]
+        @region = options[:region]
+        @servicenet = options[:servicenet]
+        @container = options[:container]
+        @segments_container = options[:segments_container]
+        @segment_size = options[:segment_size]
+        @days_to_keep = options[:days_to_keep]
+      end
+
+      # The Syncer may call this method in multiple threads,
+      # but #objects is always called before this occurs.
+      def upload(src, dest)
+        create_containers
+
+        file_size = File.size(src)
+        segment_bytes = segment_size * 1024**2
+        if segment_bytes > 0 && file_size > segment_bytes
+          raise FileSizeError, <<-EOS if file_size > MAX_SLO_SIZE
+            File Too Large
+            File: #{ src }
+            Size: #{ file_size }
+            Max SLO Size is #{ MAX_SLO_SIZE } (5 GiB * 1000 segments)
+          EOS
+
+          segment_bytes = adjusted_segment_bytes(segment_bytes, file_size)
+          segments = upload_segments(src, dest, segment_bytes, file_size)
+          upload_manifest(dest, segments)
+        else
+          raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
+            File Too Large
+            File: #{ src }
+            Size: #{ file_size }
+            Max File Size is #{ MAX_FILE_SIZE } (5 GiB)
+          EOS
+
+          put_object(src, dest)
+        end
+      end
+
+      # Returns all objects in the container with the given prefix.
+      #
+      # - #get_container returns a max of 10000 objects per request.
+      # - Returns objects sorted using a sqlite binary collating function.
+      # - If marker is given, only objects after the marker are in the response.
+      def objects(prefix)
+        objects = []
+        resp = nil
+        prefix = prefix.chomp('/')
+        opts = { :prefix => prefix + '/' }
+
+        create_containers
+
+        while resp.nil? || resp.body.count == 10000
+          opts.merge!(:marker => objects.last.name) unless objects.empty?
+          with_retries("GET '#{ container }/#{ prefix }/*'") do
+            resp = connection.get_container(container, opts)
+          end
+          resp.body.each do |obj_data|
+            objects << Object.new(self, obj_data)
+          end
+        end
+
+        objects
+      end
+
+      # Used by Object to fetch metadata if needed.
+      def head_object(object)
+        resp = nil
+        with_retries("HEAD '#{ container }/#{ object.name }'") do
+          resp = connection.head_object(container, object.name)
+        end
+        resp
+      end
+
+      # Delete non-SLO object(s) from the container.
+      #
+      # - Called by the Storage (with objects) and the Syncer (with names)
+      # - Deletes 10,000 objects per request.
+      # - Missing objects will be ignored.
+      def delete(objects_or_names)
+        names = Array(objects_or_names).dup
+        names.map!(&:name) if names.first.is_a?(Object)
+
+        until names.empty?
+          _names = names.slice!(0, 10000)
+          with_retries('DELETE Multiple Objects') do
+            resp = connection.delete_multiple_objects(container, _names)
+            resp_status = resp.body['Response Status']
+            raise Error, <<-EOS unless resp_status == '200 OK'
+              #{ resp_status }
+              The server returned the following:
+              #{ resp.body.inspect }
+            EOS
+          end
+        end
+      end
+
+      # Delete an SLO object(s) from the container.
+      #
+      # - Used only by the Storage. The Syncer cannot use SLOs.
+      # - Removes the SLO manifest object and all associated segments.
+      # - Missing segments will be ignored.
+      def delete_slo(objects)
+        Array(objects).each do |object|
+          with_retries("DELETE SLO Manifest '#{ container }/#{ object.name }'") do
+            resp = connection.delete_static_large_object(container, object.name)
+            resp_status = resp.body['Response Status']
+            raise Error, <<-EOS unless resp_status == '200 OK'
+              #{ resp_status }
+              The server returned the following:
+              #{ resp.body.inspect }
+            EOS
+          end
+        end
+      end
+
+      private
+
+      def connection
+        @connection ||= Fog::Storage.new(
+          :provider => 'Rackspace',
+          :rackspace_username => username,
+          :rackspace_api_key => api_key,
+          :rackspace_auth_url => auth_url,
+          :rackspace_region => region,
+          :rackspace_servicenet => servicenet
+        )
+      end
+
+      def create_containers
+        return if @containers_created
+        @containers_created = true
+
+        with_retries('Create Containers') do
+          connection.put_container(container)
+          connection.put_container(segments_container) if segments_container
+        end
+      end
+
+      def put_object(src, dest)
+        opts = headers.merge('ETag' => Digest::MD5.file(src).hexdigest)
+        with_retries("PUT '#{ container }/#{ dest }'") do
+          File.open(src, 'r') do |file|
+            connection.put_object(container, dest, file, opts)
+          end
+        end
+      end
+
+      # Each segment is uploaded using chunked transfer encoding using
+      # SEGMENT_BUFFER, and each segment's MD5 is sent to verify the transfer.
+      # Each segment's MD5 and byte_size will also be verified when the
+      # SLO manifest object is uploaded.
+      def upload_segments(src, dest, segment_bytes, file_size)
+        total_segments = (file_size / segment_bytes.to_f).ceil
+        progress = (0.1..0.9).step(0.1).map {|n| (total_segments * n).floor }
+        Logger.info "\s\sUploading #{ total_segments } SLO Segments..."
+
+        segments = []
+        File.open(src, 'r') do |file|
+          segment_number = 0
+          until file.eof?
+            segment_number += 1
+            object = "#{ dest }/#{ segment_number.to_s.rjust(4, '0') }"
+            pos = file.pos
+            md5 = segment_md5(file, segment_bytes)
+            opts = headers.merge('ETag' => md5)
+
+            with_retries("PUT '#{ segments_container }/#{ object }'") do
+              file.seek(pos)
+              offset = 0
+              connection.put_object(segments_container, object, nil, opts) do
+                # block is called to stream data until it returns ''
+                data = ''
+                if offset <= segment_bytes - SEGMENT_BUFFER
+                  data = file.read(SEGMENT_BUFFER).to_s # nil => ''
+                  offset += data.size
+                end
+                data
+              end
+            end
+
+            segments << {
+              :path => "#{ segments_container }/#{ object }",
+              :etag => md5,
+              :size_bytes => file.pos - pos
+            }
+
+            if i = progress.rindex(segment_number)
+              Logger.info "\s\s...#{ i + 1 }0% Complete..."
+            end
+          end
+        end
+        segments
+      end
+
+      def segment_md5(file, segment_bytes)
+        md5 = Digest::MD5.new
+        offset = 0
+        while offset <= segment_bytes - SEGMENT_BUFFER
+          data = file.read(SEGMENT_BUFFER)
+          break unless data
+          offset += data.size
+          md5 << data
+        end
+        md5.hexdigest
+      end
+
+      # Each segment's ETag and byte_size will be verified once uploaded.
+      # Request will raise an exception if verification fails or segments
+      # are not found. However, each segment's ETag was verified when we
+      # uploaded the segments, so this should only retry failed requests.
+      def upload_manifest(dest, segments)
+        Logger.info "\s\sStoring SLO Manifest '#{ container }/#{ dest }'"
+
+        with_retries("PUT SLO Manifest '#{ container }/#{ dest }'") do
+          connection.put_static_obj_manifest(container, dest, segments, headers)
+        end
+      end
+
+      # If :days_to_keep was set, each object will be scheduled for deletion.
+      # This includes non-SLO objects, the SLO manifest and all segments.
+      def headers
+        headers = {}
+        headers.merge!('X-Delete-At' => delete_at) if delete_at
+        headers
+      end
+
+      def delete_at
+        return unless days_to_keep
+        @delete_at ||= (Time.now.utc + days_to_keep * 60**2 * 24).to_i
+      end
+
+      def adjusted_segment_bytes(segment_bytes, file_size)
+        return segment_bytes if file_size / segment_bytes.to_f <= 1000
+
+        mb = orig_mb = segment_bytes / 1024**2
+        mb += 1 until file_size / (1024**2 * mb).to_f <= 1000
+        Logger.warn Error.new(<<-EOS)
+          Segment Size Adjusted
+          Your original #segment_size of #{ orig_mb } MiB has been adjusted
+          to #{ mb } MiB in order to satisfy the limit of 1000 segments.
+          To enforce your chosen #segment_size, you should use the Splitter.
+          e.g. split_into_chunks_of #{ mb * 1000 } (#segment_size * 1000)
+        EOS
+        1024**2 * mb
+      end
+
+      class Object
+        attr_reader :name, :hash
+
+        def initialize(cloud_io, data)
+          @cloud_io = cloud_io
+          @name = data['name']
+          @hash = data['hash']
+        end
+
+        def slo?
+          !!metadata['X-Static-Large-Object']
+        end
+
+        def marked_for_deletion?
+          !!metadata['X-Delete-At']
+        end
+
+        private
+
+        def metadata
+          @metadata ||= @cloud_io.head_object(self).headers
+        end
+      end
+
+    end
+  end
+end
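Note: a worked example (illustration only, values are made up) of two pieces of arithmetic in the file above. #adjusted_segment_bytes bumps the segment size until the file fits in 1000 SLO segments, and #delete_at converts days_to_keep into the absolute Unix timestamp used for the X-Delete-At header:

    # Segment size adjustment: same loop as #adjusted_segment_bytes.
    file_size     = 1536 * 1024**2   # 1.5 GiB
    segment_bytes = 1 * 1024**2      # 1 MiB => 1536 segments, over the 1000 limit
    mb = segment_bytes / 1024**2
    mb += 1 until file_size / (1024**2 * mb).to_f <= 1000
    mb #=> 2 (768 segments of 2 MiB)

    # Deletion scheduling: days_to_keep becomes seconds added to "now" (UTC).
    days_to_keep = 30
    delete_at = (Time.now.utc + days_to_keep * 60**2 * 24).to_i  # X-Delete-At value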