venet-backup 4.1.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/LICENSE.md +24 -0
- data/README.md +15 -0
- data/bin/backup +5 -0
- data/lib/backup.rb +141 -0
- data/lib/backup/archive.rb +170 -0
- data/lib/backup/binder.rb +22 -0
- data/lib/backup/cleaner.rb +116 -0
- data/lib/backup/cli.rb +374 -0
- data/lib/backup/cloud_io/base.rb +41 -0
- data/lib/backup/cloud_io/cloud_files.rb +298 -0
- data/lib/backup/cloud_io/s3.rb +260 -0
- data/lib/backup/compressor/base.rb +35 -0
- data/lib/backup/compressor/bzip2.rb +39 -0
- data/lib/backup/compressor/custom.rb +53 -0
- data/lib/backup/compressor/gzip.rb +74 -0
- data/lib/backup/config.rb +119 -0
- data/lib/backup/config/dsl.rb +103 -0
- data/lib/backup/config/helpers.rb +143 -0
- data/lib/backup/database/base.rb +85 -0
- data/lib/backup/database/mongodb.rb +186 -0
- data/lib/backup/database/mysql.rb +180 -0
- data/lib/backup/database/openldap.rb +95 -0
- data/lib/backup/database/postgresql.rb +133 -0
- data/lib/backup/database/redis.rb +179 -0
- data/lib/backup/database/riak.rb +82 -0
- data/lib/backup/database/sqlite.rb +57 -0
- data/lib/backup/encryptor/base.rb +29 -0
- data/lib/backup/encryptor/gpg.rb +747 -0
- data/lib/backup/encryptor/open_ssl.rb +72 -0
- data/lib/backup/errors.rb +58 -0
- data/lib/backup/logger.rb +199 -0
- data/lib/backup/logger/console.rb +51 -0
- data/lib/backup/logger/fog_adapter.rb +29 -0
- data/lib/backup/logger/logfile.rb +133 -0
- data/lib/backup/logger/syslog.rb +116 -0
- data/lib/backup/model.rb +454 -0
- data/lib/backup/notifier/base.rb +98 -0
- data/lib/backup/notifier/campfire.rb +69 -0
- data/lib/backup/notifier/datadog.rb +116 -0
- data/lib/backup/notifier/flowdock.rb +102 -0
- data/lib/backup/notifier/hipchat.rb +93 -0
- data/lib/backup/notifier/http_post.rb +122 -0
- data/lib/backup/notifier/mail.rb +238 -0
- data/lib/backup/notifier/nagios.rb +74 -0
- data/lib/backup/notifier/pagerduty.rb +81 -0
- data/lib/backup/notifier/prowl.rb +69 -0
- data/lib/backup/notifier/pushover.rb +80 -0
- data/lib/backup/notifier/slack.rb +158 -0
- data/lib/backup/notifier/twitter.rb +64 -0
- data/lib/backup/notifier/zabbix.rb +68 -0
- data/lib/backup/package.rb +51 -0
- data/lib/backup/packager.rb +101 -0
- data/lib/backup/pipeline.rb +124 -0
- data/lib/backup/splitter.rb +76 -0
- data/lib/backup/storage/base.rb +57 -0
- data/lib/backup/storage/cloud_files.rb +158 -0
- data/lib/backup/storage/cycler.rb +65 -0
- data/lib/backup/storage/dropbox.rb +236 -0
- data/lib/backup/storage/ftp.rb +98 -0
- data/lib/backup/storage/google/google_drive_auth.rb +96 -0
- data/lib/backup/storage/google/google_drive_transfer.rb +125 -0
- data/lib/backup/storage/google_drive.rb +62 -0
- data/lib/backup/storage/local.rb +64 -0
- data/lib/backup/storage/ninefold.rb +74 -0
- data/lib/backup/storage/rsync.rb +248 -0
- data/lib/backup/storage/s3.rb +154 -0
- data/lib/backup/storage/scp.rb +67 -0
- data/lib/backup/storage/sftp.rb +82 -0
- data/lib/backup/syncer/base.rb +70 -0
- data/lib/backup/syncer/cloud/base.rb +179 -0
- data/lib/backup/syncer/cloud/cloud_files.rb +83 -0
- data/lib/backup/syncer/cloud/local_file.rb +100 -0
- data/lib/backup/syncer/cloud/s3.rb +110 -0
- data/lib/backup/syncer/rsync/base.rb +48 -0
- data/lib/backup/syncer/rsync/local.rb +31 -0
- data/lib/backup/syncer/rsync/pull.rb +51 -0
- data/lib/backup/syncer/rsync/push.rb +205 -0
- data/lib/backup/template.rb +46 -0
- data/lib/backup/utilities.rb +224 -0
- data/lib/backup/version.rb +5 -0
- data/templates/cli/archive +28 -0
- data/templates/cli/compressor/bzip2 +4 -0
- data/templates/cli/compressor/custom +7 -0
- data/templates/cli/compressor/gzip +4 -0
- data/templates/cli/config +123 -0
- data/templates/cli/databases/mongodb +15 -0
- data/templates/cli/databases/mysql +18 -0
- data/templates/cli/databases/openldap +24 -0
- data/templates/cli/databases/postgresql +16 -0
- data/templates/cli/databases/redis +16 -0
- data/templates/cli/databases/riak +17 -0
- data/templates/cli/databases/sqlite +12 -0
- data/templates/cli/encryptor/gpg +27 -0
- data/templates/cli/encryptor/openssl +9 -0
- data/templates/cli/model +26 -0
- data/templates/cli/notifier/zabbix +15 -0
- data/templates/cli/notifiers/campfire +12 -0
- data/templates/cli/notifiers/datadog +57 -0
- data/templates/cli/notifiers/flowdock +16 -0
- data/templates/cli/notifiers/hipchat +15 -0
- data/templates/cli/notifiers/http_post +32 -0
- data/templates/cli/notifiers/mail +21 -0
- data/templates/cli/notifiers/nagios +13 -0
- data/templates/cli/notifiers/pagerduty +12 -0
- data/templates/cli/notifiers/prowl +11 -0
- data/templates/cli/notifiers/pushover +11 -0
- data/templates/cli/notifiers/slack +23 -0
- data/templates/cli/notifiers/twitter +13 -0
- data/templates/cli/splitter +7 -0
- data/templates/cli/storages/cloud_files +11 -0
- data/templates/cli/storages/dropbox +19 -0
- data/templates/cli/storages/ftp +12 -0
- data/templates/cli/storages/local +7 -0
- data/templates/cli/storages/ninefold +9 -0
- data/templates/cli/storages/rsync +17 -0
- data/templates/cli/storages/s3 +14 -0
- data/templates/cli/storages/scp +14 -0
- data/templates/cli/storages/sftp +14 -0
- data/templates/cli/syncers/cloud_files +22 -0
- data/templates/cli/syncers/rsync_local +20 -0
- data/templates/cli/syncers/rsync_pull +28 -0
- data/templates/cli/syncers/rsync_push +28 -0
- data/templates/cli/syncers/s3 +27 -0
- data/templates/general/links +3 -0
- data/templates/general/version.erb +2 -0
- data/templates/notifier/mail/failure.erb +16 -0
- data/templates/notifier/mail/success.erb +16 -0
- data/templates/notifier/mail/warning.erb +16 -0
- data/templates/storage/dropbox/authorization_url.erb +6 -0
- data/templates/storage/dropbox/authorized.erb +4 -0
- data/templates/storage/dropbox/cache_file_written.erb +10 -0
- data/templates/storage/google_drive/authorization_url.erb +6 -0
- data/templates/storage/google_drive/authorized.erb +4 -0
- data/templates/storage/google_drive/cache_file_written.erb +10 -0
- metadata +957 -0
# encoding: utf-8

module Backup
  module Packager
    class Error < Backup::Error; end

    class << self
      include Utilities::Helpers

      ##
      # Assembles the final package file(s) for the given backup model.
      # Raises Error if any command in the packaging pipeline fails.
      def package!(model)
        @package   = model.package
        @encryptor = model.encryptor
        @splitter  = model.splitter
        @pipeline  = Pipeline.new

        Logger.info "Packaging the backup files..."
        procedure.call

        if @pipeline.success?
          Logger.info "Packaging Complete!"
        else
          raise Error, "Failed to Create Backup Package\n" +
              @pipeline.error_messages
        end
      end

      private

      ##
      # Builds a chain of nested Procs, each of which adds its command to
      # the Pipeline before handing control to the next Proc in the chain.
      # This lets the Encryptor and the Splitter act both before and after
      # the Pipeline is actually run (currently only the Splitter uses the
      # "after" phase, to inspect the files `split` produced).
      # Returns the first Proc in the chain; calling it runs everything.
      def procedure
        chain = []

        # Initial `tar` command packaging the temporary backup folder.
        # Its output is piped to the Encryptor, then the Splitter, or
        # through `cat` into the final output file if neither is set.
        @pipeline << "#{ utility(:tar) } -cf - " +
            "-C '#{ Config.tmp_path }' '#{ @package.trigger }'"

        # The Encryptor (if any) contributes its command to the pipe and
        # amends the package extension before yielding to the next link.
        if @encryptor
          chain << lambda do
            @encryptor.encrypt_with do |command, ext|
              @pipeline << command
              @package.extension << ext
              chain.shift.call
            end
          end
        end

        # The Splitter (if any) adds a `split` command and, once the
        # Pipeline has run, fixes up the package's chunk_suffixes (or
        # strips the lone '-aa' style suffix when no split occurred).
        # Without a Splitter, `cat` writes the single final output file.
        if @splitter
          chain << lambda do
            @splitter.split_with do |command|
              @pipeline << command
              chain.shift.call
            end
          end
        else
          chain << lambda do
            output_file = File.join(Config.tmp_path, @package.basename)
            @pipeline << "#{ utility(:cat) } > #{ output_file }"
            chain.shift.call
          end
        end

        # Innermost link: run the Pipeline, then unwind back out through
        # any enclosing Procs above.
        chain << lambda { @pipeline.run }

        chain.shift
      end

    end
  end
end
# encoding: utf-8

module Backup
  class Pipeline
    class Error < Backup::Error; end

    include Utilities::Helpers

    # +stderr+: accumulated STDERR output from the last #run.
    # +errors+: Array of SystemCallError, one per failed command.
    attr_reader :stderr, :errors

    def initialize
      @commands = []
      @success_codes = []
      @errors = []
      @stderr = ''
    end

    ##
    # Adds a command to be executed in the pipeline.
    # Each command will be run in the order in which it was added,
    # with its output being piped to the next command.
    #
    # +success_codes+ must be an Array of Integer exit codes that will
    # be considered successful for the +command+.
    def add(command, success_codes)
      @commands << command
      @success_codes << success_codes
    end

    ##
    # Commands added using this method will only be considered successful
    # if their exit status is 0. Use #add to accept other exit codes.
    def <<(command)
      add(command, [0])
    end

    ##
    # Runs the command line from `#pipeline` and collects STDOUT/STDERR.
    # STDOUT carries only the "index|exitstatus" records emitted by the
    # wrapper subshells (see #pipeline); it is parsed to determine the exit
    # status of each command. For each command whose exit status is not in
    # its success_codes, a SystemCallError is added to @errors. All STDERR
    # output is captured in @stderr.
    #
    # The final command should not write to STDOUT; any such output is
    # redirected to STDERR and logged as a warning when the run succeeds.
    #
    # Use #success? to determine if all commands succeeded, and
    # #error_messages for a report when it returns false.
    def run
      Open4.popen4(pipeline) do |pid, stdin, stdout, stderr|
        pipestatus = stdout.read.gsub("\n", '').split(':').sort
        pipestatus.each do |status|
          index, exitstatus = status.split('|').map(&:to_i)
          unless @success_codes[index].include?(exitstatus)
            command = command_name(@commands[index])
            @errors << SystemCallError.new(
              "'#{ command }' returned exit code: #{ exitstatus }", exitstatus
            )
          end
        end
        @stderr = stderr.read.strip
      end
      Logger.warn(stderr_messages) if success? && stderr_messages
    # Rescue StandardError (not Exception) so signals, SystemExit and
    # memory errors propagate untouched instead of being wrapped.
    rescue StandardError => err
      raise Error.wrap(err, 'Pipeline failed to execute')
    end

    # True when no command reported a non-successful exit status.
    def success?
      @errors.empty?
    end

    ##
    # Returns a multi-line String, reporting all STDERR messages received
    # from the commands in the pipeline (if any), along with the
    # SystemCallError (Errno) message for each failed command.
    def error_messages
      @error_messages ||= (stderr_messages || '') +
          "The following system errors were returned:\n" +
          @errors.map {|err| "#{ err.class }: #{ err.message }" }.join("\n")
    end

    private

    ##
    # Each command is wrapped in a subshell grouped with an `echo` that
    # reports "index|exitstatus" on FD#3; the command's own STDERR goes to
    # FD#4. Each command's STDOUT feeds the STDIN of the next subshell.
    # The whole pipeline runs in an outer group which maps FD#3 to STDOUT
    # and FD#4 to STDERR so both streams can be collected separately;
    # FD#1 is sent to STDERR so stray output from the final command cannot
    # corrupt the exit-status records.
    #
    # The "index|exitstatus" records may arrive in any order (hence the
    # index), and STDERR from multiple commands may interleave; the status
    # records themselves are small enough that their writes are atomic.
    def pipeline
      parts = []
      @commands.each_with_index do |command, index|
        parts << %Q[{ #{ command } 2>&4 ; echo "#{ index }|$?:" >&3 ; }]
      end
      %Q[{ #{ parts.join(' | ') } } 3>&1 1>&2 4>&2]
    end

    # Memoized STDERR report; false when no STDERR output was captured.
    def stderr_messages
      @stderr_messages ||= @stderr.empty? ? false : <<-EOS.gsub(/^ +/, ' ')
        Pipeline STDERR Messages:
        (Note: may be interleaved if multiple commands returned error messages)

        #{ @stderr }
      EOS
    end

  end
end
# encoding: utf-8

module Backup
  class Splitter
    include Utilities::Helpers

    attr_reader :package, :chunk_size, :suffix_length

    # +chunk_size+ is in MB; +suffix_length+ is the number of characters
    # `split` appends to each chunk's filename.
    def initialize(model, chunk_size, suffix_length)
      @package = model.package
      @chunk_size = chunk_size
      @suffix_length = suffix_length
    end

    ##
    # Called as part of the packaging procedure. Yields the `split`
    # portion of the pipeline command line, which divides the data piped
    # into it into multiple files based on @chunk_size and @suffix_length.
    # When the packaging procedure completes and control returns here,
    # the resulting files are inspected and @package.chunk_suffixes is
    # set (or the single-file suffix is stripped).
    def split_with
      Logger.info "Splitter configured with a chunk size of #{ chunk_size }MB " +
          "and suffix length of #{ suffix_length }."
      yield split_command
      after_packaging
    end

    private

    ##
    # The `split` command reads from $stdin and writes its output into
    # multiple files named with the full path to the final
    # @package.basename plus a '-' separator as the prefix.
    def split_command
      "#{ utility(:split) } -a #{ suffix_length } -b #{ chunk_size }m - " +
          "'#{ File.join(Config.tmp_path, package.basename + '-') }'"
    end

    ##
    # Inspects the files produced by the packaging procedure.
    # If only one file was written (the chunk size was never reached),
    # `split` still suffixed it (e.g. '-aa', length per @suffix_length),
    # so that suffix is simply removed. Otherwise the Array of suffixes
    # is stored in @package.chunk_suffixes.
    def after_packaging
      observed = chunk_suffixes
      single_suffix = 'a' * suffix_length
      if observed == [single_suffix]
        FileUtils.mv(
          File.join(Config.tmp_path, "#{ package.basename }-#{ single_suffix }"),
          File.join(Config.tmp_path, package.basename)
        )
      else
        package.chunk_suffixes = observed
      end
    end

    ##
    # Suffixes for each chunk file, in alphabetical order.
    # e.g. [aa, ab, ac] or [aaa, aab, aac]
    def chunk_suffixes
      chunks.map { |chunk| File.extname(chunk).split('-').last }.sort
    end

    ##
    # Full paths to the backup chunks, sorted alphabetically.
    def chunks
      Dir[File.join(Config.tmp_path, package.basename + '-*')].sort
    end

  end
end
# encoding: utf-8

module Backup
  module Storage
    class Base
      include Config::Helpers

      ##
      # Base path on the remote where backup package files will be stored.
      attr_accessor :path

      ##
      # Limit on how many backups to keep in the remote location.
      # When exceeded, the oldest are removed to make room for the newest.
      attr_accessor :keep

      attr_reader :model, :package, :storage_id

      ##
      # +storage_id+ is a user-defined string used to uniquely identify
      # multiple storages of the same type; it is required when a single
      # backup model uses more than one storage of a given type.
      # Non-word characters are normalized to '_' since the id is appended
      # to the YAML storage file used for cycling backups.
      def initialize(model, storage_id = nil, &block)
        @model = model
        @package = model.package
        @storage_id = storage_id.to_s.gsub(/\W/, '_') if storage_id

        load_defaults!
        instance_eval(&block) if block_given?
      end

      # Transfers the package, then cycles old backups when the subclass
      # supports cycling and `keep` is set to a positive number.
      def perform!
        Logger.info "#{ storage_name } Started..."
        transfer!
        cycle! if respond_to?(:cycle!, true) && keep.to_i > 0
        Logger.info "#{ storage_name } Finished!"
      end

      private

      ##
      # Remote path for the current (or given) package:
      # <path>/<trigger>/<time>, omitting <path> when it is empty.
      def remote_path(pkg = package)
        if path.empty?
          File.join(pkg.trigger, pkg.time)
        else
          File.join(path, pkg.trigger, pkg.time)
        end
      end
      alias :remote_path_for :remote_path

      # Display name used in log messages, e.g. "Storage::S3 (my_id)".
      def storage_name
        @storage_name ||= self.class.to_s.sub('Backup::', '') +
            (storage_id ? " (#{ storage_id })" : '')
      end

    end
  end
end
# encoding: utf-8
require 'backup/cloud_io/cloud_files'

module Backup
  module Storage
    class CloudFiles < Base
      include Storage::Cycler
      class Error < Backup::Error; end

      ##
      # Rackspace CloudFiles Credentials
      attr_accessor :username, :api_key

      ##
      # Rackspace Auth URL (optional)
      attr_accessor :auth_url

      ##
      # Rackspace Service Net
      # (LAN-based transfers to avoid charges and improve performance)
      attr_accessor :servicenet

      ##
      # Rackspace Region (optional)
      attr_accessor :region

      ##
      # Rackspace Container Name
      attr_accessor :container

      ##
      # Rackspace Container Name for SLO Segments.
      # Required if #segment_size is set; must differ from #container.
      attr_accessor :segments_container

      ##
      # SLO Segment size, in MiB.
      #
      # Package files larger than +segment_size+ are uploaded as
      # Static Large Objects (SLO).
      #
      # Defaults to 0 (SLO support disabled) for backward compatibility
      # (pre v.3.7.0), since #segments_container would be required.
      #
      # Minimum: 1. Maximum: 5120 (5 GiB).
      attr_accessor :segment_size

      ##
      # If set, all backup package files (including SLO segments) will be
      # scheduled for automatic removal by the server.
      #
      # The `keep` option should not be used together with this,
      # unless you're transitioning from the `keep` option.
      attr_accessor :days_to_keep

      ##
      # Number of times to retry failed operations. Default: 10
      attr_accessor :max_retries

      ##
      # Seconds to pause before each retry. Default: 30
      attr_accessor :retry_waitsec

      ##
      # Additional options to pass along to fog.
      # e.g. Fog::Storage.new({ :provider => 'Rackspace' }.merge(fog_options))
      attr_accessor :fog_options

      def initialize(model, storage_id = nil)
        super

        @servicenet    ||= false
        @segment_size  ||= 0
        @max_retries   ||= 10
        @retry_waitsec ||= 30

        @path ||= 'backups'
        path.sub!(/^\//, '')

        check_configuration
      end

      private

      # Memoized CloudIO client configured from this storage's settings.
      def cloud_io
        @cloud_io ||= CloudIO::CloudFiles.new(
          username:           username,
          api_key:            api_key,
          auth_url:           auth_url,
          region:             region,
          servicenet:         servicenet,
          container:          container,
          segments_container: segments_container,
          segment_size:       segment_size,
          days_to_keep:       days_to_keep,
          max_retries:        max_retries,
          retry_waitsec:      retry_waitsec,
          fog_options:        fog_options
        )
      end

      # Uploads each package file from the tmp path to the remote path.
      # When days_to_keep is set, the server handles expiry, so cycling
      # is disabled for this package.
      def transfer!
        package.filenames.each do |filename|
          src = File.join(Config.tmp_path, filename)
          dest = File.join(remote_path, filename)
          Logger.info "Storing '#{ container }/#{ dest }'..."
          cloud_io.upload(src, dest)
        end

        package.no_cycle = true if days_to_keep
      end

      # Called by the Cycler.
      # Any error raised will be logged as a warning.
      def remove!(package)
        Logger.info "Removing backup package dated #{ package.time }..."

        remote_path = remote_path_for(package)
        objects = cloud_io.objects(remote_path)

        raise Error, "Package at '#{ remote_path }' not found" if objects.empty?

        slo_objects, objects = objects.partition(&:slo?)
        cloud_io.delete_slo(slo_objects)
        cloud_io.delete(objects)
      end

      # Validates required settings and SLO constraints; raises Error
      # with a descriptive message on any violation.
      def check_configuration
        required = %w{ username api_key container }
        raise Error, <<-EOS if required.map {|name| send(name) }.any?(&:nil?)
          Configuration Error
          #{ required.map {|name| "##{ name }"}.join(', ') } are all required
        EOS

        raise Error, <<-EOS if segment_size > 0 && segments_container.to_s.empty?
          Configuration Error
          #segments_container is required if #segment_size is > 0
        EOS

        raise Error, <<-EOS if container == segments_container
          Configuration Error
          #container and #segments_container must not be the same container.
        EOS

        raise Error, <<-EOS if segment_size > 5120
          Configuration Error
          #segment_size is too large (max 5120)
        EOS
      end

    end
  end
end