backedup 5.0.0.beta.3

Files changed (144)
  1. checksums.yaml +7 -0
  2. data/LICENSE +19 -0
  3. data/README.md +33 -0
  4. data/bin/backedup +5 -0
  5. data/bin/docker_test +24 -0
  6. data/lib/backup/archive.rb +169 -0
  7. data/lib/backup/binder.rb +18 -0
  8. data/lib/backup/cleaner.rb +112 -0
  9. data/lib/backup/cli.rb +370 -0
  10. data/lib/backup/cloud_io/base.rb +38 -0
  11. data/lib/backup/cloud_io/cloud_files.rb +296 -0
  12. data/lib/backup/cloud_io/gcs.rb +121 -0
  13. data/lib/backup/cloud_io/s3.rb +253 -0
  14. data/lib/backup/cloud_io/swift.rb +96 -0
  15. data/lib/backup/compressor/base.rb +32 -0
  16. data/lib/backup/compressor/bzip2.rb +35 -0
  17. data/lib/backup/compressor/custom.rb +49 -0
  18. data/lib/backup/compressor/gzip.rb +73 -0
  19. data/lib/backup/compressor/pbzip2.rb +45 -0
  20. data/lib/backup/config/dsl.rb +102 -0
  21. data/lib/backup/config/helpers.rb +137 -0
  22. data/lib/backup/config.rb +118 -0
  23. data/lib/backup/database/base.rb +86 -0
  24. data/lib/backup/database/mongodb.rb +186 -0
  25. data/lib/backup/database/mysql.rb +191 -0
  26. data/lib/backup/database/openldap.rb +93 -0
  27. data/lib/backup/database/postgresql.rb +164 -0
  28. data/lib/backup/database/redis.rb +176 -0
  29. data/lib/backup/database/riak.rb +79 -0
  30. data/lib/backup/database/sqlite.rb +55 -0
  31. data/lib/backup/encryptor/base.rb +27 -0
  32. data/lib/backup/encryptor/gpg.rb +737 -0
  33. data/lib/backup/encryptor/open_ssl.rb +74 -0
  34. data/lib/backup/errors.rb +53 -0
  35. data/lib/backup/logger/console.rb +48 -0
  36. data/lib/backup/logger/fog_adapter.rb +25 -0
  37. data/lib/backup/logger/logfile.rb +131 -0
  38. data/lib/backup/logger/syslog.rb +114 -0
  39. data/lib/backup/logger.rb +197 -0
  40. data/lib/backup/model.rb +472 -0
  41. data/lib/backup/notifier/base.rb +126 -0
  42. data/lib/backup/notifier/campfire.rb +61 -0
  43. data/lib/backup/notifier/command.rb +99 -0
  44. data/lib/backup/notifier/datadog.rb +104 -0
  45. data/lib/backup/notifier/flowdock.rb +99 -0
  46. data/lib/backup/notifier/hipchat.rb +116 -0
  47. data/lib/backup/notifier/http_post.rb +114 -0
  48. data/lib/backup/notifier/mail.rb +232 -0
  49. data/lib/backup/notifier/nagios.rb +65 -0
  50. data/lib/backup/notifier/pagerduty.rb +79 -0
  51. data/lib/backup/notifier/prowl.rb +68 -0
  52. data/lib/backup/notifier/pushover.rb +71 -0
  53. data/lib/backup/notifier/ses.rb +123 -0
  54. data/lib/backup/notifier/slack.rb +147 -0
  55. data/lib/backup/notifier/twitter.rb +55 -0
  56. data/lib/backup/notifier/zabbix.rb +60 -0
  57. data/lib/backup/package.rb +51 -0
  58. data/lib/backup/packager.rb +106 -0
  59. data/lib/backup/pipeline.rb +120 -0
  60. data/lib/backup/splitter.rb +73 -0
  61. data/lib/backup/storage/base.rb +66 -0
  62. data/lib/backup/storage/cloud_files.rb +156 -0
  63. data/lib/backup/storage/cycler.rb +70 -0
  64. data/lib/backup/storage/dropbox.rb +206 -0
  65. data/lib/backup/storage/ftp.rb +116 -0
  66. data/lib/backup/storage/gcs.rb +93 -0
  67. data/lib/backup/storage/local.rb +61 -0
  68. data/lib/backup/storage/qiniu.rb +65 -0
  69. data/lib/backup/storage/rsync.rb +246 -0
  70. data/lib/backup/storage/s3.rb +155 -0
  71. data/lib/backup/storage/scp.rb +65 -0
  72. data/lib/backup/storage/sftp.rb +80 -0
  73. data/lib/backup/storage/swift.rb +124 -0
  74. data/lib/backup/storage/webdav.rb +102 -0
  75. data/lib/backup/syncer/base.rb +67 -0
  76. data/lib/backup/syncer/cloud/base.rb +176 -0
  77. data/lib/backup/syncer/cloud/cloud_files.rb +81 -0
  78. data/lib/backup/syncer/cloud/local_file.rb +97 -0
  79. data/lib/backup/syncer/cloud/s3.rb +109 -0
  80. data/lib/backup/syncer/rsync/base.rb +50 -0
  81. data/lib/backup/syncer/rsync/local.rb +27 -0
  82. data/lib/backup/syncer/rsync/pull.rb +47 -0
  83. data/lib/backup/syncer/rsync/push.rb +201 -0
  84. data/lib/backup/template.rb +41 -0
  85. data/lib/backup/utilities.rb +234 -0
  86. data/lib/backup/version.rb +3 -0
  87. data/lib/backup.rb +145 -0
  88. data/templates/cli/archive +28 -0
  89. data/templates/cli/compressor/bzip2 +4 -0
  90. data/templates/cli/compressor/custom +7 -0
  91. data/templates/cli/compressor/gzip +4 -0
  92. data/templates/cli/config +123 -0
  93. data/templates/cli/databases/mongodb +15 -0
  94. data/templates/cli/databases/mysql +18 -0
  95. data/templates/cli/databases/openldap +24 -0
  96. data/templates/cli/databases/postgresql +16 -0
  97. data/templates/cli/databases/redis +16 -0
  98. data/templates/cli/databases/riak +17 -0
  99. data/templates/cli/databases/sqlite +11 -0
  100. data/templates/cli/encryptor/gpg +27 -0
  101. data/templates/cli/encryptor/openssl +9 -0
  102. data/templates/cli/model +26 -0
  103. data/templates/cli/notifier/zabbix +15 -0
  104. data/templates/cli/notifiers/campfire +12 -0
  105. data/templates/cli/notifiers/command +32 -0
  106. data/templates/cli/notifiers/datadog +57 -0
  107. data/templates/cli/notifiers/flowdock +16 -0
  108. data/templates/cli/notifiers/hipchat +16 -0
  109. data/templates/cli/notifiers/http_post +32 -0
  110. data/templates/cli/notifiers/mail +24 -0
  111. data/templates/cli/notifiers/nagios +13 -0
  112. data/templates/cli/notifiers/pagerduty +12 -0
  113. data/templates/cli/notifiers/prowl +11 -0
  114. data/templates/cli/notifiers/pushover +11 -0
  115. data/templates/cli/notifiers/ses +15 -0
  116. data/templates/cli/notifiers/slack +22 -0
  117. data/templates/cli/notifiers/twitter +13 -0
  118. data/templates/cli/splitter +7 -0
  119. data/templates/cli/storages/cloud_files +11 -0
  120. data/templates/cli/storages/dropbox +20 -0
  121. data/templates/cli/storages/ftp +13 -0
  122. data/templates/cli/storages/gcs +8 -0
  123. data/templates/cli/storages/local +8 -0
  124. data/templates/cli/storages/qiniu +12 -0
  125. data/templates/cli/storages/rsync +17 -0
  126. data/templates/cli/storages/s3 +16 -0
  127. data/templates/cli/storages/scp +15 -0
  128. data/templates/cli/storages/sftp +15 -0
  129. data/templates/cli/storages/swift +19 -0
  130. data/templates/cli/storages/webdav +13 -0
  131. data/templates/cli/syncers/cloud_files +22 -0
  132. data/templates/cli/syncers/rsync_local +20 -0
  133. data/templates/cli/syncers/rsync_pull +28 -0
  134. data/templates/cli/syncers/rsync_push +28 -0
  135. data/templates/cli/syncers/s3 +27 -0
  136. data/templates/general/links +3 -0
  137. data/templates/general/version.erb +2 -0
  138. data/templates/notifier/mail/failure.erb +16 -0
  139. data/templates/notifier/mail/success.erb +16 -0
  140. data/templates/notifier/mail/warning.erb +16 -0
  141. data/templates/storage/dropbox/authorization_url.erb +6 -0
  142. data/templates/storage/dropbox/authorized.erb +4 -0
  143. data/templates/storage/dropbox/cache_file_written.erb +10 -0
  144. metadata +1255 -0
data/lib/backup/cloud_io/gcs.rb
@@ -0,0 +1,121 @@
+# encoding: utf-8
+require "backup/cloud_io/base"
+require "fog"
+
+module Backup
+  module CloudIO
+    class GCS < Base
+      class Error < Backup::Error; end
+
+      MAX_FILE_SIZE = 1024**4 * 5 # 5 TiB
+
+      attr_reader :google_storage_access_key_id, :google_storage_secret_access_key,
+        :bucket, :fog_options
+
+      def initialize(options = {})
+        super
+
+        @google_storage_access_key_id = options[:google_storage_access_key_id]
+        @google_storage_secret_access_key = options[:google_storage_secret_access_key]
+        @bucket = options[:bucket]
+        @fog_options = options[:fog_options]
+      end
+
+      # The Syncer may call this method in multiple threads.
+      # However, #objects is always called prior to multithreading.
+      def upload(src, dest)
+        file_size = File.size(src)
+        raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
+          File Too Large
+          File: #{src}
+          Size: #{file_size}
+          Max File Size is #{MAX_FILE_SIZE} (5 TiB)
+        EOS
+
+        put_object(src, dest)
+      end
+
+      # Returns all objects in the bucket with the given prefix.
+      #
+      # - #get_bucket returns a max of 1000 objects per request.
+      # - Returns objects in alphabetical order.
+      # - If marker is given, only objects after the marker are in the response.
+      def objects(prefix)
+        objects = []
+        resp = nil
+        prefix = prefix.chomp("/")
+        opts = { "prefix" => prefix + "/" }
+
+        while resp.nil? || resp.body["IsTruncated"]
+          opts["marker"] = objects.last.key unless objects.empty?
+          with_retries("GET '#{bucket}/#{prefix}/*'") do
+            resp = connection.get_bucket(bucket, opts)
+          end
+          resp.body["Contents"].each do |obj_data|
+            objects << Object.new(self, obj_data)
+          end
+        end
+
+        objects
+      end
+
+      # Delete object(s) from the bucket.
+      #
+      # - Called by the Storage (with objects) and the Syncer (with keys)
+      # - Missing objects will be ignored.
+      def delete(objects_or_keys)
+        keys = Array(objects_or_keys).dup
+        keys.map!(&:key) if keys.first.is_a?(Object)
+        keys.each do |key|
+          with_retries("DELETE object") do
+            begin
+              connection.delete(bucket, key)
+            rescue StandardError => e
+              raise Error, "The server returned the following:\n#{e.message}"
+            end
+          end
+        end
+      end
+
+      private
+
+      def connection
+        @connection ||=
+          begin
+            opts = { provider: "Google",
+                     google_storage_access_key_id: google_storage_access_key_id,
+                     google_storage_secret_access_key: google_storage_secret_access_key }
+
+            opts.merge!(fog_options || {})
+            conn = Fog::Storage.new(opts)
+            conn
+          end
+      end
+
+      def put_object(src, dest)
+        md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
+        options = { "Content-MD5" => md5 }
+        with_retries("PUT '#{bucket}/#{dest}'") do
+          File.open(src, "r") do |file|
+            begin
+              connection.put_object(bucket, dest, file, options)
+            rescue StandardError => e
+              raise Error, "The server returned the following:\n#{e.message}\n"
+            end
+          end
+        end
+      end
+
+      class Object
+        attr_reader :key, :etag, :storage_class
+
+        def initialize(cloud_io, data)
+          @cloud_io = cloud_io
+          @key = data["Key"]
+          @etag = data["ETag"]
+          @storage_class = data["StorageClass"]
+        end
+      end
+    end
+  end
+end
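In practice the Storage and Syncer classes construct this object, but a minimal usage sketch may help illustrate the interface (not part of the diff; bucket name and credentials are placeholders, and :max_retries/:retry_waitsec are assumed to be handled by CloudIO::Base):

require "backup"
require "backup/cloud_io/gcs"

# Hypothetical credentials and bucket, for illustration only.
cloud_io = Backup::CloudIO::GCS.new(
  google_storage_access_key_id: "GOOG_EXAMPLE_KEY",
  google_storage_secret_access_key: "example-secret",
  bucket: "my-backups",
  max_retries: 10,
  retry_waitsec: 30
)

cloud_io.upload("/tmp/trigger.tar", "backups/trigger.tar")  # single MD5-verified PUT
cloud_io.objects("backups").each { |obj| puts obj.key }     # paginated listing, 1000 per request
cloud_io.delete(["backups/trigger.tar"])                    # per-key DELETE, retried on failure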
data/lib/backup/cloud_io/s3.rb
@@ -0,0 +1,253 @@
+require "backup/cloud_io/base"
+require "fog"
+require "digest/md5"
+require "base64"
+require "stringio"
+
+module Backup
+  module CloudIO
+    class S3 < Base
+      class Error < Backup::Error; end
+
+      MAX_FILE_SIZE      = 1024**3 * 5 # 5 GiB
+      MAX_MULTIPART_SIZE = 1024**4 * 5 # 5 TiB
+
+      attr_reader :access_key_id, :secret_access_key, :use_iam_profile,
+        :region, :bucket, :chunk_size, :encryption, :storage_class,
+        :fog_options
+
+      def initialize(options = {})
+        super
+
+        @access_key_id     = options[:access_key_id]
+        @secret_access_key = options[:secret_access_key]
+        @use_iam_profile   = options[:use_iam_profile]
+        @region            = options[:region]
+        @bucket            = options[:bucket]
+        @chunk_size        = options[:chunk_size]
+        @encryption        = options[:encryption]
+        @storage_class     = options[:storage_class]
+        @fog_options       = options[:fog_options]
+      end
+
+      # The Syncer may call this method in multiple threads.
+      # However, #objects is always called prior to multithreading.
+      def upload(src, dest)
+        file_size = File.size(src)
+        chunk_bytes = chunk_size * 1024**2
+        if chunk_bytes > 0 && file_size > chunk_bytes
+          raise FileSizeError, <<-EOS if file_size > MAX_MULTIPART_SIZE
+            File Too Large
+            File: #{src}
+            Size: #{file_size}
+            Max Multipart Upload Size is #{MAX_MULTIPART_SIZE} (5 TiB)
+          EOS
+
+          chunk_bytes = adjusted_chunk_bytes(chunk_bytes, file_size)
+          upload_id = initiate_multipart(dest)
+          parts = upload_parts(src, dest, upload_id, chunk_bytes, file_size)
+          complete_multipart(dest, upload_id, parts)
+        else
+          raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
+            File Too Large
+            File: #{src}
+            Size: #{file_size}
+            Max File Size is #{MAX_FILE_SIZE} (5 GiB)
+          EOS
+
+          put_object(src, dest)
+        end
+      end
+
+      # Returns all objects in the bucket with the given prefix.
+      #
+      # - #get_bucket returns a max of 1000 objects per request.
+      # - Returns objects in alphabetical order.
+      # - If marker is given, only objects after the marker are in the response.
+      def objects(prefix)
+        objects = []
+        resp = nil
+        prefix = prefix.chomp("/")
+        opts = { "prefix" => prefix + "/" }
+
+        while resp.nil? || resp.body["IsTruncated"]
+          opts["marker"] = objects.last.key unless objects.empty?
+          with_retries("GET '#{bucket}/#{prefix}/*'") do
+            resp = connection.get_bucket(bucket, opts)
+          end
+          resp.body["Contents"].each do |obj_data|
+            objects << Object.new(self, obj_data)
+          end
+        end
+
+        objects
+      end
+
+      # Used by Object to fetch metadata if needed.
+      def head_object(object)
+        resp = nil
+        with_retries("HEAD '#{bucket}/#{object.key}'") do
+          resp = connection.head_object(bucket, object.key)
+        end
+        resp
+      end
+
+      # Delete object(s) from the bucket.
+      #
+      # - Called by the Storage (with objects) and the Syncer (with keys)
+      # - Deletes 1000 objects per request.
+      # - Missing objects will be ignored.
+      def delete(objects_or_keys)
+        keys = Array(objects_or_keys).dup
+        keys.map!(&:key) if keys.first.is_a?(Object)
+
+        opts = { quiet: true } # only report Errors in DeleteResult
+        until keys.empty?
+          keys_partial = keys.slice!(0, 1000)
+          with_retries("DELETE Multiple Objects") do
+            resp = connection.delete_multiple_objects(bucket, keys_partial, opts.dup)
+            unless resp.body["DeleteResult"].empty?
+              errors = resp.body["DeleteResult"].map do |result|
+                error = result["Error"]
+                "Failed to delete: #{error["Key"]}\n" \
+                "Reason: #{error["Code"]}: #{error["Message"]}"
+              end.join("\n")
+              raise Error, "The server returned the following:\n#{errors}"
+            end
+          end
+        end
+      end
+
+      private
+
+      def connection
+        @connection ||=
+          begin
+            opts = { provider: "AWS", region: region }
+            if use_iam_profile
+              opts[:use_iam_profile] = true
+            else
+              opts[:aws_access_key_id] = access_key_id
+              opts[:aws_secret_access_key] = secret_access_key
+            end
+            opts.merge!(fog_options || {})
+            conn = Fog::Storage.new(opts)
+            conn.sync_clock
+            conn
+          end
+      end
+
+      def put_object(src, dest)
+        md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
+        options = headers.merge("Content-MD5" => md5)
+        with_retries("PUT '#{bucket}/#{dest}'") do
+          File.open(src, "r") do |file|
+            connection.put_object(bucket, dest, file, options)
+          end
+        end
+      end
+
+      def initiate_multipart(dest)
+        Logger.info "\s\sInitiate Multipart '#{bucket}/#{dest}'"
+
+        resp = nil
+        with_retries("POST '#{bucket}/#{dest}' (Initiate)") do
+          resp = connection.initiate_multipart_upload(bucket, dest, headers)
+        end
+        resp.body["UploadId"]
+      end
+
+      # Each part's MD5 is sent to verify the transfer.
+      # AWS will concatenate all parts into a single object
+      # once the multipart upload is completed.
+      def upload_parts(src, dest, upload_id, chunk_bytes, file_size)
+        total_parts = (file_size / chunk_bytes.to_f).ceil
+        progress = (0.1..0.9).step(0.1).map { |n| (total_parts * n).floor }
+        Logger.info "\s\sUploading #{total_parts} Parts..."
+
+        parts = []
+        File.open(src, "r") do |file|
+          part_number = 0
+          while data = file.read(chunk_bytes)
+            part_number += 1
+            md5 = Base64.encode64(Digest::MD5.digest(data)).chomp
+
+            with_retries("PUT '#{bucket}/#{dest}' Part ##{part_number}") do
+              resp = connection.upload_part(
+                bucket, dest, upload_id, part_number, StringIO.new(data),
+                "Content-MD5" => md5
+              )
+              parts << resp.headers["ETag"]
+            end
+
+            if i = progress.rindex(part_number)
+              Logger.info "\s\s...#{i + 1}0% Complete..."
+            end
+          end
+        end
+        parts
+      end
+
+      def complete_multipart(dest, upload_id, parts)
+        Logger.info "\s\sComplete Multipart '#{bucket}/#{dest}'"
+
+        with_retries("POST '#{bucket}/#{dest}' (Complete)") do
+          resp = connection.complete_multipart_upload(bucket, dest, upload_id, parts)
+          raise Error, <<-EOS if resp.body["Code"]
+            The server returned the following error:
+            #{resp.body["Code"]}: #{resp.body["Message"]}
+          EOS
+        end
+      end
+
+      def headers
+        headers = {}
+
+        enc = encryption.to_s.upcase
+        headers["x-amz-server-side-encryption"] = enc unless enc.empty?
+
+        sc = storage_class.to_s.upcase
+        headers["x-amz-storage-class"] = sc unless sc.empty? || sc == "STANDARD"
+
+        headers
+      end
+
+      def adjusted_chunk_bytes(chunk_bytes, file_size)
+        return chunk_bytes if file_size / chunk_bytes.to_f <= 10_000
+
+        mb = orig_mb = chunk_bytes / 1024**2
+        mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
+        Logger.warn Error.new(<<-EOS)
+          Chunk Size Adjusted
+          Your original #chunk_size of #{orig_mb} MiB has been adjusted
+          to #{mb} MiB in order to satisfy the limit of 10,000 chunks.
+          To enforce your chosen #chunk_size, you should use the Splitter.
+          e.g. split_into_chunks_of #{mb * 10_000} (#chunk_size * 10_000)
+        EOS
+        1024**2 * mb
+      end
+
+      class Object
+        attr_reader :key, :etag, :storage_class
+
+        def initialize(cloud_io, data)
+          @cloud_io = cloud_io
+          @key = data["Key"]
+          @etag = data["ETag"]
+          @storage_class = data["StorageClass"]
+        end
+
+        # currently 'AES256' or nil
+        def encryption
+          metadata["x-amz-server-side-encryption"]
+        end
+
+        private
+
+        def metadata
+          @metadata ||= @cloud_io.head_object(self).headers
+        end
+      end
+    end
+  end
+end
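The 10,000-part limit in #adjusted_chunk_bytes is easiest to see with numbers. The sketch below is standalone and simply mirrors that private method's arithmetic rather than calling it; the 60 GiB file and 5 MiB chunk_size are made-up inputs:

# A 60 GiB file with chunk_size 5 would need 12,288 parts, which exceeds
# S3's 10,000-part multipart limit, so the chunk size is bumped until it fits.
file_size   = 60 * 1024**3   # 60 GiB
chunk_bytes = 5 * 1024**2    # chunk_size 5 (MiB)

mb = chunk_bytes / 1024**2
mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
puts mb   # => 7, i.e. the upload proceeds with 7 MiB parts (8,778 parts)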
data/lib/backup/cloud_io/swift.rb
@@ -0,0 +1,96 @@
+require "backup/cloud_io/base"
+require "fog/openstack"
+
+LARGE_FILE = 5 * 1024**3 - 1
+
+module Backup
+  module CloudIO
+    class Swift < Base
+      class Error < Backup::Error; end
+      # Handle fog-openstack namespace change, as they moved everything under
+      # the OpenStack namespace starting at version 1.0
+      Storage = if Fog::Storage.const_defined? :OpenStack
+                  Fog::Storage::OpenStack
+                else
+                  Fog::OpenStack::Storage
+                end
+
+      attr_reader :username, :password, :tenant, :region,
+        :container, :auth_url, :max_retries,
+        :retry_waitsec, :fog_options, :batch_size
+
+      def initialize(opts = {})
+        super
+
+        @username      = opts[:username]
+        @password      = opts[:password]
+        @tenant        = opts[:tenant_name]
+        @container     = opts[:container]
+        @auth_url      = opts[:auth_url]
+        @region        = opts[:region]
+        @max_retries   = opts[:max_retries]
+        @retry_waitsec = opts[:retry_waitsec]
+        @batch_size    = opts[:batch_size]
+        @fog_options   = opts[:fog_options]
+      end
+
+      def upload(src, dest)
+        file_size = File.size(src)
+
+        raise FileSizeError, <<-EOS if file_size > LARGE_FILE
+          [FIXME] File Too Large
+          File: #{src}
+          Size: #{file_size}
+          Max Swift Upload Size is #{LARGE_FILE} (5 Gb) (FIXME)
+        EOS
+
+        directory.files.create key: dest, body: File.open(src)
+      end
+
+      def delete(objects_or_keys)
+        keys = Array(objects_or_keys).dup
+        keys = keys.map(&:key) unless keys.first.is_a?(String)
+
+        until keys.empty?
+          key = keys.slice!(0, batch_size)
+          with_retries("DELETE Multiple Objects") do
+            resp = connection.delete_multiple_objects(container, key)
+            if resp.data[:status] != 200
+              raise Error, <<-EOS
+                Failed to delete.
+                Status = #{resp.data[:status]}
+                Reason = #{resp.data[:reason_phrase]}
+                Body = #{resp.data[:body]}
+              EOS
+            end
+          end
+        end
+      end
+
+      def objects(prefix)
+        directory.files.all(prefix: prefix.chomp("/") + "/")
+      end
+
+      private
+
+      def directory
+        @directory ||= connection.directories.get container
+      end
+
+      def connection
+        @connection ||= begin
+          opts = {
+            openstack_auth_url: auth_url,
+            openstack_username: username,
+            openstack_api_key: password
+          }
+          opts[:openstack_region] = region unless region.nil?
+          opts[:openstack_tenant] = tenant unless tenant.nil?
+
+          opts.merge!(fog_options || {})
+          Storage.new(opts)
+        end
+      end
+    end
+  end
+end
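A hedged sketch of driving CloudIO::Swift directly (not part of the diff; the endpoint, credentials, and container are placeholders). It highlights that deletion happens in slices of batch_size keys per delete_multiple_objects request:

require "backup"
require "backup/cloud_io/swift"

cloud_io = Backup::CloudIO::Swift.new(
  auth_url: "https://keystone.example.com/v2.0/tokens",   # placeholder endpoint
  username: "demo", password: "secret", tenant_name: "demo-tenant",
  region: "RegionOne", container: "backups",
  batch_size: 1000,                 # keys sent per delete_multiple_objects call
  max_retries: 10, retry_waitsec: 30
)

stale_keys = cloud_io.objects("backups/2019").map(&:key)  # fog file models respond to #key
cloud_io.delete(stale_keys)                               # deleted in batch_size slices, with retries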
data/lib/backup/compressor/base.rb
@@ -0,0 +1,32 @@
+module Backup
+  module Compressor
+    class Base
+      include Utilities::Helpers
+      include Config::Helpers
+
+      ##
+      # Yields to the block the compressor command and filename extension.
+      def compress_with
+        log!
+        yield @cmd, @ext
+      end
+
+      private
+
+      ##
+      # Return the compressor name, with Backup namespace removed
+      def compressor_name
+        self.class.to_s.sub("Backup::", "")
+      end
+
+      ##
+      # Logs a message to the console and log file to inform
+      # the client that Backup is using the compressor
+      def log!
+        Logger.info "Using #{compressor_name} for compression.\n" \
+          "  Command: '#{@cmd}'\n" \
+          "  Ext: '#{@ext}'"
+      end
+    end
+  end
+end
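Every concrete compressor only has to set @cmd and @ext; #compress_with hands both to the caller. A minimal sketch of how a consumer might use the yielded values (the real Packager wires this into its own pipeline; this block is only an illustration and assumes a system gzip is installed):

require "backup"

compressor = Backup::Compressor::Gzip.new
compressor.compress_with do |cmd, ext|
  # cmd is the full path to gzip plus any options; ext is ".gz".
  system("cat my_backup.tar | #{cmd} > my_backup.tar#{ext}")
end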
data/lib/backup/compressor/bzip2.rb
@@ -0,0 +1,35 @@
+module Backup
+  module Compressor
+    class Bzip2 < Base
+      ##
+      # Specify the level of compression to use.
+      #
+      # Values should be a single digit from 1 to 9.
+      # Note that setting the level to either extreme may or may not
+      # give the desired result. Be sure to check the documentation
+      # for the compressor being used.
+      #
+      # The default `level` is 9.
+      attr_accessor :level
+
+      ##
+      # Creates a new instance of Backup::Compressor::Bzip2
+      def initialize(&block)
+        load_defaults!
+
+        @level ||= false
+
+        instance_eval(&block) if block_given?
+
+        @cmd = "#{utility(:bzip2)}#{options}"
+        @ext = ".bz2"
+      end
+
+      private
+
+      def options
+        " -#{@level}" if @level
+      end
+    end
+  end
+end
data/lib/backup/compressor/custom.rb
@@ -0,0 +1,49 @@
+module Backup
+  module Compressor
+    class Custom < Base
+      ##
+      # Specify the system command to invoke a compressor,
+      # including any command-line arguments.
+      # e.g. @compressor.command = 'pbzip2 -p2 -4'
+      #
+      # The data to be compressed will be piped to the command's STDIN,
+      # and it should write the compressed data to STDOUT.
+      # i.e. `cat file.tar | %command% > file.tar.%extension%`
+      attr_accessor :command
+
+      ##
+      # File extension to append to the compressed file's filename.
+      # e.g. @compressor.extension = '.bz2'
+      attr_accessor :extension
+
+      ##
+      # Initializes a new custom compressor.
+      def initialize(&block)
+        load_defaults!
+
+        instance_eval(&block) if block_given?
+
+        @cmd = set_cmd
+        @ext = set_ext
+      end
+
+      private
+
+      ##
+      # Return the command line using the full path.
+      # Ensures the command exists and is executable.
+      def set_cmd
+        parts = @command.to_s.split(" ")
+        parts[0] = utility(parts[0])
+        parts.join(" ")
+      end
+
+      ##
+      # Return the extension given without whitespace.
+      # If extension was not set, return an empty string
+      def set_ext
+        @extension.to_s.strip
+      end
+    end
+  end
+end
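A hedged configuration sketch for the custom compressor, assuming this fork keeps the upstream Backup model DSL (it would live in a model file such as models/my_backup.rb; the archive path is a placeholder):

Model.new(:my_backup, 'My Backup') do
  archive :app do |archive|
    archive.add '/var/www/app'
  end

  # Pipe the tar stream through an arbitrary command; the first word
  # is resolved to a full path and must exist on the system.
  compress_with Custom do |compressor|
    compressor.command   = 'pbzip2 -p2 -4'
    compressor.extension = '.bz2'
  end
end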
data/lib/backup/compressor/gzip.rb
@@ -0,0 +1,73 @@
+module Backup
+  module Compressor
+    class Gzip < Base
+      class Error < Backup::Error; end
+      extend Utilities::Helpers
+
+      ##
+      # Specify the level of compression to use.
+      #
+      # Values should be a single digit from 1 to 9.
+      # Note that setting the level to either extreme may or may not
+      # give the desired result. Be sure to check the documentation
+      # for the compressor being used.
+      #
+      # The default `level` is 6.
+      attr_accessor :level
+
+      ##
+      # Use the `--rsyncable` option with `gzip`.
+      #
+      # This option directs `gzip` to compress data using an algorithm that
+      # allows `rsync` to efficiently detect changes. This is especially useful
+      # when used to compress `Archive` or `Database` backups that will be
+      # stored using Backup's `RSync` Storage option.
+      #
+      # The `--rsyncable` option is only available on patched versions of `gzip`.
+      # While most distributions apply this patch, this option may not be
+      # available on your system. If it's not available, Backup will log a
+      # warning and continue to use the compressor without this option.
+      attr_accessor :rsyncable
+
+      ##
+      # Determine if +--rsyncable+ is supported and cache the result.
+      def self.has_rsyncable?
+        return @has_rsyncable unless @has_rsyncable.nil?
+        cmd = "#{utility(:gzip)} --rsyncable --version >/dev/null 2>&1; echo $?"
+        @has_rsyncable = `#{cmd}`.chomp == "0"
+      end
+
+      ##
+      # Creates a new instance of Backup::Compressor::Gzip
+      def initialize(&block)
+        load_defaults!
+
+        @level ||= false
+        @rsyncable ||= false
+
+        instance_eval(&block) if block_given?
+
+        @cmd = "#{utility(:gzip)}#{options}"
+        @ext = ".gz"
+      end
+
+      private
+
+      def options
+        opts = ""
+        opts << " -#{@level}" if @level
+        if @rsyncable
+          if self.class.has_rsyncable?
+            opts << " --rsyncable"
+          else
+            Logger.warn Error.new(<<-EOS)
+              'rsyncable' option ignored.
+              Your system's 'gzip' does not support the `--rsyncable` option.
+            EOS
+          end
+        end
+        opts
+      end
+    end
+  end
+end
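For comparison, a short sketch of configuring Gzip in a model, again assuming the upstream Backup model DSL is unchanged in this fork:

Model.new(:db_backup, 'Database Backup') do
  compress_with Gzip do |compression|
    compression.level     = 6
    compression.rsyncable = true   # ignored with a logged warning if gzip lacks the --rsyncable patch
  end
end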
data/lib/backup/compressor/pbzip2.rb
@@ -0,0 +1,45 @@
+module Backup
+  module Compressor
+    class PBzip2 < Base
+      ##
+      # Specify the level of compression to use.
+      #
+      # Values should be a single digit from 1 to 9.
+      # Note that setting the level to either extreme may or may not
+      # give the desired result. Be sure to check the documentation
+      # for the compressor being used.
+      #
+      # The default `level` is 9.
+      attr_accessor :level
+
+      ##
+      # The number of processors to be used with Parallel BZIP2
+      # (pbzip2).
+      #
+      # Default is autodetection to use all.
+      attr_accessor :processors
+
+      ##
+      # Creates a new instance of Backup::Compressor::PBzip2
+      def initialize(&block)
+        load_defaults!
+
+        @level ||= false
+
+        instance_eval(&block) if block_given?
+
+        @cmd = "#{utility(:pbzip2)}#{options}"
+        @ext = ".bz2"
+      end
+
+      private
+
+      def options
+        o = ""
+        o << " -#{@level}" if @level
+        o << " -p#{@processors}" if @processors
+        o
+      end
+    end
+  end
+end
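And a final sketch for PBzip2, showing the extra :processors option (same DSL assumption as above):

Model.new(:big_archive, 'Large Archive') do
  compress_with PBzip2 do |compression|
    compression.level      = 9
    compression.processors = 4    # omit to let pbzip2 autodetect and use all cores
  end
end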