backup-remote 0.0.2

Files changed (135)
  1. checksums.yaml +7 -0
  2. data/README.md +112 -0
  3. data/bin/backup-remote +5 -0
  4. data/lib/backup.rb +155 -0
  5. data/lib/backup/archive.rb +170 -0
  6. data/lib/backup/binder.rb +22 -0
  7. data/lib/backup/cleaner.rb +116 -0
  8. data/lib/backup/cli.rb +374 -0
  9. data/lib/backup/cloud_io/base.rb +41 -0
  10. data/lib/backup/cloud_io/cloud_files.rb +298 -0
  11. data/lib/backup/cloud_io/s3.rb +260 -0
  12. data/lib/backup/compressor/base.rb +35 -0
  13. data/lib/backup/compressor/bzip2.rb +39 -0
  14. data/lib/backup/compressor/custom.rb +53 -0
  15. data/lib/backup/compressor/gzip.rb +74 -0
  16. data/lib/backup/config.rb +121 -0
  17. data/lib/backup/config/dsl.rb +106 -0
  18. data/lib/backup/config/helpers.rb +143 -0
  19. data/lib/backup/database/base.rb +85 -0
  20. data/lib/backup/database/mongodb.rb +187 -0
  21. data/lib/backup/database/mysql.rb +192 -0
  22. data/lib/backup/database/openldap.rb +95 -0
  23. data/lib/backup/database/postgresql.rb +133 -0
  24. data/lib/backup/database/redis.rb +179 -0
  25. data/lib/backup/database/remote_mysql.rb +248 -0
  26. data/lib/backup/database/riak.rb +82 -0
  27. data/lib/backup/database/sqlite.rb +57 -0
  28. data/lib/backup/encryptor/base.rb +29 -0
  29. data/lib/backup/encryptor/gpg.rb +747 -0
  30. data/lib/backup/encryptor/open_ssl.rb +77 -0
  31. data/lib/backup/errors.rb +58 -0
  32. data/lib/backup/logger.rb +199 -0
  33. data/lib/backup/logger/console.rb +51 -0
  34. data/lib/backup/logger/fog_adapter.rb +29 -0
  35. data/lib/backup/logger/logfile.rb +133 -0
  36. data/lib/backup/logger/syslog.rb +116 -0
  37. data/lib/backup/model.rb +479 -0
  38. data/lib/backup/notifier/base.rb +128 -0
  39. data/lib/backup/notifier/campfire.rb +63 -0
  40. data/lib/backup/notifier/command.rb +102 -0
  41. data/lib/backup/notifier/datadog.rb +107 -0
  42. data/lib/backup/notifier/flowdock.rb +103 -0
  43. data/lib/backup/notifier/hipchat.rb +118 -0
  44. data/lib/backup/notifier/http_post.rb +117 -0
  45. data/lib/backup/notifier/mail.rb +249 -0
  46. data/lib/backup/notifier/nagios.rb +69 -0
  47. data/lib/backup/notifier/pagerduty.rb +81 -0
  48. data/lib/backup/notifier/prowl.rb +68 -0
  49. data/lib/backup/notifier/pushover.rb +74 -0
  50. data/lib/backup/notifier/ses.rb +105 -0
  51. data/lib/backup/notifier/slack.rb +148 -0
  52. data/lib/backup/notifier/twitter.rb +58 -0
  53. data/lib/backup/notifier/zabbix.rb +63 -0
  54. data/lib/backup/package.rb +55 -0
  55. data/lib/backup/packager.rb +107 -0
  56. data/lib/backup/pipeline.rb +128 -0
  57. data/lib/backup/remote/command.rb +82 -0
  58. data/lib/backup/splitter.rb +76 -0
  59. data/lib/backup/storage/base.rb +69 -0
  60. data/lib/backup/storage/cloud_files.rb +158 -0
  61. data/lib/backup/storage/cycler.rb +75 -0
  62. data/lib/backup/storage/dropbox.rb +212 -0
  63. data/lib/backup/storage/ftp.rb +112 -0
  64. data/lib/backup/storage/local.rb +64 -0
  65. data/lib/backup/storage/qiniu.rb +65 -0
  66. data/lib/backup/storage/rsync.rb +248 -0
  67. data/lib/backup/storage/s3.rb +156 -0
  68. data/lib/backup/storage/scp.rb +67 -0
  69. data/lib/backup/storage/sftp.rb +82 -0
  70. data/lib/backup/syncer/base.rb +70 -0
  71. data/lib/backup/syncer/cloud/base.rb +179 -0
  72. data/lib/backup/syncer/cloud/cloud_files.rb +83 -0
  73. data/lib/backup/syncer/cloud/local_file.rb +100 -0
  74. data/lib/backup/syncer/cloud/s3.rb +110 -0
  75. data/lib/backup/syncer/rsync/base.rb +54 -0
  76. data/lib/backup/syncer/rsync/local.rb +31 -0
  77. data/lib/backup/syncer/rsync/pull.rb +51 -0
  78. data/lib/backup/syncer/rsync/push.rb +205 -0
  79. data/lib/backup/template.rb +46 -0
  80. data/lib/backup/utilities.rb +224 -0
  81. data/lib/backup/version.rb +5 -0
  82. data/templates/cli/archive +28 -0
  83. data/templates/cli/compressor/bzip2 +4 -0
  84. data/templates/cli/compressor/custom +7 -0
  85. data/templates/cli/compressor/gzip +4 -0
  86. data/templates/cli/config +123 -0
  87. data/templates/cli/databases/mongodb +15 -0
  88. data/templates/cli/databases/mysql +18 -0
  89. data/templates/cli/databases/openldap +24 -0
  90. data/templates/cli/databases/postgresql +16 -0
  91. data/templates/cli/databases/redis +16 -0
  92. data/templates/cli/databases/riak +17 -0
  93. data/templates/cli/databases/sqlite +11 -0
  94. data/templates/cli/encryptor/gpg +27 -0
  95. data/templates/cli/encryptor/openssl +9 -0
  96. data/templates/cli/model +26 -0
  97. data/templates/cli/notifier/zabbix +15 -0
  98. data/templates/cli/notifiers/campfire +12 -0
  99. data/templates/cli/notifiers/command +32 -0
  100. data/templates/cli/notifiers/datadog +57 -0
  101. data/templates/cli/notifiers/flowdock +16 -0
  102. data/templates/cli/notifiers/hipchat +16 -0
  103. data/templates/cli/notifiers/http_post +32 -0
  104. data/templates/cli/notifiers/mail +24 -0
  105. data/templates/cli/notifiers/nagios +13 -0
  106. data/templates/cli/notifiers/pagerduty +12 -0
  107. data/templates/cli/notifiers/prowl +11 -0
  108. data/templates/cli/notifiers/pushover +11 -0
  109. data/templates/cli/notifiers/ses +15 -0
  110. data/templates/cli/notifiers/slack +22 -0
  111. data/templates/cli/notifiers/twitter +13 -0
  112. data/templates/cli/splitter +7 -0
  113. data/templates/cli/storages/cloud_files +11 -0
  114. data/templates/cli/storages/dropbox +20 -0
  115. data/templates/cli/storages/ftp +13 -0
  116. data/templates/cli/storages/local +8 -0
  117. data/templates/cli/storages/qiniu +12 -0
  118. data/templates/cli/storages/rsync +17 -0
  119. data/templates/cli/storages/s3 +16 -0
  120. data/templates/cli/storages/scp +15 -0
  121. data/templates/cli/storages/sftp +15 -0
  122. data/templates/cli/syncers/cloud_files +22 -0
  123. data/templates/cli/syncers/rsync_local +20 -0
  124. data/templates/cli/syncers/rsync_pull +28 -0
  125. data/templates/cli/syncers/rsync_push +28 -0
  126. data/templates/cli/syncers/s3 +27 -0
  127. data/templates/general/links +3 -0
  128. data/templates/general/version.erb +2 -0
  129. data/templates/notifier/mail/failure.erb +16 -0
  130. data/templates/notifier/mail/success.erb +16 -0
  131. data/templates/notifier/mail/warning.erb +16 -0
  132. data/templates/storage/dropbox/authorization_url.erb +6 -0
  133. data/templates/storage/dropbox/authorized.erb +4 -0
  134. data/templates/storage/dropbox/cache_file_written.erb +10 -0
  135. metadata +1122 -0
data/lib/backup/cloud_io/base.rb
@@ -0,0 +1,41 @@
+ # encoding: utf-8
+
+ module Backup
+   module CloudIO
+     class Error < Backup::Error; end
+     class FileSizeError < Backup::Error; end
+
+     class Base
+       attr_reader :max_retries, :retry_waitsec
+
+       def initialize(options = {})
+         @max_retries   = options[:max_retries]
+         @retry_waitsec = options[:retry_waitsec]
+       end
+
+       private
+
+       def with_retries(operation)
+         retries = 0
+         begin
+           yield
+         rescue => err
+           retries += 1
+           raise Error.wrap(err, <<-EOS) if retries > max_retries
+             Max Retries (#{ max_retries }) Exceeded!
+             Operation: #{ operation }
+             Be sure to check the log messages for each retry attempt.
+           EOS
+
+           Logger.info Error.wrap(err, <<-EOS)
+             Retry ##{ retries } of #{ max_retries }
+             Operation: #{ operation }
+           EOS
+           sleep(retry_waitsec)
+           retry
+         end
+       end
+
+     end
+   end
+ end
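
A minimal sketch of how a CloudIO subclass might use Base#with_retries, for context. The class name ExampleIO, the block body, and the option values are hypothetical; only #with_retries, :max_retries, and :retry_waitsec come from the code above.

# Illustrative sketch (not part of the gem): a hypothetical subclass whose
# request is wrapped in Base#with_retries. Any error raised inside the block
# is logged and retried up to :max_retries times, sleeping :retry_waitsec
# between attempts, then re-raised wrapped in CloudIO::Error.
module Backup
  module CloudIO
    class ExampleIO < Base
      def upload(src, dest)
        with_retries("PUT '#{ dest }'") do
          # perform the actual request here (e.g. a Fog call)
        end
      end
    end
  end
end

io = Backup::CloudIO::ExampleIO.new(:max_retries => 10, :retry_waitsec => 30)
io.upload('/path/to/file', 'remote/path')
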
data/lib/backup/cloud_io/cloud_files.rb
@@ -0,0 +1,298 @@
+ # encoding: utf-8
+ require 'backup/cloud_io/base'
+ require 'fog'
+ require 'digest/md5'
+
+ module Backup
+   module CloudIO
+     class CloudFiles < Base
+       class Error < Backup::Error; end
+
+       MAX_FILE_SIZE  = 1024**3 * 5     # 5 GiB
+       MAX_SLO_SIZE   = 1024**3 * 5000  # 1000 segments @ 5 GiB
+       SEGMENT_BUFFER = 1024**2         # 1 MiB
+
+       attr_reader :username, :api_key, :auth_url, :region, :servicenet,
+                   :container, :segments_container, :segment_size, :days_to_keep,
+                   :fog_options
+
+       def initialize(options = {})
+         super
+
+         @username           = options[:username]
+         @api_key            = options[:api_key]
+         @auth_url           = options[:auth_url]
+         @region             = options[:region]
+         @servicenet         = options[:servicenet]
+         @container          = options[:container]
+         @segments_container = options[:segments_container]
+         @segment_size       = options[:segment_size]
+         @days_to_keep       = options[:days_to_keep]
+         @fog_options        = options[:fog_options]
+       end
+
+       # The Syncer may call this method in multiple threads,
+       # but #objects is always called before this occurs.
+       def upload(src, dest)
+         create_containers
+
+         file_size = File.size(src)
+         segment_bytes = segment_size * 1024**2
+         if segment_bytes > 0 && file_size > segment_bytes
+           raise FileSizeError, <<-EOS if file_size > MAX_SLO_SIZE
+             File Too Large
+             File: #{ src }
+             Size: #{ file_size }
+             Max SLO Size is #{ MAX_SLO_SIZE } (5 GiB * 1000 segments)
+           EOS
+
+           segment_bytes = adjusted_segment_bytes(segment_bytes, file_size)
+           segments = upload_segments(src, dest, segment_bytes, file_size)
+           upload_manifest(dest, segments)
+         else
+           raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
+             File Too Large
+             File: #{ src }
+             Size: #{ file_size }
+             Max File Size is #{ MAX_FILE_SIZE } (5 GiB)
+           EOS
+
+           put_object(src, dest)
+         end
+       end
+
+       # Returns all objects in the container with the given prefix.
+       #
+       # - #get_container returns a max of 10000 objects per request.
+       # - Returns objects sorted using a sqlite binary collating function.
+       # - If marker is given, only objects after the marker are in the response.
+       def objects(prefix)
+         objects = []
+         resp = nil
+         prefix = prefix.chomp('/')
+         opts = { :prefix => prefix + '/' }
+
+         create_containers
+
+         while resp.nil? || resp.body.count == 10000
+           opts.merge!(:marker => objects.last.name) unless objects.empty?
+           with_retries("GET '#{ container }/#{ prefix }/*'") do
+             resp = connection.get_container(container, opts)
+           end
+           resp.body.each do |obj_data|
+             objects << Object.new(self, obj_data)
+           end
+         end
+
+         objects
+       end
+
+       # Used by Object to fetch metadata if needed.
+       def head_object(object)
+         resp = nil
+         with_retries("HEAD '#{ container }/#{ object.name }'") do
+           resp = connection.head_object(container, object.name)
+         end
+         resp
+       end
+
+       # Delete non-SLO object(s) from the container.
+       #
+       # - Called by the Storage (with objects) and the Syncer (with names)
+       # - Deletes 10,000 objects per request.
+       # - Missing objects will be ignored.
+       def delete(objects_or_names)
+         names = Array(objects_or_names).dup
+         names.map!(&:name) if names.first.is_a?(Object)
+
+         until names.empty?
+           _names = names.slice!(0, 10000)
+           with_retries('DELETE Multiple Objects') do
+             resp = connection.delete_multiple_objects(container, _names)
+             resp_status = resp.body['Response Status']
+             raise Error, <<-EOS unless resp_status == '200 OK'
+               #{ resp_status }
+               The server returned the following:
+               #{ resp.body.inspect }
+             EOS
+           end
+         end
+       end
+
+       # Delete an SLO object(s) from the container.
+       #
+       # - Used only by the Storage. The Syncer cannot use SLOs.
+       # - Removes the SLO manifest object and all associated segments.
+       # - Missing segments will be ignored.
+       def delete_slo(objects)
+         Array(objects).each do |object|
+           with_retries("DELETE SLO Manifest '#{ container }/#{ object.name }'") do
+             resp = connection.delete_static_large_object(container, object.name)
+             resp_status = resp.body['Response Status']
+             raise Error, <<-EOS unless resp_status == '200 OK'
+               #{ resp_status }
+               The server returned the following:
+               #{ resp.body.inspect }
+             EOS
+           end
+         end
+       end
+
+       private
+
+       def connection
+         @connection ||= Fog::Storage.new({
+           :provider             => 'Rackspace',
+           :rackspace_username   => username,
+           :rackspace_api_key    => api_key,
+           :rackspace_auth_url   => auth_url,
+           :rackspace_region     => region,
+           :rackspace_servicenet => servicenet
+         }.merge(fog_options || {}))
+       end
+
+       def create_containers
+         return if @containers_created
+         @containers_created = true
+
+         with_retries('Create Containers') do
+           connection.put_container(container)
+           connection.put_container(segments_container) if segments_container
+         end
+       end
+
+       def put_object(src, dest)
+         opts = headers.merge('ETag' => Digest::MD5.file(src).hexdigest)
+         with_retries("PUT '#{ container }/#{ dest }'") do
+           File.open(src, 'r') do |file|
+             connection.put_object(container, dest, file, opts)
+           end
+         end
+       end
+
+       # Each segment is uploaded using chunked transfer encoding using
+       # SEGMENT_BUFFER, and each segment's MD5 is sent to verify the transfer.
+       # Each segment's MD5 and byte_size will also be verified when the
+       # SLO manifest object is uploaded.
+       def upload_segments(src, dest, segment_bytes, file_size)
+         total_segments = (file_size / segment_bytes.to_f).ceil
+         progress = (0.1..0.9).step(0.1).map {|n| (total_segments * n).floor }
+         Logger.info "\s\sUploading #{ total_segments } SLO Segments..."
+
+         segments = []
+         File.open(src, 'r') do |file|
+           segment_number = 0
+           until file.eof?
+             segment_number += 1
+             object = "#{ dest }/#{ segment_number.to_s.rjust(4, '0') }"
+             pos = file.pos
+             md5 = segment_md5(file, segment_bytes)
+             opts = headers.merge('ETag' => md5)
+
+             with_retries("PUT '#{ segments_container }/#{ object }'") do
+               file.seek(pos)
+               offset = 0
+               connection.put_object(segments_container, object, nil, opts) do
+                 # block is called to stream data until it returns ''
+                 data = ''
+                 if offset <= segment_bytes - SEGMENT_BUFFER
+                   data = file.read(SEGMENT_BUFFER).to_s # nil => ''
+                   offset += data.size
+                 end
+                 data
+               end
+             end
+
+             segments << {
+               :path => "#{ segments_container }/#{ object }",
+               :etag => md5,
+               :size_bytes => file.pos - pos
+             }
+
+             if i = progress.rindex(segment_number)
+               Logger.info "\s\s...#{ i + 1 }0% Complete..."
+             end
+           end
+         end
+         segments
+       end
+
+       def segment_md5(file, segment_bytes)
+         md5 = Digest::MD5.new
+         offset = 0
+         while offset <= segment_bytes - SEGMENT_BUFFER
+           data = file.read(SEGMENT_BUFFER)
+           break unless data
+           offset += data.size
+           md5 << data
+         end
+         md5.hexdigest
+       end
+
+       # Each segment's ETag and byte_size will be verified once uploaded.
+       # Request will raise an exception if verification fails or segments
+       # are not found. However, each segment's ETag was verified when we
+       # uploaded the segments, so this should only retry failed requests.
+       def upload_manifest(dest, segments)
+         Logger.info "\s\sStoring SLO Manifest '#{ container }/#{ dest }'"
+
+         with_retries("PUT SLO Manifest '#{ container }/#{ dest }'") do
+           connection.put_static_obj_manifest(container, dest, segments, headers)
+         end
+       end
+
+       # If :days_to_keep was set, each object will be scheduled for deletion.
+       # This includes non-SLO objects, the SLO manifest and all segments.
+       def headers
+         headers = {}
+         headers.merge!('X-Delete-At' => delete_at) if delete_at
+         headers
+       end
+
+       def delete_at
+         return unless days_to_keep
+         @delete_at ||= (Time.now.utc + days_to_keep * 60**2 * 24).to_i
+       end
+
+       def adjusted_segment_bytes(segment_bytes, file_size)
+         return segment_bytes if file_size / segment_bytes.to_f <= 1000
+
+         mb = orig_mb = segment_bytes / 1024**2
+         mb += 1 until file_size / (1024**2 * mb).to_f <= 1000
+         Logger.warn Error.new(<<-EOS)
+           Segment Size Adjusted
+           Your original #segment_size of #{ orig_mb } MiB has been adjusted
+           to #{ mb } MiB in order to satisfy the limit of 1000 segments.
+           To enforce your chosen #segment_size, you should use the Splitter.
+           e.g. split_into_chunks_of #{ mb * 1000 } (#segment_size * 1000)
+         EOS
+         1024**2 * mb
+       end
+
+       class Object
+         attr_reader :name, :hash
+
+         def initialize(cloud_io, data)
+           @cloud_io = cloud_io
+           @name = data['name']
+           @hash = data['hash']
+         end
+
+         def slo?
+           !!metadata['X-Static-Large-Object']
+         end
+
+         def marked_for_deletion?
+           !!metadata['X-Delete-At']
+         end
+
+         private
+
+         def metadata
+           @metadata ||= @cloud_io.head_object(self).headers
+         end
+       end
+
+     end
+   end
+ end
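
To make the options above concrete, a hedged usage sketch for the Cloud Files client defined in this file. Every credential, container name, and the auth endpoint below is a placeholder; the option keys and #upload signature come from the code above.

# Illustrative only (not part of the gem): construct the client and upload a file.
cloud_io = Backup::CloudIO::CloudFiles.new(
  :username           => 'my_username',
  :api_key            => 'my_api_key',
  :auth_url           => 'https://identity.example.com/v2.0',  # placeholder endpoint
  :region             => 'dfw',
  :container          => 'my_backups',
  :segments_container => 'my_backups_segments',
  :segment_size       => 5,    # MiB; > 0 enables SLO uploads for files larger than this
  :days_to_keep       => 30,   # sets X-Delete-At on uploaded objects
  :max_retries        => 10,
  :retry_waitsec      => 30
)
cloud_io.upload('/path/to/backup.tar', 'my_trigger/backup.tar')
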
data/lib/backup/cloud_io/s3.rb
@@ -0,0 +1,260 @@
+ # encoding: utf-8
+ require 'backup/cloud_io/base'
+ require 'fog'
+ require 'digest/md5'
+ require 'base64'
+ require 'stringio'
+
+ module Backup
+   module CloudIO
+     class S3 < Base
+       class Error < Backup::Error; end
+
+       MAX_FILE_SIZE      = 1024**3 * 5  # 5 GiB
+       MAX_MULTIPART_SIZE = 1024**4 * 5  # 5 TiB
+
+       attr_reader :access_key_id, :secret_access_key, :use_iam_profile,
+                   :region, :bucket, :chunk_size, :encryption, :storage_class,
+                   :fog_options
+
+       def initialize(options = {})
+         super
+
+         @access_key_id     = options[:access_key_id]
+         @secret_access_key = options[:secret_access_key]
+         @use_iam_profile   = options[:use_iam_profile]
+         @region            = options[:region]
+         @bucket            = options[:bucket]
+         @chunk_size        = options[:chunk_size]
+         @encryption        = options[:encryption]
+         @storage_class     = options[:storage_class]
+         @fog_options       = options[:fog_options]
+       end
+
+       # The Syncer may call this method in multiple threads.
+       # However, #objects is always called prior to multithreading.
+       def upload(src, dest)
+         file_size = File.size(src)
+         chunk_bytes = chunk_size * 1024**2
+         if chunk_bytes > 0 && file_size > chunk_bytes
+           raise FileSizeError, <<-EOS if file_size > MAX_MULTIPART_SIZE
+             File Too Large
+             File: #{ src }
+             Size: #{ file_size }
+             Max Multipart Upload Size is #{ MAX_MULTIPART_SIZE } (5 TiB)
+           EOS
+
+           chunk_bytes = adjusted_chunk_bytes(chunk_bytes, file_size)
+           upload_id = initiate_multipart(dest)
+           parts = upload_parts(src, dest, upload_id, chunk_bytes, file_size)
+           complete_multipart(dest, upload_id, parts)
+         else
+           raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
+             File Too Large
+             File: #{ src }
+             Size: #{ file_size }
+             Max File Size is #{ MAX_FILE_SIZE } (5 GiB)
+           EOS
+
+           put_object(src, dest)
+         end
+       end
+
+       # Returns all objects in the bucket with the given prefix.
+       #
+       # - #get_bucket returns a max of 1000 objects per request.
+       # - Returns objects in alphabetical order.
+       # - If marker is given, only objects after the marker are in the response.
+       def objects(prefix)
+         objects = []
+         resp = nil
+         prefix = prefix.chomp('/')
+         opts = { 'prefix' => prefix + '/' }
+
+         while resp.nil? || resp.body['IsTruncated']
+           opts.merge!('marker' => objects.last.key) unless objects.empty?
+           with_retries("GET '#{ bucket }/#{ prefix }/*'") do
+             resp = connection.get_bucket(bucket, opts)
+           end
+           resp.body['Contents'].each do |obj_data|
+             objects << Object.new(self, obj_data)
+           end
+         end
+
+         objects
+       end
+
+       # Used by Object to fetch metadata if needed.
+       def head_object(object)
+         resp = nil
+         with_retries("HEAD '#{ bucket }/#{ object.key }'") do
+           resp = connection.head_object(bucket, object.key)
+         end
+         resp
+       end
+
+       # Delete object(s) from the bucket.
+       #
+       # - Called by the Storage (with objects) and the Syncer (with keys)
+       # - Deletes 1000 objects per request.
+       # - Missing objects will be ignored.
+       def delete(objects_or_keys)
+         keys = Array(objects_or_keys).dup
+         keys.map!(&:key) if keys.first.is_a?(Object)
+
+         opts = { :quiet => true } # only report Errors in DeleteResult
+         until keys.empty?
+           _keys = keys.slice!(0, 1000)
+           with_retries('DELETE Multiple Objects') do
+             resp = connection.delete_multiple_objects(bucket, _keys, opts.dup)
+             unless resp.body['DeleteResult'].empty?
+               errors = resp.body['DeleteResult'].map do |result|
+                 error = result['Error']
+                 "Failed to delete: #{ error['Key'] }\n" +
+                 "Reason: #{ error['Code'] }: #{ error['Message'] }"
+               end.join("\n")
+               raise Error, "The server returned the following:\n#{ errors }"
+             end
+           end
+         end
+       end
+
+       private
+
+       def connection
+         @connection ||= begin
+           opts = { :provider => 'AWS', :region => region }
+           if use_iam_profile
+             opts.merge!(:use_iam_profile => true)
+           else
+             opts.merge!(
+               :aws_access_key_id     => access_key_id,
+               :aws_secret_access_key => secret_access_key
+             )
+           end
+           opts.merge!(fog_options || {})
+           conn = Fog::Storage.new(opts)
+           conn.sync_clock
+           conn
+         end
+       end
+
+       def put_object(src, dest)
+         md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
+         options = headers.merge('Content-MD5' => md5)
+         with_retries("PUT '#{ bucket }/#{ dest }'") do
+           File.open(src, 'r') do |file|
+             connection.put_object(bucket, dest, file, options)
+           end
+         end
+       end
+
+       def initiate_multipart(dest)
+         Logger.info "\s\sInitiate Multipart '#{ bucket }/#{ dest }'"
+
+         resp = nil
+         with_retries("POST '#{ bucket }/#{ dest }' (Initiate)") do
+           resp = connection.initiate_multipart_upload(bucket, dest, headers)
+         end
+         resp.body['UploadId']
+       end
+
+       # Each part's MD5 is sent to verify the transfer.
+       # AWS will concatenate all parts into a single object
+       # once the multipart upload is completed.
+       def upload_parts(src, dest, upload_id, chunk_bytes, file_size)
+         total_parts = (file_size / chunk_bytes.to_f).ceil
+         progress = (0.1..0.9).step(0.1).map {|n| (total_parts * n).floor }
+         Logger.info "\s\sUploading #{ total_parts } Parts..."
+
+         parts = []
+         File.open(src, 'r') do |file|
+           part_number = 0
+           while data = file.read(chunk_bytes)
+             part_number += 1
+             md5 = Base64.encode64(Digest::MD5.digest(data)).chomp
+
+             with_retries("PUT '#{ bucket }/#{ dest }' Part ##{ part_number }") do
+               resp = connection.upload_part(
+                 bucket, dest, upload_id, part_number, StringIO.new(data),
+                 { 'Content-MD5' => md5 }
+               )
+               parts << resp.headers['ETag']
+             end
+
+             if i = progress.rindex(part_number)
+               Logger.info "\s\s...#{ i + 1 }0% Complete..."
+             end
+           end
+         end
+         parts
+       end
+
+       def complete_multipart(dest, upload_id, parts)
+         Logger.info "\s\sComplete Multipart '#{ bucket }/#{ dest }'"
+
+         with_retries("POST '#{ bucket }/#{ dest }' (Complete)") do
+           resp = connection.complete_multipart_upload(bucket, dest, upload_id, parts)
+           raise Error, <<-EOS if resp.body['Code']
+             The server returned the following error:
+             #{ resp.body['Code'] }: #{ resp.body['Message'] }
+           EOS
+         end
+       end
+
+       def headers
+         headers = {}
+
+         enc = encryption.to_s.upcase
+         headers.merge!(
+           { 'x-amz-server-side-encryption' => enc }
+         ) unless enc.empty?
+
+         sc = storage_class.to_s.upcase
+         headers.merge!(
+           { 'x-amz-storage-class' => sc }
+         ) unless sc.empty? || sc == 'STANDARD'
+
+         headers
+       end
+
+       def adjusted_chunk_bytes(chunk_bytes, file_size)
+         return chunk_bytes if file_size / chunk_bytes.to_f <= 10_000
+
+         mb = orig_mb = chunk_bytes / 1024**2
+         mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
+         Logger.warn Error.new(<<-EOS)
+           Chunk Size Adjusted
+           Your original #chunk_size of #{ orig_mb } MiB has been adjusted
+           to #{ mb } MiB in order to satisfy the limit of 10,000 chunks.
+           To enforce your chosen #chunk_size, you should use the Splitter.
+           e.g. split_into_chunks_of #{ mb * 10_000 } (#chunk_size * 10_000)
+         EOS
+         1024**2 * mb
+       end
+
+       class Object
+         attr_reader :key, :etag, :storage_class
+
+         def initialize(cloud_io, data)
+           @cloud_io = cloud_io
+           @key = data['Key']
+           @etag = data['ETag']
+           @storage_class = data['StorageClass']
+         end
+
+         # currently 'AES256' or nil
+         def encryption
+           metadata['x-amz-server-side-encryption']
+         end
+
+         private
+
+         def metadata
+           @metadata ||= @cloud_io.head_object(self).headers
+         end
+       end
+
+     end
+   end
+ end
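
Likewise, a hedged usage sketch for the S3 client defined in this file. All credential and bucket values are placeholders; the option keys, #upload, and #objects come from the code above. Note that :chunk_size is given in MiB (multiplied by 1024**2 in #upload), and :encryption and :storage_class are upcased before being sent as request headers.

# Illustrative only (not part of the gem): construct the client, upload, and list.
cloud_io = Backup::CloudIO::S3.new(
  :access_key_id     => 'my_access_key_id',
  :secret_access_key => 'my_secret_access_key',
  :region            => 'us-east-1',
  :bucket            => 'my_backups',
  :chunk_size        => 5,        # MiB; > 0 enables multipart uploads for large files
  :encryption        => :aes256,  # sent as 'x-amz-server-side-encryption: AES256'
  :storage_class     => :standard,
  :max_retries       => 10,
  :retry_waitsec     => 30
)
cloud_io.upload('/path/to/backup.tar', 'my_trigger/backup.tar')
objects = cloud_io.objects('my_trigger')  # all objects under the 'my_trigger/' prefix
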