ext_backup 5.0.0.beta.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. checksums.yaml +7 -0
  2. data/LICENSE +19 -0
  3. data/README.md +33 -0
  4. data/bin/backup +5 -0
  5. data/bin/docker_test +24 -0
  6. data/lib/backup.rb +140 -0
  7. data/lib/backup/archive.rb +169 -0
  8. data/lib/backup/binder.rb +18 -0
  9. data/lib/backup/cleaner.rb +112 -0
  10. data/lib/backup/cli.rb +370 -0
  11. data/lib/backup/cloud_io/base.rb +38 -0
  12. data/lib/backup/cloud_io/cloud_files.rb +296 -0
  13. data/lib/backup/cloud_io/s3.rb +253 -0
  14. data/lib/backup/compressor/base.rb +32 -0
  15. data/lib/backup/compressor/bzip2.rb +35 -0
  16. data/lib/backup/compressor/custom.rb +49 -0
  17. data/lib/backup/compressor/gzip.rb +73 -0
  18. data/lib/backup/config.rb +128 -0
  19. data/lib/backup/config/dsl.rb +102 -0
  20. data/lib/backup/config/helpers.rb +137 -0
  21. data/lib/backup/database/base.rb +86 -0
  22. data/lib/backup/database/mongodb.rb +186 -0
  23. data/lib/backup/database/mysql.rb +191 -0
  24. data/lib/backup/database/openldap.rb +93 -0
  25. data/lib/backup/database/postgresql.rb +132 -0
  26. data/lib/backup/database/redis.rb +176 -0
  27. data/lib/backup/database/riak.rb +79 -0
  28. data/lib/backup/database/sqlite.rb +55 -0
  29. data/lib/backup/encryptor/base.rb +27 -0
  30. data/lib/backup/encryptor/gpg.rb +737 -0
  31. data/lib/backup/encryptor/open_ssl.rb +74 -0
  32. data/lib/backup/errors.rb +53 -0
  33. data/lib/backup/logger.rb +197 -0
  34. data/lib/backup/logger/console.rb +48 -0
  35. data/lib/backup/logger/fog_adapter.rb +25 -0
  36. data/lib/backup/logger/logfile.rb +131 -0
  37. data/lib/backup/logger/syslog.rb +114 -0
  38. data/lib/backup/model.rb +472 -0
  39. data/lib/backup/notifier/base.rb +126 -0
  40. data/lib/backup/notifier/campfire.rb +61 -0
  41. data/lib/backup/notifier/command.rb +99 -0
  42. data/lib/backup/notifier/datadog.rb +104 -0
  43. data/lib/backup/notifier/flowdock.rb +99 -0
  44. data/lib/backup/notifier/hipchat.rb +116 -0
  45. data/lib/backup/notifier/http_post.rb +114 -0
  46. data/lib/backup/notifier/mail.rb +232 -0
  47. data/lib/backup/notifier/nagios.rb +65 -0
  48. data/lib/backup/notifier/pagerduty.rb +79 -0
  49. data/lib/backup/notifier/prowl.rb +68 -0
  50. data/lib/backup/notifier/pushover.rb +71 -0
  51. data/lib/backup/notifier/ses.rb +123 -0
  52. data/lib/backup/notifier/slack.rb +147 -0
  53. data/lib/backup/notifier/twitter.rb +55 -0
  54. data/lib/backup/notifier/zabbix.rb +60 -0
  55. data/lib/backup/package.rb +51 -0
  56. data/lib/backup/packager.rb +106 -0
  57. data/lib/backup/pipeline.rb +120 -0
  58. data/lib/backup/splitter.rb +73 -0
  59. data/lib/backup/storage/base.rb +66 -0
  60. data/lib/backup/storage/cloud_files.rb +156 -0
  61. data/lib/backup/storage/cycler.rb +70 -0
  62. data/lib/backup/storage/dropbox.rb +206 -0
  63. data/lib/backup/storage/ftp.rb +116 -0
  64. data/lib/backup/storage/local.rb +61 -0
  65. data/lib/backup/storage/qiniu.rb +65 -0
  66. data/lib/backup/storage/rsync.rb +246 -0
  67. data/lib/backup/storage/s3.rb +155 -0
  68. data/lib/backup/storage/scp.rb +65 -0
  69. data/lib/backup/storage/sftp.rb +80 -0
  70. data/lib/backup/syncer/base.rb +67 -0
  71. data/lib/backup/syncer/cloud/base.rb +176 -0
  72. data/lib/backup/syncer/cloud/cloud_files.rb +81 -0
  73. data/lib/backup/syncer/cloud/local_file.rb +97 -0
  74. data/lib/backup/syncer/cloud/s3.rb +109 -0
  75. data/lib/backup/syncer/rsync/base.rb +50 -0
  76. data/lib/backup/syncer/rsync/local.rb +27 -0
  77. data/lib/backup/syncer/rsync/pull.rb +47 -0
  78. data/lib/backup/syncer/rsync/push.rb +201 -0
  79. data/lib/backup/template.rb +41 -0
  80. data/lib/backup/utilities.rb +233 -0
  81. data/lib/backup/version.rb +3 -0
  82. data/lib/ext_backup.rb +5 -0
  83. data/lib/ext_backup/version.rb +5 -0
  84. data/templates/cli/archive +28 -0
  85. data/templates/cli/compressor/bzip2 +4 -0
  86. data/templates/cli/compressor/custom +7 -0
  87. data/templates/cli/compressor/gzip +4 -0
  88. data/templates/cli/config +123 -0
  89. data/templates/cli/databases/mongodb +15 -0
  90. data/templates/cli/databases/mysql +18 -0
  91. data/templates/cli/databases/openldap +24 -0
  92. data/templates/cli/databases/postgresql +16 -0
  93. data/templates/cli/databases/redis +16 -0
  94. data/templates/cli/databases/riak +17 -0
  95. data/templates/cli/databases/sqlite +11 -0
  96. data/templates/cli/encryptor/gpg +27 -0
  97. data/templates/cli/encryptor/openssl +9 -0
  98. data/templates/cli/model +26 -0
  99. data/templates/cli/notifier/zabbix +15 -0
  100. data/templates/cli/notifiers/campfire +12 -0
  101. data/templates/cli/notifiers/command +32 -0
  102. data/templates/cli/notifiers/datadog +57 -0
  103. data/templates/cli/notifiers/flowdock +16 -0
  104. data/templates/cli/notifiers/hipchat +16 -0
  105. data/templates/cli/notifiers/http_post +32 -0
  106. data/templates/cli/notifiers/mail +24 -0
  107. data/templates/cli/notifiers/nagios +13 -0
  108. data/templates/cli/notifiers/pagerduty +12 -0
  109. data/templates/cli/notifiers/prowl +11 -0
  110. data/templates/cli/notifiers/pushover +11 -0
  111. data/templates/cli/notifiers/ses +15 -0
  112. data/templates/cli/notifiers/slack +22 -0
  113. data/templates/cli/notifiers/twitter +13 -0
  114. data/templates/cli/splitter +7 -0
  115. data/templates/cli/storages/cloud_files +11 -0
  116. data/templates/cli/storages/dropbox +20 -0
  117. data/templates/cli/storages/ftp +13 -0
  118. data/templates/cli/storages/local +8 -0
  119. data/templates/cli/storages/qiniu +12 -0
  120. data/templates/cli/storages/rsync +17 -0
  121. data/templates/cli/storages/s3 +16 -0
  122. data/templates/cli/storages/scp +15 -0
  123. data/templates/cli/storages/sftp +15 -0
  124. data/templates/cli/syncers/cloud_files +22 -0
  125. data/templates/cli/syncers/rsync_local +20 -0
  126. data/templates/cli/syncers/rsync_pull +28 -0
  127. data/templates/cli/syncers/rsync_push +28 -0
  128. data/templates/cli/syncers/s3 +27 -0
  129. data/templates/general/links +3 -0
  130. data/templates/general/version.erb +2 -0
  131. data/templates/notifier/mail/failure.erb +16 -0
  132. data/templates/notifier/mail/success.erb +16 -0
  133. data/templates/notifier/mail/warning.erb +16 -0
  134. data/templates/storage/dropbox/authorization_url.erb +6 -0
  135. data/templates/storage/dropbox/authorized.erb +4 -0
  136. data/templates/storage/dropbox/cache_file_written.erb +10 -0
  137. metadata +506 -0
@@ -0,0 +1,370 @@
##
# Build the Backup Command Line Interface using Thor
module Backup
  class CLI < Thor
    class Error < Backup::Error; end
    class FatalError < Backup::FatalError; end

    ##
    # [Perform]
    #
    # The only required option is the --trigger [-t].
    # If --config-file, --data-path, --tmp-path or --log-path
    # aren't specified they will fallback to defaults.
    # If --root-path is given, it will be used as the base path for our defaults,
    # as well as the base path for any option specified as a relative path.
    # Any option given as an absolute path will be used "as-is".
    #
    # This command will exit with one of the following status codes:
    #
    # 0: All triggers were successful and no warnings were issued.
    # 1: All triggers were successful, but some had warnings.
    # 2: All triggers were processed, but some failed.
    # 3: A fatal error caused Backup to exit.
    #    Some triggers may not have been processed.
    #
    # If the --check option is given, `backup check` will be run
    # and no triggers will be performed.
    desc "perform", "Performs the backup for the specified trigger(s)."

    long_desc <<-EOS.gsub(/^ +/, "")
      Performs the backup for the specified trigger(s).

      You may perform multiple backups by providing multiple triggers,
      separated by commas. Each will run in the order specified.

      $ backup perform --triggers backup1,backup2,backup3,backup4

      --root-path may be an absolute path or relative to the current directory.

      To use the current directory, use: `--root-path .`

      Relative paths given for --config-file, --data-path, --tmp-path,
      and --log-path will be relative to --root-path.

      Console log output may be forced using --no-quiet.

      Logging to file or syslog may be disabled using --no-logfile or --no-syslog
      respectively. This will override logging options set in `config.rb`.
    EOS

    method_option :trigger,
      aliases: ["-t", "--triggers"],
      required: true,
      type: :string,
      desc: "Triggers to perform. e.g. 'trigger_a,trigger_b'"

    method_option :config_file,
      aliases: "-c",
      type: :string,
      default: "",
      desc: "Path to your config.rb file."

    method_option :root_path,
      aliases: "-r",
      type: :string,
      default: "",
      desc: "Root path to base all relative path on."

    method_option :data_path,
      aliases: "-d",
      type: :string,
      default: "",
      desc: "Path to store storage cycling data."

    method_option :log_path,
      aliases: "-l",
      type: :string,
      default: "",
      desc: "Path to store Backup's log file."

    method_option :tmp_path,
      type: :string,
      default: "",
      desc: "Path to store temporary data during the backup."

    # Note that :quiet, :syslog and :logfile are declared as :boolean types,
    # so the --no-<option> usage will set the value to false.
    # The empty banner suppresses the value placeholder in Thor's help output.
    method_option :quiet,
      aliases: "-q",
      type: :boolean,
      default: false,
      banner: "",
      desc: "Disable console log output."

    method_option :syslog,
      type: :boolean,
      default: false,
      banner: "",
      desc: "Enable logging to syslog."

    method_option :logfile,
      type: :boolean,
      default: true,
      banner: "",
      desc: "Enable Backup's log file."

    method_option :check,
      type: :boolean,
      default: false,
      desc: "Check configuration for errors or warnings."

    def perform
      check if options[:check] # this will exit()

      models = nil
      begin
        # Set logger options.
        # `options` is captured in a local because the `configure` block
        # appears to be evaluated in the Logger's own context (note the
        # bare `console`/`logfile`/`syslog` calls below).
        opts = options
        Logger.configure do
          console.quiet = opts[:quiet]
          logfile.enabled = opts[:logfile]
          logfile.log_path = opts[:log_path]
          syslog.enabled = opts[:syslog]
        end

        # Load the user's +config.rb+ file and all their Models
        Config.load(options)

        # Identify all Models to be run for the given +triggers+.
        triggers = options[:trigger].split(",").map(&:strip)
        models = triggers.uniq.flat_map do |trigger|
          Model.find_by_trigger(trigger)
        end

        if models.empty?
          raise Error, "No Models found for trigger(s) " \
            "'#{triggers.join(",")}'."
        end

        # Finalize Logger and begin real-time logging.
        Logger.start!
      rescue Exception => err
        # Rescuing Exception (not just StandardError) is deliberate: this is
        # the CLI's top-level setup handler, and any failure here must be
        # reported before exiting with status 3.
        Logger.error Error.wrap(err)
        unless Helpers.is_backup_error? err
          Logger.error err.backtrace.join("\n")
        end
        # Logger configuration will be ignored
        # and messages will be output to the console only.
        Logger.abort!
        exit 3
      end

      # Run each model in turn, tracking the worst outcome so the proper
      # exit status (documented above) can be returned at the end.
      until models.empty?
        model = models.shift
        model.perform!

        case model.exit_status
        when 1
          warnings = true
        when 2
          errors = true
          unless models.empty?
            Logger.info Error.new(<<-EOS)
              Backup will now continue...
              The following triggers will now be processed:
              (#{models.map(&:trigger).join(", ")})
            EOS
          end
        when 3
          fatal = true
          unless models.empty?
            Logger.error FatalError.new(<<-EOS)
              Backup will now exit.
              The following triggers will not be processed:
              (#{models.map(&:trigger).join(", ")})
            EOS
          end
        end

        # Notifiers run even for a fatal model before Backup exits.
        model.notifiers.each(&:perform!)
        exit(3) if fatal
        Logger.clear!
      end

      exit(errors ? 2 : 1) if errors || warnings
    end

    ##
    # [Check]
    #
    # Loads the user's `config.rb` (and all Model files) and reports any Errors
    # or Warnings. This is primarily for checking for syntax errors, missing
    # dependencies and deprecation warnings.
    #
    # This may also be invoked using the `--check` option to `backup perform`.
    #
    # This command only requires `Config.config_file` to be correct.
    # All other Config paths are irrelevant.
    #
    # All output will be sent to the console only.
    # Logger options will be ignored.
    #
    # If successful, this method with exit(0).
    # If there are Errors or Warnings, it will exit(1).
    desc "check", "Check for configuration errors or warnings"

    long_desc <<-EOS.gsub(/^ +/, "")
      Loads your 'config.rb' file and all models and reports any
      errors or warnings with your configuration, including missing
      dependencies and the use of any deprecated settings.
    EOS

    method_option :config_file,
      aliases: "-c",
      type: :string,
      default: "",
      desc: "Path to your config.rb file."

    def check
      begin
        Config.load(options)
      rescue Exception => err
        Logger.error Error.wrap(err)
        unless Helpers.is_backup_error? err
          Logger.error err.backtrace.join("\n")
        end
      end

      if Logger.has_warnings? || Logger.has_errors?
        Logger.error "Configuration Check Failed."
        exit_code = 1
      else
        Logger.info "Configuration Check Succeeded."
        exit_code = 0
      end

      Logger.abort!
      exit(exit_code)
    end

    ##
    # [Generate:Model]
    # Generates a model configuration file based on the arguments passed in.
    # For example:
    #   $ backup generate:model --trigger my_backup --databases='mongodb'
    # will generate a pre-populated model with a base MongoDB setup
    desc "generate:model", "Generates a Backup model file."

    long_desc <<-EOS.gsub(/^ +/, "")
      Generates a Backup model file.

      If your configuration file is not in the default location at
      #{Config.config_file}
      you must specify its location using '--config-file'.
      If no configuration file exists at this location, one will be created.

      The model file will be created as '<config_path>/models/<trigger>.rb'
      Your model file will be created in a 'models/' sub-directory
      where your config file is located. The default location would be:
      #{Config.root_path}/models/<trigger>.rb
    EOS

    method_option :trigger,
      aliases: "-t",
      required: true,
      type: :string,
      desc: "Trigger name for the Backup model"

    method_option :config_file,
      type: :string,
      desc: "Path to your Backup configuration file"

    # options with their available values, discovered from the
    # template files shipped under templates/cli/<name>/
    %w[databases storages syncers encryptor compressor notifiers].each do |name|
      path = File.join(Backup::TEMPLATE_PATH, "cli", name)
      opts = Dir[path + "/*"].sort.map { |p| File.basename(p) }.join(", ")
      method_option name, type: :string, desc: "(#{opts})"
    end

    method_option :archives,
      type: :boolean,
      desc: "Model will include tar archives."

    method_option :splitter,
      type: :boolean,
      default: false,
      desc: "Add Splitter to the model"

    # "generate:model" is not a valid Ruby method name,
    # so it must be defined via define_method.
    define_method "generate:model" do
      # Sanitize the trigger so it can safely be used as a file name.
      opts = options.merge(trigger: options[:trigger].gsub(/\W/, "_"))
      config_file = opts[:config_file] ?
          File.expand_path(opts.delete(:config_file)) : Config.config_file
      models_path = File.join(File.dirname(config_file), "models")
      model_file = File.join(models_path, "#{opts[:trigger]}.rb")

      unless File.exist?(config_file)
        invoke "generate:config", [], config_file: config_file
      end

      FileUtils.mkdir_p(models_path)
      if Helpers.overwrite?(model_file)
        File.open(model_file, "w") do |file|
          file.write(Backup::Template.new(options: opts).result("cli/model"))
        end
        puts "Generated model file: '#{model_file}'."
      end
    end

    ##
    # [Generate:Config]
    # Generates the main configuration file
    desc "generate:config", "Generates the main Backup configuration file"

    long_desc <<-EOS.gsub(/^ +/, "")
      Path to the Backup configuration file to generate.

      Defaults to:

      #{Config.config_file}
    EOS

    method_option :config_file,
      type: :string,
      desc: "Path to the Backup configuration file to generate."

    define_method "generate:config" do
      config_file = options[:config_file] ?
          File.expand_path(options[:config_file]) : Config.config_file

      FileUtils.mkdir_p(File.dirname(config_file))
      if Helpers.overwrite?(config_file)
        File.open(config_file, "w") do |file|
          file.write(Backup::Template.new.result("cli/config"))
        end
        puts "Generated configuration file: '#{config_file}'."
      end
    end

    ##
    # [Version]
    # Returns the current version of the Backup gem
    map "-v" => :version
    desc "version", "Display installed Backup version"
    def version
      puts "Backup #{Backup::VERSION}"
    end

    # This is to avoid Thor's warnings when stubbing methods on the Thor class.
    module Helpers
      class << self
        # Prompts before overwriting an existing file.
        # Returns truthy when the file does not exist or the user answers 'y'.
        def overwrite?(path)
          return true unless File.exist?(path)

          $stderr.print "A file already exists at '#{path}'.\n" \
            "Do you want to overwrite? [y/n] "
          /^[Yy]/ =~ $stdin.gets
        end

        # Replaces the current process with the given command.
        def exec!(cmd)
          puts "Launching: #{cmd}"
          exec(cmd)
        end

        # True when the error is a Backup::Error (or a subclass of it).
        def is_backup_error?(error)
          error.class.ancestors.include? Backup::Error
        end
      end
    end
  end
end
@@ -0,0 +1,38 @@
module Backup
  module CloudIO
    class Error < Backup::Error; end
    class FileSizeError < Backup::Error; end

    # Shared behaviour for the cloud storage back-ends: holds the retry
    # settings and provides the #with_retries helper they wrap their
    # network operations in.
    class Base
      attr_reader :max_retries, :retry_waitsec

      # @param options [Hash]
      #   :max_retries   - how many times #with_retries re-runs its block
      #   :retry_waitsec - seconds slept between attempts
      def initialize(options = {})
        @max_retries, @retry_waitsec = options.values_at(:max_retries, :retry_waitsec)
      end

      private

      # Yields the given block, retrying on any StandardError. Each failed
      # attempt is logged, then retried after #retry_waitsec seconds. Once
      # more than #max_retries attempts have failed, the last error is
      # re-raised wrapped in CloudIO::Error.
      #
      # `operation` is a human-readable label used in the log messages.
      def with_retries(operation)
        retries = 0
        begin
          yield
        rescue => err
          retries += 1
          raise Error.wrap(err, <<-EOS) if retries > max_retries
            Max Retries (#{max_retries}) Exceeded!
            Operation: #{operation}
            Be sure to check the log messages for each retry attempt.
          EOS

          Logger.info Error.wrap(err, <<-EOS)
            Retry ##{retries} of #{max_retries}
            Operation: #{operation}
          EOS
          sleep(retry_waitsec)
          retry
        end
      end
    end
  end
end
@@ -0,0 +1,296 @@
require "backup/cloud_io/base"
require "fog"
require "digest/md5"

module Backup
  module CloudIO
    # Rackspace Cloud Files implementation of the CloudIO interface.
    #
    # Files larger than the configured :segment_size are uploaded as
    # Static Large Objects (SLO): the file is split into segments stored
    # in #segments_container, then a manifest object is stored in
    # #container. Smaller files are uploaded as a single object.
    class CloudFiles < Base
      class Error < Backup::Error; end

      MAX_FILE_SIZE = 1024**3 * 5 # 5 GiB
      MAX_SLO_SIZE = 1024**3 * 5000 # 1000 segments @ 5 GiB
      SEGMENT_BUFFER = 1024**2 # 1 MiB

      attr_reader :username, :api_key, :auth_url, :region, :servicenet,
        :container, :segments_container, :segment_size, :days_to_keep,
        :fog_options

      # All settings are passed through from the Storage/Syncer as a Hash.
      # :segment_size is in MiB; a value of 0 disables SLO uploads (see
      # #upload). :max_retries/:retry_waitsec are consumed by Base#initialize.
      def initialize(options = {})
        super

        @username = options[:username]
        @api_key = options[:api_key]
        @auth_url = options[:auth_url]
        @region = options[:region]
        @servicenet = options[:servicenet]
        @container = options[:container]
        @segments_container = options[:segments_container]
        @segment_size = options[:segment_size]
        @days_to_keep = options[:days_to_keep]
        @fog_options = options[:fog_options]
      end

      # The Syncer may call this method in multiple threads,
      # but #objects is always called before this occurs.
      #
      # Raises FileSizeError when the file exceeds the applicable limit
      # (MAX_SLO_SIZE for segmented uploads, MAX_FILE_SIZE otherwise).
      def upload(src, dest)
        create_containers

        file_size = File.size(src)
        segment_bytes = segment_size * 1024**2
        if segment_bytes > 0 && file_size > segment_bytes
          raise FileSizeError, <<-EOS if file_size > MAX_SLO_SIZE
            File Too Large
            File: #{src}
            Size: #{file_size}
            Max SLO Size is #{MAX_SLO_SIZE} (5 GiB * 1000 segments)
          EOS

          segment_bytes = adjusted_segment_bytes(segment_bytes, file_size)
          segments = upload_segments(src, dest, segment_bytes, file_size)
          upload_manifest(dest, segments)
        else
          raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
            File Too Large
            File: #{src}
            Size: #{file_size}
            Max File Size is #{MAX_FILE_SIZE} (5 GiB)
          EOS

          put_object(src, dest)
        end
      end

      # Returns all objects in the container with the given prefix.
      #
      # - #get_container returns a max of 10000 objects per request.
      # - Returns objects sorted using a sqlite binary collating function.
      # - If marker is given, only objects after the marker are in the response.
      def objects(prefix)
        objects = []
        resp = nil
        prefix = prefix.chomp("/")
        opts = { prefix: prefix + "/" }

        create_containers

        # A full page (10,000 entries) means there may be more; keep paging
        # using the last object's name as the marker.
        while resp.nil? || resp.body.count == 10_000
          opts[:marker] = objects.last.name unless objects.empty?
          with_retries("GET '#{container}/#{prefix}/*'") do
            resp = connection.get_container(container, opts)
          end
          resp.body.each do |obj_data|
            # Object here is CloudIO::CloudFiles::Object (below), not ::Object.
            objects << Object.new(self, obj_data)
          end
        end

        objects
      end

      # Used by Object to fetch metadata if needed.
      def head_object(object)
        resp = nil
        with_retries("HEAD '#{container}/#{object.name}'") do
          resp = connection.head_object(container, object.name)
        end
        resp
      end

      # Delete non-SLO object(s) from the container.
      #
      # - Called by the Storage (with objects) and the Syncer (with names)
      # - Deletes 10,000 objects per request.
      # - Missing objects will be ignored.
      def delete(objects_or_names)
        names = Array(objects_or_names).dup
        names.map!(&:name) if names.first.is_a?(Object)

        # slice! both returns the next batch and removes it from `names`
        # (safe: `names` is a dup of the caller's argument).
        until names.empty?
          names_partial = names.slice!(0, 10_000)
          with_retries("DELETE Multiple Objects") do
            resp = connection.delete_multiple_objects(container, names_partial)
            resp_status = resp.body["Response Status"]
            raise Error, <<-EOS unless resp_status == "200 OK"
              #{resp_status}
              The server returned the following:
              #{resp.body.inspect}
            EOS
          end
        end
      end

      # Delete an SLO object(s) from the container.
      #
      # - Used only by the Storage. The Syncer cannot use SLOs.
      # - Removes the SLO manifest object and all associated segments.
      # - Missing segments will be ignored.
      def delete_slo(objects)
        Array(objects).each do |object|
          with_retries("DELETE SLO Manifest '#{container}/#{object.name}'") do
            resp = connection.delete_static_large_object(container, object.name)
            resp_status = resp.body["Response Status"]
            raise Error, <<-EOS unless resp_status == "200 OK"
              #{resp_status}
              The server returned the following:
              #{resp.body.inspect}
            EOS
          end
        end
      end

      private

      # Lazily-built Fog connection for the Rackspace provider.
      # :fog_options entries override/extend the defaults below.
      def connection
        @connection ||= Fog::Storage.new({
          provider: "Rackspace",
          rackspace_username: username,
          rackspace_api_key: api_key,
          rackspace_auth_url: auth_url,
          rackspace_region: region,
          rackspace_servicenet: servicenet
        }.merge(fog_options || {}))
      end

      # Ensures both containers exist; runs at most once per instance
      # (guarded by @containers_created).
      def create_containers
        return if @containers_created
        @containers_created = true

        with_retries("Create Containers") do
          connection.put_container(container)
          connection.put_container(segments_container) if segments_container
        end
      end

      # Uploads src as a single (non-SLO) object. The precomputed MD5 is
      # sent as the ETag header so the server can verify the transfer.
      def put_object(src, dest)
        opts = headers.merge("ETag" => Digest::MD5.file(src).hexdigest)
        with_retries("PUT '#{container}/#{dest}'") do
          File.open(src, "r") do |file|
            connection.put_object(container, dest, file, opts)
          end
        end
      end

      # Each segment is uploaded using chunked transfer encoding using
      # SEGMENT_BUFFER, and each segment's MD5 is sent to verify the transfer.
      # Each segment's MD5 and byte_size will also be verified when the
      # SLO manifest object is uploaded.
      #
      # Returns the Array of segment Hashes (:path, :etag, :size_bytes)
      # that #upload_manifest submits as the SLO manifest.
      def upload_segments(src, dest, segment_bytes, file_size)
        total_segments = (file_size / segment_bytes.to_f).ceil
        # Segment numbers at which 10%..90% progress is logged.
        progress = (0.1..0.9).step(0.1).map { |n| (total_segments * n).floor }
        Logger.info "\s\sUploading #{total_segments} SLO Segments..."

        segments = []
        File.open(src, "r") do |file|
          segment_number = 0
          until file.eof?
            segment_number += 1
            # Zero-padded name ("<dest>/0001", ...) keeps segment listings
            # in upload order; the 1000-segment cap keeps 4 digits sufficient.
            object = "#{dest}/#{segment_number.to_s.rjust(4, "0")}"
            # Remember the segment start: segment_md5 advances the file,
            # and each retry must re-stream from this position.
            pos = file.pos
            md5 = segment_md5(file, segment_bytes)
            opts = headers.merge("ETag" => md5)

            with_retries("PUT '#{segments_container}/#{object}'") do
              file.seek(pos)
              offset = 0
              connection.put_object(segments_container, object, nil, opts) do
                # block is called to stream data until it returns ''
                data = ""
                if offset <= segment_bytes - SEGMENT_BUFFER
                  data = file.read(SEGMENT_BUFFER).to_s # nil => ''
                  offset += data.size
                end
                data
              end
            end

            segments << {
              path: "#{segments_container}/#{object}",
              etag: md5,
              size_bytes: file.pos - pos
            }

            # Single `=` is intentional: assigns the milestone index when
            # this segment number is one of the 10% progress marks.
            if i = progress.rindex(segment_number)
              Logger.info "\s\s...#{i + 1}0% Complete..."
            end
          end
        end
        segments
      end

      # Computes the MD5 of the next segment (up to segment_bytes), reading
      # from the file's current position. Leaves the file positioned at the
      # segment's end; the caller seeks back before streaming the upload.
      def segment_md5(file, segment_bytes)
        md5 = Digest::MD5.new
        offset = 0
        while offset <= segment_bytes - SEGMENT_BUFFER
          data = file.read(SEGMENT_BUFFER)
          break unless data
          offset += data.size
          md5 << data
        end
        md5.hexdigest
      end

      # Each segment's ETag and byte_size will be verified once uploaded.
      # Request will raise an exception if verification fails or segments
      # are not found. However, each segment's ETag was verified when we
      # uploaded the segments, so this should only retry failed requests.
      def upload_manifest(dest, segments)
        Logger.info "\s\sStoring SLO Manifest '#{container}/#{dest}'"

        with_retries("PUT SLO Manifest '#{container}/#{dest}'") do
          connection.put_static_obj_manifest(container, dest, segments, headers)
        end
      end

      # If :days_to_keep was set, each object will be scheduled for deletion.
      # This includes non-SLO objects, the SLO manifest and all segments.
      def headers
        headers = {}
        headers["X-Delete-At"] = delete_at if delete_at
        headers
      end

      # Unix timestamp :days_to_keep days from now; memoized so every
      # object of a single backup run shares the same expiry time.
      # Returns nil when :days_to_keep is not set.
      def delete_at
        return unless days_to_keep
        @delete_at ||= (Time.now.utc + days_to_keep * 60**2 * 24).to_i
      end

      # Grows the segment size (in whole MiB) until the file fits within
      # the 1000-segment SLO limit, warning the user about the adjustment.
      def adjusted_segment_bytes(segment_bytes, file_size)
        return segment_bytes if file_size / segment_bytes.to_f <= 1000

        mb = orig_mb = segment_bytes / 1024**2
        mb += 1 until file_size / (1024**2 * mb).to_f <= 1000
        Logger.warn Error.new(<<-EOS)
          Segment Size Adjusted
          Your original #segment_size of #{orig_mb} MiB has been adjusted
          to #{mb} MiB in order to satisfy the limit of 1000 segments.
          To enforce your chosen #segment_size, you should use the Splitter.
          e.g. split_into_chunks_of #{mb * 1000} (#segment_size * 1000)
        EOS
        1024**2 * mb
      end

      # Lightweight wrapper around one entry of a container listing.
      # Full metadata is fetched lazily via a HEAD request when needed.
      class Object
        attr_reader :name, :hash

        def initialize(cloud_io, data)
          @cloud_io = cloud_io
          @name = data["name"]
          @hash = data["hash"]
        end

        # True when this object is an SLO manifest.
        def slo?
          !!metadata["X-Static-Large-Object"]
        end

        # True when the object already has an expiry (X-Delete-At) set.
        def marked_for_deletion?
          !!metadata["X-Delete-At"]
        end

        private

        # Memoized HEAD response headers for this object.
        def metadata
          @metadata ||= @cloud_io.head_object(self).headers
        end
      end
    end
  end
end