ext_backup 5.0.0.beta.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. checksums.yaml +7 -0
  2. data/LICENSE +19 -0
  3. data/README.md +33 -0
  4. data/bin/backup +5 -0
  5. data/bin/docker_test +24 -0
  6. data/lib/backup.rb +140 -0
  7. data/lib/backup/archive.rb +169 -0
  8. data/lib/backup/binder.rb +18 -0
  9. data/lib/backup/cleaner.rb +112 -0
  10. data/lib/backup/cli.rb +370 -0
  11. data/lib/backup/cloud_io/base.rb +38 -0
  12. data/lib/backup/cloud_io/cloud_files.rb +296 -0
  13. data/lib/backup/cloud_io/s3.rb +253 -0
  14. data/lib/backup/compressor/base.rb +32 -0
  15. data/lib/backup/compressor/bzip2.rb +35 -0
  16. data/lib/backup/compressor/custom.rb +49 -0
  17. data/lib/backup/compressor/gzip.rb +73 -0
  18. data/lib/backup/config.rb +128 -0
  19. data/lib/backup/config/dsl.rb +102 -0
  20. data/lib/backup/config/helpers.rb +137 -0
  21. data/lib/backup/database/base.rb +86 -0
  22. data/lib/backup/database/mongodb.rb +186 -0
  23. data/lib/backup/database/mysql.rb +191 -0
  24. data/lib/backup/database/openldap.rb +93 -0
  25. data/lib/backup/database/postgresql.rb +132 -0
  26. data/lib/backup/database/redis.rb +176 -0
  27. data/lib/backup/database/riak.rb +79 -0
  28. data/lib/backup/database/sqlite.rb +55 -0
  29. data/lib/backup/encryptor/base.rb +27 -0
  30. data/lib/backup/encryptor/gpg.rb +737 -0
  31. data/lib/backup/encryptor/open_ssl.rb +74 -0
  32. data/lib/backup/errors.rb +53 -0
  33. data/lib/backup/logger.rb +197 -0
  34. data/lib/backup/logger/console.rb +48 -0
  35. data/lib/backup/logger/fog_adapter.rb +25 -0
  36. data/lib/backup/logger/logfile.rb +131 -0
  37. data/lib/backup/logger/syslog.rb +114 -0
  38. data/lib/backup/model.rb +472 -0
  39. data/lib/backup/notifier/base.rb +126 -0
  40. data/lib/backup/notifier/campfire.rb +61 -0
  41. data/lib/backup/notifier/command.rb +99 -0
  42. data/lib/backup/notifier/datadog.rb +104 -0
  43. data/lib/backup/notifier/flowdock.rb +99 -0
  44. data/lib/backup/notifier/hipchat.rb +116 -0
  45. data/lib/backup/notifier/http_post.rb +114 -0
  46. data/lib/backup/notifier/mail.rb +232 -0
  47. data/lib/backup/notifier/nagios.rb +65 -0
  48. data/lib/backup/notifier/pagerduty.rb +79 -0
  49. data/lib/backup/notifier/prowl.rb +68 -0
  50. data/lib/backup/notifier/pushover.rb +71 -0
  51. data/lib/backup/notifier/ses.rb +123 -0
  52. data/lib/backup/notifier/slack.rb +147 -0
  53. data/lib/backup/notifier/twitter.rb +55 -0
  54. data/lib/backup/notifier/zabbix.rb +60 -0
  55. data/lib/backup/package.rb +51 -0
  56. data/lib/backup/packager.rb +106 -0
  57. data/lib/backup/pipeline.rb +120 -0
  58. data/lib/backup/splitter.rb +73 -0
  59. data/lib/backup/storage/base.rb +66 -0
  60. data/lib/backup/storage/cloud_files.rb +156 -0
  61. data/lib/backup/storage/cycler.rb +70 -0
  62. data/lib/backup/storage/dropbox.rb +206 -0
  63. data/lib/backup/storage/ftp.rb +116 -0
  64. data/lib/backup/storage/local.rb +61 -0
  65. data/lib/backup/storage/qiniu.rb +65 -0
  66. data/lib/backup/storage/rsync.rb +246 -0
  67. data/lib/backup/storage/s3.rb +155 -0
  68. data/lib/backup/storage/scp.rb +65 -0
  69. data/lib/backup/storage/sftp.rb +80 -0
  70. data/lib/backup/syncer/base.rb +67 -0
  71. data/lib/backup/syncer/cloud/base.rb +176 -0
  72. data/lib/backup/syncer/cloud/cloud_files.rb +81 -0
  73. data/lib/backup/syncer/cloud/local_file.rb +97 -0
  74. data/lib/backup/syncer/cloud/s3.rb +109 -0
  75. data/lib/backup/syncer/rsync/base.rb +50 -0
  76. data/lib/backup/syncer/rsync/local.rb +27 -0
  77. data/lib/backup/syncer/rsync/pull.rb +47 -0
  78. data/lib/backup/syncer/rsync/push.rb +201 -0
  79. data/lib/backup/template.rb +41 -0
  80. data/lib/backup/utilities.rb +233 -0
  81. data/lib/backup/version.rb +3 -0
  82. data/lib/ext_backup.rb +5 -0
  83. data/lib/ext_backup/version.rb +5 -0
  84. data/templates/cli/archive +28 -0
  85. data/templates/cli/compressor/bzip2 +4 -0
  86. data/templates/cli/compressor/custom +7 -0
  87. data/templates/cli/compressor/gzip +4 -0
  88. data/templates/cli/config +123 -0
  89. data/templates/cli/databases/mongodb +15 -0
  90. data/templates/cli/databases/mysql +18 -0
  91. data/templates/cli/databases/openldap +24 -0
  92. data/templates/cli/databases/postgresql +16 -0
  93. data/templates/cli/databases/redis +16 -0
  94. data/templates/cli/databases/riak +17 -0
  95. data/templates/cli/databases/sqlite +11 -0
  96. data/templates/cli/encryptor/gpg +27 -0
  97. data/templates/cli/encryptor/openssl +9 -0
  98. data/templates/cli/model +26 -0
  99. data/templates/cli/notifier/zabbix +15 -0
  100. data/templates/cli/notifiers/campfire +12 -0
  101. data/templates/cli/notifiers/command +32 -0
  102. data/templates/cli/notifiers/datadog +57 -0
  103. data/templates/cli/notifiers/flowdock +16 -0
  104. data/templates/cli/notifiers/hipchat +16 -0
  105. data/templates/cli/notifiers/http_post +32 -0
  106. data/templates/cli/notifiers/mail +24 -0
  107. data/templates/cli/notifiers/nagios +13 -0
  108. data/templates/cli/notifiers/pagerduty +12 -0
  109. data/templates/cli/notifiers/prowl +11 -0
  110. data/templates/cli/notifiers/pushover +11 -0
  111. data/templates/cli/notifiers/ses +15 -0
  112. data/templates/cli/notifiers/slack +22 -0
  113. data/templates/cli/notifiers/twitter +13 -0
  114. data/templates/cli/splitter +7 -0
  115. data/templates/cli/storages/cloud_files +11 -0
  116. data/templates/cli/storages/dropbox +20 -0
  117. data/templates/cli/storages/ftp +13 -0
  118. data/templates/cli/storages/local +8 -0
  119. data/templates/cli/storages/qiniu +12 -0
  120. data/templates/cli/storages/rsync +17 -0
  121. data/templates/cli/storages/s3 +16 -0
  122. data/templates/cli/storages/scp +15 -0
  123. data/templates/cli/storages/sftp +15 -0
  124. data/templates/cli/syncers/cloud_files +22 -0
  125. data/templates/cli/syncers/rsync_local +20 -0
  126. data/templates/cli/syncers/rsync_pull +28 -0
  127. data/templates/cli/syncers/rsync_push +28 -0
  128. data/templates/cli/syncers/s3 +27 -0
  129. data/templates/general/links +3 -0
  130. data/templates/general/version.erb +2 -0
  131. data/templates/notifier/mail/failure.erb +16 -0
  132. data/templates/notifier/mail/success.erb +16 -0
  133. data/templates/notifier/mail/warning.erb +16 -0
  134. data/templates/storage/dropbox/authorization_url.erb +6 -0
  135. data/templates/storage/dropbox/authorized.erb +4 -0
  136. data/templates/storage/dropbox/cache_file_written.erb +10 -0
  137. metadata +506 -0
@@ -0,0 +1,253 @@
1
require "backup/cloud_io/base"
require "ext_fog_aws"
require "digest/md5"
require "base64"
require "stringio"

module Backup
  module CloudIO
    # S3 implementation of CloudIO, backed by fog's AWS storage provider.
    #
    # Files larger than the configured chunk size are sent via S3's
    # multipart upload API; smaller files (or when chunk_size is 0) are
    # sent with a single PUT. All network calls are wrapped in
    # #with_retries (inherited from CloudIO::Base).
    class S3 < Base
      class Error < Backup::Error; end

      # S3 hard limits: 5 GiB for a single PUT Object request,
      # 5 TiB for a completed multipart upload.
      MAX_FILE_SIZE = 1024**3 * 5 # 5 GiB
      MAX_MULTIPART_SIZE = 1024**4 * 5 # 5 TiB

      attr_reader :access_key_id, :secret_access_key, :use_iam_profile,
        :region, :bucket, :chunk_size, :encryption, :storage_class,
        :fog_options

      # Options of note:
      #   :chunk_size    - multipart chunk size in MiB; 0 disables multipart
      #   :use_iam_profile - when truthy, credentials come from the EC2
      #                      instance profile instead of the key pair
      #   :encryption    - sent as the x-amz-server-side-encryption header
      #   :storage_class - sent as the x-amz-storage-class header
      #   :fog_options   - extra options merged into the Fog::Storage config
      def initialize(options = {})
        super

        @access_key_id = options[:access_key_id]
        @secret_access_key = options[:secret_access_key]
        @use_iam_profile = options[:use_iam_profile]
        @region = options[:region]
        @bucket = options[:bucket]
        @chunk_size = options[:chunk_size]
        @encryption = options[:encryption]
        @storage_class = options[:storage_class]
        @fog_options = options[:fog_options]
      end

      # The Syncer may call this method in multiple threads.
      # However, #objects is always called prior to multithreading.
      #
      # Chooses multipart vs. single PUT based on #chunk_size and raises
      # FileSizeError (presumably defined in CloudIO::Base, required
      # above -- TODO confirm) when the file exceeds the relevant limit.
      def upload(src, dest)
        file_size = File.size(src)
        chunk_bytes = chunk_size * 1024**2
        if chunk_bytes > 0 && file_size > chunk_bytes
          raise FileSizeError, <<-EOS if file_size > MAX_MULTIPART_SIZE
            File Too Large
            File: #{src}
            Size: #{file_size}
            Max Multipart Upload Size is #{MAX_MULTIPART_SIZE} (5 TiB)
          EOS

          chunk_bytes = adjusted_chunk_bytes(chunk_bytes, file_size)
          upload_id = initiate_multipart(dest)
          parts = upload_parts(src, dest, upload_id, chunk_bytes, file_size)
          complete_multipart(dest, upload_id, parts)
        else
          raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
            File Too Large
            File: #{src}
            Size: #{file_size}
            Max File Size is #{MAX_FILE_SIZE} (5 GiB)
          EOS

          put_object(src, dest)
        end
      end

      # Returns all objects in the bucket with the given prefix.
      #
      # - #get_bucket returns a max of 1000 objects per request.
      # - Returns objects in alphabetical order.
      # - If marker is given, only objects after the marker are in the response.
      #
      # Pagination relies on the alphabetical ordering: the last key seen
      # so far becomes the marker for the next request.
      def objects(prefix)
        objects = []
        resp = nil
        prefix = prefix.chomp("/")
        opts = { "prefix" => prefix + "/" }

        while resp.nil? || resp.body["IsTruncated"]
          opts["marker"] = objects.last.key unless objects.empty?
          with_retries("GET '#{bucket}/#{prefix}/*'") do
            resp = connection.get_bucket(bucket, opts)
          end
          resp.body["Contents"].each do |obj_data|
            # Object here is the nested CloudIO::S3::Object class below,
            # not ::Object -- resolved lexically.
            objects << Object.new(self, obj_data)
          end
        end

        objects
      end

      # Used by Object to fetch metadata if needed.
      def head_object(object)
        resp = nil
        with_retries("HEAD '#{bucket}/#{object.key}'") do
          resp = connection.head_object(bucket, object.key)
        end
        resp
      end

      # Delete object(s) from the bucket.
      #
      # - Called by the Storage (with objects) and the Syncer (with keys)
      # - Deletes 1000 objects per request.
      # - Missing objects will be ignored.
      def delete(objects_or_keys)
        keys = Array(objects_or_keys).dup
        keys.map!(&:key) if keys.first.is_a?(Object)

        opts = { quiet: true } # only report Errors in DeleteResult
        until keys.empty?
          keys_partial = keys.slice!(0, 1000)
          with_retries("DELETE Multiple Objects") do
            # opts.dup: NOTE(review) looks like a guard against the fog
            # call mutating the options hash between retries -- confirm.
            resp = connection.delete_multiple_objects(bucket, keys_partial, opts.dup)
            unless resp.body["DeleteResult"].empty?
              errors = resp.body["DeleteResult"].map do |result|
                error = result["Error"]
                "Failed to delete: #{error["Key"]}\n" \
                "Reason: #{error["Code"]}: #{error["Message"]}"
              end.join("\n")
              raise Error, "The server returned the following:\n#{errors}"
            end
          end
        end
      end

      private

      # Lazily builds (and memoizes) the Fog::Storage connection.
      # #sync_clock aligns fog's clock with the server to avoid
      # request-signature time-skew errors.
      def connection
        @connection ||=
          begin
            opts = { provider: "AWS", region: region }
            if use_iam_profile
              opts[:use_iam_profile] = true
            else
              opts[:aws_access_key_id] = access_key_id
              opts[:aws_secret_access_key] = secret_access_key
            end
            opts.merge!(fog_options || {})
            conn = Fog::Storage.new(opts)
            conn.sync_clock
            conn
          end
      end

      # Single-request upload. The base64 MD5 digest is sent as
      # Content-MD5 so S3 can verify the transferred bytes.
      def put_object(src, dest)
        md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
        options = headers.merge("Content-MD5" => md5)
        with_retries("PUT '#{bucket}/#{dest}'") do
          File.open(src, "r") do |file|
            connection.put_object(bucket, dest, file, options)
          end
        end
      end

      # Starts a multipart upload and returns the UploadId that must
      # accompany each part and the completion request.
      def initiate_multipart(dest)
        Logger.info "\s\sInitiate Multipart '#{bucket}/#{dest}'"

        resp = nil
        with_retries("POST '#{bucket}/#{dest}' (Initiate)") do
          resp = connection.initiate_multipart_upload(bucket, dest, headers)
        end
        resp.body["UploadId"]
      end

      # Each part's MD5 is sent to verify the transfer.
      # AWS will concatenate all parts into a single object
      # once the multipart upload is completed.
      #
      # Returns the ordered list of part ETags, which #complete_multipart
      # needs to close the upload. Progress is logged at roughly each 10%.
      def upload_parts(src, dest, upload_id, chunk_bytes, file_size)
        total_parts = (file_size / chunk_bytes.to_f).ceil
        progress = (0.1..0.9).step(0.1).map { |n| (total_parts * n).floor }
        Logger.info "\s\sUploading #{total_parts} Parts..."

        parts = []
        File.open(src, "r") do |file|
          part_number = 0
          while data = file.read(chunk_bytes)
            part_number += 1
            md5 = Base64.encode64(Digest::MD5.digest(data)).chomp

            with_retries("PUT '#{bucket}/#{dest}' Part ##{part_number}") do
              resp = connection.upload_part(
                bucket, dest, upload_id, part_number, StringIO.new(data),
                "Content-MD5" => md5
              )
              parts << resp.headers["ETag"]
            end

            # rindex so a part number that hits multiple thresholds
            # reports the highest percentage reached.
            if i = progress.rindex(part_number)
              Logger.info "\s\s...#{i + 1}0% Complete..."
            end
          end
        end
        parts
      end

      # Completes the multipart upload. A 200 response may still carry an
      # error document, hence the check on resp.body["Code"].
      def complete_multipart(dest, upload_id, parts)
        Logger.info "\s\sComplete Multipart '#{bucket}/#{dest}'"

        with_retries("POST '#{bucket}/#{dest}' (Complete)") do
          resp = connection.complete_multipart_upload(bucket, dest, upload_id, parts)
          raise Error, <<-EOS if resp.body["Code"]
            The server returned the following error:
            #{resp.body["Code"]}: #{resp.body["Message"]}
          EOS
        end
      end

      # Common request headers for encryption and storage class.
      # "STANDARD" is S3's default storage class, so it is omitted.
      def headers
        headers = {}

        enc = encryption.to_s.upcase
        headers["x-amz-server-side-encryption"] = enc unless enc.empty?

        sc = storage_class.to_s.upcase
        headers["x-amz-storage-class"] = sc unless sc.empty? || sc == "STANDARD"

        headers
      end

      # S3 allows at most 10,000 parts per multipart upload. If the
      # configured chunk size would exceed that, grow it (in whole MiB)
      # until the part count fits, warning the user of the adjustment.
      def adjusted_chunk_bytes(chunk_bytes, file_size)
        return chunk_bytes if file_size / chunk_bytes.to_f <= 10_000

        mb = orig_mb = chunk_bytes / 1024**2
        mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
        Logger.warn Error.new(<<-EOS)
          Chunk Size Adjusted
          Your original #chunk_size of #{orig_mb} MiB has been adjusted
          to #{mb} MiB in order to satisfy the limit of 10,000 chunks.
          To enforce your chosen #chunk_size, you should use the Splitter.
          e.g. split_into_chunks_of #{mb * 10_000} (#chunk_size * 10_000)
        EOS
        1024**2 * mb
      end

      # Lightweight wrapper around one entry of a GET Bucket response.
      # Note: intentionally named Object; shadows ::Object within this
      # lexical scope.
      class Object
        attr_reader :key, :etag, :storage_class

        def initialize(cloud_io, data)
          @cloud_io = cloud_io
          @key = data["Key"]
          @etag = data["ETag"]
          @storage_class = data["StorageClass"]
        end

        # currently 'AES256' or nil
        def encryption
          metadata["x-amz-server-side-encryption"]
        end

        private

        # HEAD request is issued lazily, only when metadata is needed,
        # and memoized per object.
        def metadata
          @metadata ||= @cloud_io.head_object(self).headers
        end
      end
    end
  end
end
@@ -0,0 +1,32 @@
1
module Backup
  module Compressor
    # Abstract parent for all compressors. A concrete subclass is expected
    # to populate @cmd (the full compression command line) and @ext (the
    # filename extension it produces) in its initializer.
    class Base
      include Utilities::Helpers
      include Config::Helpers

      ##
      # Yields to the block the compressor command and filename extension.
      def compress_with
        log!
        yield @cmd, @ext
      end

      private

      ##
      # Return the compressor name, with Backup namespace removed
      def compressor_name
        self.class.name.sub("Backup::", "")
      end

      ##
      # Logs a message to the console and log file to inform
      # the client that Backup is using the compressor
      def log!
        details = [
          "Using #{compressor_name} for compression.",
          " Command: '#{@cmd}'",
          " Ext: '#{@ext}'"
        ]
        Logger.info details.join("\n")
      end
    end
  end
end
@@ -0,0 +1,35 @@
1
module Backup
  module Compressor
    class Bzip2 < Base
      ##
      # Specify the level of compression to use.
      #
      # Values should be a single digit from 1 to 9.
      # Note that setting the level to either extreme may or may not
      # give the desired result. Be sure to check the documentation
      # for the compressor being used.
      #
      # The default `level` is 9.
      attr_accessor :level

      ##
      # Creates a new instance of Backup::Compressor::Bzip2
      #
      # When no level is configured (here or via defaults), no flag is
      # passed and bzip2's own default applies.
      def initialize(&block)
        load_defaults!

        @level ||= false

        instance_eval(&block) if block_given?

        @cmd = "#{utility(:bzip2)}#{options}"
        @ext = ".bz2"
      end

      private

      # Level flag for the command line, or nil when no level was set.
      def options
        return unless @level

        " -#{@level}"
      end
    end
  end
end
@@ -0,0 +1,49 @@
1
module Backup
  module Compressor
    class Custom < Base
      ##
      # Specify the system command to invoke a compressor,
      # including any command-line arguments.
      # e.g. @compressor.command = 'pbzip2 -p2 -4'
      #
      # The data to be compressed will be piped to the command's STDIN,
      # and it should write the compressed data to STDOUT.
      # i.e. `cat file.tar | %command% > file.tar.%extension%`
      attr_accessor :command

      ##
      # File extension to append to the compressed file's filename.
      # e.g. @compressor.extension = '.bz2'
      attr_accessor :extension

      ##
      # Initializes a new custom compressor.
      def initialize(&block)
        load_defaults!

        instance_eval(&block) if block_given?

        @cmd = set_cmd
        @ext = set_ext
      end

      private

      ##
      # Return the command line using the full path.
      # Ensures the command exists and is executable.
      def set_cmd
        name, *args = @command.to_s.split(" ")
        ([utility(name)] + args).join(" ")
      end

      ##
      # Return the extension given without whitespace.
      # If extension was not set, return an empty string
      def set_ext
        @extension.to_s.strip
      end
    end
  end
end
@@ -0,0 +1,73 @@
1
module Backup
  module Compressor
    class Gzip < Base
      class Error < Backup::Error; end
      extend Utilities::Helpers

      ##
      # Specify the level of compression to use.
      #
      # Values should be a single digit from 1 to 9.
      # Note that setting the level to either extreme may or may not
      # give the desired result. Be sure to check the documentation
      # for the compressor being used.
      #
      # The default `level` is 6.
      attr_accessor :level

      ##
      # Use the `--rsyncable` option with `gzip`.
      #
      # This option directs `gzip` to compress data using an algorithm that
      # allows `rsync` to efficiently detect changes. This is especially useful
      # when used to compress `Archive` or `Database` backups that will be
      # stored using Backup's `RSync` Storage option.
      #
      # The `--rsyncable` option is only available on patched versions of `gzip`.
      # While most distributions apply this patch, this option may not be
      # available on your system. If it's not available, Backup will log a
      # warning and continue to use the compressor without this option.
      attr_accessor :rsyncable

      ##
      # Determine if +--rsyncable+ is supported and cache the result.
      def self.has_rsyncable?
        if @has_rsyncable.nil?
          probe = "#{utility(:gzip)} --rsyncable --version >/dev/null 2>&1; echo $?"
          @has_rsyncable = `#{probe}`.chomp == "0"
        end
        @has_rsyncable
      end

      ##
      # Creates a new instance of Backup::Compressor::Gzip
      def initialize(&block)
        load_defaults!

        @level ||= false
        @rsyncable ||= false

        instance_eval(&block) if block_given?

        @cmd = "#{utility(:gzip)}#{options}"
        @ext = ".gz"
      end

      private

      # Builds the command-line flags from @level and @rsyncable.
      # An unsupported --rsyncable request is dropped with a warning
      # rather than failing the backup.
      def options
        flags = ""
        flags << " -#{@level}" if @level
        if @rsyncable
          if self.class.has_rsyncable?
            flags << " --rsyncable"
          else
            Logger.warn Error.new(<<-EOS)
              'rsyncable' option ignored.
              Your system's 'gzip' does not support the `--rsyncable` option.
            EOS
          end
        end
        flags
      end
    end
  end
end
@@ -0,0 +1,128 @@
1
require "backup/config/dsl"
require "backup/config/helpers"

module Backup
  # Holds Backup's global configuration: the active user, root/data/tmp
  # paths and the location of the user's config.rb. Paths may be set from
  # the command line, from config.rb itself, or fall back to DEFAULTS
  # under @root_path.
  module Config
    class Error < Backup::Error; end

    # Path endings appended to @root_path when no explicit value is given.
    DEFAULTS = {
      config_file: "config.rb",
      data_path: ".data",
      tmp_path: ".tmp"
    }

    # Capture patterns for the extended configuration-file header that
    # points at a config shipped inside another gem (see #load).
    GEM_NAME = /\w+/
    CONFIG_FILE = /[\w\/\.]+/

    class << self
      include Utilities::Helpers

      attr_reader :user, :root_path, :data_path, :tmp_path
      attr_accessor :config_file

      # Loads the user's +config.rb+ and all model files.
      #
      # The config file must carry a version header matching the current
      # major Backup version. A header of the form
      # "# Backup vN.x Configuration[gem][path]" redirects loading to a
      # config file bundled inside the named gem (resolved via Bundler --
      # assumes Bundler is loaded; TODO confirm). Option precedence is:
      # command line > config.rb > defaults.
      def load(options = {})
        update(options) # from the command line

        unless File.exist?(config_file)
          raise Error, "Could not find configuration file: '#{config_file}'."
        end

        config = File.read(config_file)
        version = Backup::VERSION.split(".").first
        models = File.join(File.dirname(config_file), "models", "*.rb")
        # ^/$ anchor per-line here, which is intended: the header may
        # appear on any line of the file.
        if config =~ /^# Backup v#{ version }\.x Configuration\[(#{GEM_NAME})\]\[(#{CONFIG_FILE})\]$/
          gem_name, self.config_file = $1, $2
          spec = Bundler.load.specs.find{ |s| s.name == gem_name }
          self.config_file = File.join(spec.full_gem_path, config_file)
          config = File.read(config_file)
        elsif !(config =~ /^# Backup v#{ version }\.x Configuration$/)
          raise Error, <<-EOS
            Invalid Configuration File
            The configuration file at '#{config_file}'
            does not appear to be a Backup v#{version}.x configuration file.
            If you have upgraded to v#{version}.x from a previous version,
            you need to upgrade your configuration file.
            Please see the instructions for upgrading in the Backup documentation.
          EOS
        end

        dsl = DSL.new
        dsl.instance_eval(config, config_file)

        update(dsl._config_options) # from config.rb
        update(options) # command line takes precedence

        # NOTE: models glob is resolved relative to the ORIGINAL config
        # file's directory, even when the header redirected config_file
        # to a gem -- presumably intentional; verify against callers.
        Dir[models].each do |model|
          dsl.instance_eval(File.read(model), model)
        end
      end

      # Memoized hostname, obtained by running the `hostname` utility.
      def hostname
        @hostname ||= run(utility(:hostname))
      end

      private

      # If :root_path is set in the options, all paths will be updated.
      # Otherwise, only the paths given will be updated.
      def update(options = {})
        root_path = options[:root_path].to_s.strip
        new_root = root_path.empty? ? false : set_root_path(root_path)

        DEFAULTS.each do |name, ending|
          set_path_variable(name, options[name], ending, new_root)
        end
      end

      # Sets the @root_path to the given +path+ and returns it.
      # Raises an error if the given +path+ does not exist.
      def set_root_path(path)
        # allows #reset! to set the default @root_path,
        # then use #update to set all other paths,
        # without requiring that @root_path exist.
        return @root_path if path == @root_path

        path = File.expand_path(path)
        unless File.directory?(path)
          raise Error, <<-EOS
            Root Path Not Found
            When specifying a --root-path, the path must exist.
            Path was: #{path}
          EOS
        end
        @root_path = path
      end

      # Sets @<name> based on +path+ (the user-supplied value, may be
      # empty), +ending+ (the DEFAULTS suffix) and +root_path+ (false, or
      # the newly-set root). Leaves the variable untouched when neither a
      # path nor a new root was given.
      def set_path_variable(name, path, ending, root_path)
        # strip any trailing '/' in case the user supplied this as part of
        # an absolute path, so we can match it against File.expand_path()
        path = path.to_s.sub(/\/\s*$/, "").lstrip
        new_path = false
        # If no path is given, the variable will not be set/updated
        # unless a root_path was given. In which case the value will
        # be updated with our default ending.
        if path.empty?
          new_path = File.join(root_path, ending) if root_path
        else
          # When a path is given, the variable will be set/updated.
          # If the path is relative, it will be joined with root_path (if given),
          # or expanded relative to PWD.
          new_path = File.expand_path(path)
          unless path == new_path
            new_path = File.join(root_path, path) if root_path
          end
        end
        instance_variable_set(:"@#{name}", new_path) if new_path
      end

      # Restores the default user and paths ($HOME/Backup plus DEFAULTS).
      # Etc.getpwuid assumes 'etc' has been required elsewhere -- TODO
      # confirm.
      def reset!
        @user = ENV["USER"] || Etc.getpwuid.name
        @root_path = File.join(File.expand_path(ENV["HOME"] || ""), "Backup")
        update(root_path: @root_path)
      end
    end

    reset! # set defaults on load
  end
end