cm-backup 1.0.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. checksums.yaml +7 -0
  2. data/README.md +20 -0
  3. data/bin/backup +5 -0
  4. data/lib/backup.rb +144 -0
  5. data/lib/backup/archive.rb +170 -0
  6. data/lib/backup/binder.rb +22 -0
  7. data/lib/backup/cleaner.rb +116 -0
  8. data/lib/backup/cli.rb +374 -0
  9. data/lib/backup/cloud_io/base.rb +41 -0
  10. data/lib/backup/cloud_io/cloud_files.rb +298 -0
  11. data/lib/backup/cloud_io/s3.rb +260 -0
  12. data/lib/backup/compressor/base.rb +35 -0
  13. data/lib/backup/compressor/bzip2.rb +39 -0
  14. data/lib/backup/compressor/custom.rb +53 -0
  15. data/lib/backup/compressor/gzip.rb +74 -0
  16. data/lib/backup/config.rb +119 -0
  17. data/lib/backup/config/dsl.rb +103 -0
  18. data/lib/backup/config/helpers.rb +143 -0
  19. data/lib/backup/database/base.rb +85 -0
  20. data/lib/backup/database/mongodb.rb +187 -0
  21. data/lib/backup/database/mysql.rb +192 -0
  22. data/lib/backup/database/openldap.rb +95 -0
  23. data/lib/backup/database/postgresql.rb +133 -0
  24. data/lib/backup/database/redis.rb +179 -0
  25. data/lib/backup/database/riak.rb +82 -0
  26. data/lib/backup/database/sqlite.rb +57 -0
  27. data/lib/backup/encryptor/base.rb +29 -0
  28. data/lib/backup/encryptor/gpg.rb +747 -0
  29. data/lib/backup/encryptor/open_ssl.rb +77 -0
  30. data/lib/backup/errors.rb +58 -0
  31. data/lib/backup/logger.rb +199 -0
  32. data/lib/backup/logger/console.rb +51 -0
  33. data/lib/backup/logger/fog_adapter.rb +29 -0
  34. data/lib/backup/logger/logfile.rb +133 -0
  35. data/lib/backup/logger/syslog.rb +116 -0
  36. data/lib/backup/model.rb +479 -0
  37. data/lib/backup/notifier/base.rb +128 -0
  38. data/lib/backup/notifier/campfire.rb +63 -0
  39. data/lib/backup/notifier/command.rb +102 -0
  40. data/lib/backup/notifier/datadog.rb +107 -0
  41. data/lib/backup/notifier/flowdock.rb +103 -0
  42. data/lib/backup/notifier/hipchat.rb +118 -0
  43. data/lib/backup/notifier/http_post.rb +117 -0
  44. data/lib/backup/notifier/mail.rb +249 -0
  45. data/lib/backup/notifier/nagios.rb +69 -0
  46. data/lib/backup/notifier/pagerduty.rb +81 -0
  47. data/lib/backup/notifier/prowl.rb +68 -0
  48. data/lib/backup/notifier/pushover.rb +74 -0
  49. data/lib/backup/notifier/ses.rb +105 -0
  50. data/lib/backup/notifier/slack.rb +148 -0
  51. data/lib/backup/notifier/twitter.rb +58 -0
  52. data/lib/backup/notifier/zabbix.rb +63 -0
  53. data/lib/backup/package.rb +55 -0
  54. data/lib/backup/packager.rb +107 -0
  55. data/lib/backup/pipeline.rb +124 -0
  56. data/lib/backup/splitter.rb +76 -0
  57. data/lib/backup/storage/base.rb +69 -0
  58. data/lib/backup/storage/cloud_files.rb +158 -0
  59. data/lib/backup/storage/cycler.rb +75 -0
  60. data/lib/backup/storage/dropbox.rb +212 -0
  61. data/lib/backup/storage/ftp.rb +112 -0
  62. data/lib/backup/storage/local.rb +64 -0
  63. data/lib/backup/storage/qiniu.rb +65 -0
  64. data/lib/backup/storage/rsync.rb +248 -0
  65. data/lib/backup/storage/s3.rb +156 -0
  66. data/lib/backup/storage/scp.rb +67 -0
  67. data/lib/backup/storage/sftp.rb +82 -0
  68. data/lib/backup/syncer/base.rb +70 -0
  69. data/lib/backup/syncer/cloud/base.rb +179 -0
  70. data/lib/backup/syncer/cloud/cloud_files.rb +83 -0
  71. data/lib/backup/syncer/cloud/local_file.rb +100 -0
  72. data/lib/backup/syncer/cloud/s3.rb +110 -0
  73. data/lib/backup/syncer/rsync/base.rb +54 -0
  74. data/lib/backup/syncer/rsync/local.rb +31 -0
  75. data/lib/backup/syncer/rsync/pull.rb +51 -0
  76. data/lib/backup/syncer/rsync/push.rb +205 -0
  77. data/lib/backup/template.rb +46 -0
  78. data/lib/backup/utilities.rb +224 -0
  79. data/lib/backup/version.rb +5 -0
  80. data/templates/cli/archive +28 -0
  81. data/templates/cli/compressor/bzip2 +4 -0
  82. data/templates/cli/compressor/custom +7 -0
  83. data/templates/cli/compressor/gzip +4 -0
  84. data/templates/cli/config +123 -0
  85. data/templates/cli/databases/mongodb +15 -0
  86. data/templates/cli/databases/mysql +18 -0
  87. data/templates/cli/databases/openldap +24 -0
  88. data/templates/cli/databases/postgresql +16 -0
  89. data/templates/cli/databases/redis +16 -0
  90. data/templates/cli/databases/riak +17 -0
  91. data/templates/cli/databases/sqlite +11 -0
  92. data/templates/cli/encryptor/gpg +27 -0
  93. data/templates/cli/encryptor/openssl +9 -0
  94. data/templates/cli/model +26 -0
  95. data/templates/cli/notifier/zabbix +15 -0
  96. data/templates/cli/notifiers/campfire +12 -0
  97. data/templates/cli/notifiers/command +32 -0
  98. data/templates/cli/notifiers/datadog +57 -0
  99. data/templates/cli/notifiers/flowdock +16 -0
  100. data/templates/cli/notifiers/hipchat +16 -0
  101. data/templates/cli/notifiers/http_post +32 -0
  102. data/templates/cli/notifiers/mail +24 -0
  103. data/templates/cli/notifiers/nagios +13 -0
  104. data/templates/cli/notifiers/pagerduty +12 -0
  105. data/templates/cli/notifiers/prowl +11 -0
  106. data/templates/cli/notifiers/pushover +11 -0
  107. data/templates/cli/notifiers/ses +15 -0
  108. data/templates/cli/notifiers/slack +22 -0
  109. data/templates/cli/notifiers/twitter +13 -0
  110. data/templates/cli/splitter +7 -0
  111. data/templates/cli/storages/cloud_files +11 -0
  112. data/templates/cli/storages/dropbox +20 -0
  113. data/templates/cli/storages/ftp +13 -0
  114. data/templates/cli/storages/local +8 -0
  115. data/templates/cli/storages/qiniu +12 -0
  116. data/templates/cli/storages/rsync +17 -0
  117. data/templates/cli/storages/s3 +16 -0
  118. data/templates/cli/storages/scp +15 -0
  119. data/templates/cli/storages/sftp +15 -0
  120. data/templates/cli/syncers/cloud_files +22 -0
  121. data/templates/cli/syncers/rsync_local +20 -0
  122. data/templates/cli/syncers/rsync_pull +28 -0
  123. data/templates/cli/syncers/rsync_push +28 -0
  124. data/templates/cli/syncers/s3 +27 -0
  125. data/templates/general/links +3 -0
  126. data/templates/general/version.erb +2 -0
  127. data/templates/notifier/mail/failure.erb +16 -0
  128. data/templates/notifier/mail/success.erb +16 -0
  129. data/templates/notifier/mail/warning.erb +16 -0
  130. data/templates/storage/dropbox/authorization_url.erb +6 -0
  131. data/templates/storage/dropbox/authorized.erb +4 -0
  132. data/templates/storage/dropbox/cache_file_written.erb +10 -0
  133. metadata +1077 -0
data/lib/backup/cloud_io/s3.rb
@@ -0,0 +1,260 @@
+ # encoding: utf-8
+ require 'backup/cloud_io/base'
+ require 'fog'
+ require 'digest/md5'
+ require 'base64'
+ require 'stringio'
+
+ module Backup
+   module CloudIO
+     class S3 < Base
+       class Error < Backup::Error; end
+
+       MAX_FILE_SIZE      = 1024**3 * 5 # 5 GiB
+       MAX_MULTIPART_SIZE = 1024**4 * 5 # 5 TiB
+
+       attr_reader :access_key_id, :secret_access_key, :use_iam_profile,
+                   :region, :bucket, :chunk_size, :encryption, :storage_class,
+                   :fog_options
+
+       def initialize(options = {})
+         super
+
+         @access_key_id     = options[:access_key_id]
+         @secret_access_key = options[:secret_access_key]
+         @use_iam_profile   = options[:use_iam_profile]
+         @region            = options[:region]
+         @bucket            = options[:bucket]
+         @chunk_size        = options[:chunk_size]
+         @encryption        = options[:encryption]
+         @storage_class     = options[:storage_class]
+         @fog_options       = options[:fog_options]
+       end
+
+       # The Syncer may call this method in multiple threads.
+       # However, #objects is always called prior to multithreading.
+       def upload(src, dest)
+         file_size = File.size(src)
+         chunk_bytes = chunk_size * 1024**2
+         if chunk_bytes > 0 && file_size > chunk_bytes
+           raise FileSizeError, <<-EOS if file_size > MAX_MULTIPART_SIZE
+             File Too Large
+             File: #{ src }
+             Size: #{ file_size }
+             Max Multipart Upload Size is #{ MAX_MULTIPART_SIZE } (5 TiB)
+           EOS
+
+           chunk_bytes = adjusted_chunk_bytes(chunk_bytes, file_size)
+           upload_id = initiate_multipart(dest)
+           parts = upload_parts(src, dest, upload_id, chunk_bytes, file_size)
+           complete_multipart(dest, upload_id, parts)
+         else
+           raise FileSizeError, <<-EOS if file_size > MAX_FILE_SIZE
+             File Too Large
+             File: #{ src }
+             Size: #{ file_size }
+             Max File Size is #{ MAX_FILE_SIZE } (5 GiB)
+           EOS
+
+           put_object(src, dest)
+         end
+       end
+
+       # Returns all objects in the bucket with the given prefix.
+       #
+       # - #get_bucket returns a max of 1000 objects per request.
+       # - Returns objects in alphabetical order.
+       # - If marker is given, only objects after the marker are in the response.
+       def objects(prefix)
+         objects = []
+         resp = nil
+         prefix = prefix.chomp('/')
+         opts = { 'prefix' => prefix + '/' }
+
+         while resp.nil? || resp.body['IsTruncated']
+           opts.merge!('marker' => objects.last.key) unless objects.empty?
+           with_retries("GET '#{ bucket }/#{ prefix }/*'") do
+             resp = connection.get_bucket(bucket, opts)
+           end
+           resp.body['Contents'].each do |obj_data|
+             objects << Object.new(self, obj_data)
+           end
+         end
+
+         objects
+       end
+
+       # Used by Object to fetch metadata if needed.
+       def head_object(object)
+         resp = nil
+         with_retries("HEAD '#{ bucket }/#{ object.key }'") do
+           resp = connection.head_object(bucket, object.key)
+         end
+         resp
+       end
+
+       # Delete object(s) from the bucket.
+       #
+       # - Called by the Storage (with objects) and the Syncer (with keys)
+       # - Deletes 1000 objects per request.
+       # - Missing objects will be ignored.
+       def delete(objects_or_keys)
+         keys = Array(objects_or_keys).dup
+         keys.map!(&:key) if keys.first.is_a?(Object)
+
+         opts = { :quiet => true } # only report Errors in DeleteResult
+         until keys.empty?
+           _keys = keys.slice!(0, 1000)
+           with_retries('DELETE Multiple Objects') do
+             resp = connection.delete_multiple_objects(bucket, _keys, opts.dup)
+             unless resp.body['DeleteResult'].empty?
+               errors = resp.body['DeleteResult'].map do |result|
+                 error = result['Error']
+                 "Failed to delete: #{ error['Key'] }\n" +
+                 "Reason: #{ error['Code'] }: #{ error['Message'] }"
+               end.join("\n")
+               raise Error, "The server returned the following:\n#{ errors }"
+             end
+           end
+         end
+       end
+
+       private
+
+       def connection
+         @connection ||= begin
+           opts = { :provider => 'AWS', :region => region }
+           if use_iam_profile
+             opts.merge!(:use_iam_profile => true)
+           else
+             opts.merge!(
+               :aws_access_key_id => access_key_id,
+               :aws_secret_access_key => secret_access_key
+             )
+           end
+           opts.merge!(fog_options || {})
+           conn = Fog::Storage.new(opts)
+           conn.sync_clock
+           conn
+         end
+       end
+
+       def put_object(src, dest)
+         md5 = Base64.encode64(Digest::MD5.file(src).digest).chomp
+         options = headers.merge('Content-MD5' => md5)
+         with_retries("PUT '#{ bucket }/#{ dest }'") do
+           File.open(src, 'r') do |file|
+             connection.put_object(bucket, dest, file, options)
+           end
+         end
+       end
+
+       def initiate_multipart(dest)
+         Logger.info "\s\sInitiate Multipart '#{ bucket }/#{ dest }'"
+
+         resp = nil
+         with_retries("POST '#{ bucket }/#{ dest }' (Initiate)") do
+           resp = connection.initiate_multipart_upload(bucket, dest, headers)
+         end
+         resp.body['UploadId']
+       end
+
+       # Each part's MD5 is sent to verify the transfer.
+       # AWS will concatenate all parts into a single object
+       # once the multipart upload is completed.
+       def upload_parts(src, dest, upload_id, chunk_bytes, file_size)
+         total_parts = (file_size / chunk_bytes.to_f).ceil
+         progress = (0.1..0.9).step(0.1).map {|n| (total_parts * n).floor }
+         Logger.info "\s\sUploading #{ total_parts } Parts..."
+
+         parts = []
+         File.open(src, 'r') do |file|
+           part_number = 0
+           while data = file.read(chunk_bytes)
+             part_number += 1
+             md5 = Base64.encode64(Digest::MD5.digest(data)).chomp
+
+             with_retries("PUT '#{ bucket }/#{ dest }' Part ##{ part_number }") do
+               resp = connection.upload_part(
+                 bucket, dest, upload_id, part_number, StringIO.new(data),
+                 { 'Content-MD5' => md5 }
+               )
+               parts << resp.headers['ETag']
+             end
+
+             if i = progress.rindex(part_number)
+               Logger.info "\s\s...#{ i + 1 }0% Complete..."
+             end
+           end
+         end
+         parts
+       end
+
+       def complete_multipart(dest, upload_id, parts)
+         Logger.info "\s\sComplete Multipart '#{ bucket }/#{ dest }'"
+
+         with_retries("POST '#{ bucket }/#{ dest }' (Complete)") do
+           resp = connection.complete_multipart_upload(bucket, dest, upload_id, parts)
+           raise Error, <<-EOS if resp.body['Code']
+             The server returned the following error:
+             #{ resp.body['Code'] }: #{ resp.body['Message'] }
+           EOS
+         end
+       end
+
+       def headers
+         headers = {}
+
+         enc = encryption.to_s.upcase
+         headers.merge!(
+           { 'x-amz-server-side-encryption' => enc }
+         ) unless enc.empty?
+
+         sc = storage_class.to_s.upcase
+         headers.merge!(
+           { 'x-amz-storage-class' => sc }
+         ) unless sc.empty? || sc == 'STANDARD'
+
+         headers
+       end
+
+       def adjusted_chunk_bytes(chunk_bytes, file_size)
+         return chunk_bytes if file_size / chunk_bytes.to_f <= 10_000
+
+         mb = orig_mb = chunk_bytes / 1024**2
+         mb += 1 until file_size / (1024**2 * mb).to_f <= 10_000
+         Logger.warn Error.new(<<-EOS)
+           Chunk Size Adjusted
+           Your original #chunk_size of #{ orig_mb } MiB has been adjusted
+           to #{ mb } MiB in order to satisfy the limit of 10,000 chunks.
+           To enforce your chosen #chunk_size, you should use the Splitter.
+           e.g. split_into_chunks_of #{ mb * 10_000 } (#chunk_size * 10_000)
+         EOS
+         1024**2 * mb
+       end
+
+       class Object
+         attr_reader :key, :etag, :storage_class
+
+         def initialize(cloud_io, data)
+           @cloud_io = cloud_io
+           @key  = data['Key']
+           @etag = data['ETag']
+           @storage_class = data['StorageClass']
+         end
+
+         # currently 'AES256' or nil
+         def encryption
+           metadata['x-amz-server-side-encryption']
+         end
+
+         private
+
+         def metadata
+           @metadata ||= @cloud_io.head_object(self).headers
+         end
+       end
+
+     end
+   end
+ end
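
The `adjusted_chunk_bytes` method above enforces S3's 10,000-part limit on multipart uploads by growing the part size in 1 MiB steps until the file fits. A minimal standalone sketch of that arithmetic (the constant and method names below are illustrative, not part of the gem):

    # Grow the chunk size in 1 MiB steps until the file fits in 10,000 parts.
    MIB       = 1024**2
    MAX_PARTS = 10_000

    def fit_chunk_bytes(chunk_bytes, file_size)
      return chunk_bytes if file_size / chunk_bytes.to_f <= MAX_PARTS
      mb = chunk_bytes / MIB
      mb += 1 until file_size / (MIB * mb).to_f <= MAX_PARTS
      MIB * mb
    end

For example, a 60 GiB file with a 5 MiB `chunk_size` would need 12,288 parts, so the part size is raised to 7 MiB (8,778 parts).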
data/lib/backup/compressor/base.rb
@@ -0,0 +1,35 @@
+ # encoding: utf-8
+
+ module Backup
+   module Compressor
+     class Base
+       include Utilities::Helpers
+       include Config::Helpers
+
+       ##
+       # Yields to the block the compressor command and filename extension.
+       def compress_with
+         log!
+         yield @cmd, @ext
+       end
+
+       private
+
+       ##
+       # Return the compressor name, with Backup namespace removed
+       def compressor_name
+         self.class.to_s.sub('Backup::', '')
+       end
+
+       ##
+       # Logs a message to the console and log file to inform
+       # the client that Backup is using the compressor
+       def log!
+         Logger.info "Using #{ compressor_name } for compression.\n" +
+             "  Command: '#{ @cmd }'\n" +
+             "  Ext: '#{ @ext }'"
+       end
+
+     end
+   end
+ end
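
`compress_with` is the entire interface between a compressor and the packaging step: the caller receives a shell command to splice into its pipeline and an extension to append to the package filename. A sketch of a consumer, assuming hypothetical `pipeline` and `basename` objects (this is not the gem's actual Packager):

    compressor.compress_with do |cmd, ext|
      pipeline << cmd    # e.g. '/bin/gzip -9' joins the tar pipeline
      basename << ext    # e.g. 'backup.tar' becomes 'backup.tar.gz'
    end

Keeping the interface to just `@cmd` and `@ext` is what lets every subclass below stay so small.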
data/lib/backup/compressor/bzip2.rb
@@ -0,0 +1,39 @@
+ # encoding: utf-8
+
+ module Backup
+   module Compressor
+     class Bzip2 < Base
+
+       ##
+       # Specify the level of compression to use.
+       #
+       # Values should be a single digit from 1 to 9.
+       # Note that setting the level to either extreme may or may not
+       # give the desired result. Be sure to check the documentation
+       # for the compressor being used.
+       #
+       # The default `level` is 9.
+       attr_accessor :level
+
+       ##
+       # Creates a new instance of Backup::Compressor::Bzip2
+       def initialize(&block)
+         load_defaults!
+
+         @level ||= false
+
+         instance_eval(&block) if block_given?
+
+         @cmd = "#{ utility(:bzip2) }#{ options }"
+         @ext = '.bz2'
+       end
+
+       private
+
+       def options
+         " -#{ @level }" if @level
+       end
+
+     end
+   end
+ end
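
For reference, a compressor like this is configured in a Backup model through the gem's documented DSL; a sketch, with `:my_backup` as a placeholder trigger:

    Model.new(:my_backup, 'My Backup') do
      compress_with Bzip2 do |compression|
        compression.level = 9
      end
    end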
data/lib/backup/compressor/custom.rb
@@ -0,0 +1,53 @@
+ # encoding: utf-8
+
+ module Backup
+   module Compressor
+     class Custom < Base
+
+       ##
+       # Specify the system command to invoke a compressor,
+       # including any command-line arguments.
+       # e.g. @compressor.command = 'pbzip2 -p2 -4'
+       #
+       # The data to be compressed will be piped to the command's STDIN,
+       # and it should write the compressed data to STDOUT.
+       # i.e. `cat file.tar | %command% > file.tar.%extension%`
+       attr_accessor :command
+
+       ##
+       # File extension to append to the compressed file's filename.
+       # e.g. @compressor.extension = '.bz2'
+       attr_accessor :extension
+
+       ##
+       # Initializes a new custom compressor.
+       def initialize(&block)
+         load_defaults!
+
+         instance_eval(&block) if block_given?
+
+         @cmd = set_cmd
+         @ext = set_ext
+       end
+
+       private
+
+       ##
+       # Return the command line using the full path.
+       # Ensures the command exists and is executable.
+       def set_cmd
+         parts = @command.to_s.split(' ')
+         parts[0] = utility(parts[0])
+         parts.join(' ')
+       end
+
+       ##
+       # Return the extension given without whitespace.
+       # If extension was not set, return an empty string
+       def set_ext
+         @extension.to_s.strip
+       end
+
+     end
+   end
+ end
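
A configuration example mirroring the `e.g.` values in the file's own comments:

    compress_with Custom do |compression|
      compression.command   = 'pbzip2 -p2 -4'
      compression.extension = '.bz2'
    end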
data/lib/backup/compressor/gzip.rb
@@ -0,0 +1,74 @@
+ # encoding: utf-8
+
+ module Backup
+   module Compressor
+     class Gzip < Base
+       class Error < Backup::Error; end
+       extend Utilities::Helpers
+
+       ##
+       # Specify the level of compression to use.
+       #
+       # Values should be a single digit from 1 to 9.
+       # Note that setting the level to either extreme may or may not
+       # give the desired result. Be sure to check the documentation
+       # for the compressor being used.
+       #
+       # The default `level` is 6.
+       attr_accessor :level
+
+       ##
+       # Use the `--rsyncable` option with `gzip`.
+       #
+       # This option directs `gzip` to compress data using an algorithm that
+       # allows `rsync` to efficiently detect changes. This is especially useful
+       # when used to compress `Archive` or `Database` backups that will be
+       # stored using Backup's `RSync` Storage option.
+       #
+       # The `--rsyncable` option is only available on patched versions of `gzip`.
+       # While most distributions apply this patch, this option may not be
+       # available on your system. If it's not available, Backup will log a
+       # warning and continue to use the compressor without this option.
+       attr_accessor :rsyncable
+
+       ##
+       # Determine if +--rsyncable+ is supported and cache the result.
+       def self.has_rsyncable?
+         return @has_rsyncable unless @has_rsyncable.nil?
+         cmd = "#{ utility(:gzip) } --rsyncable --version >/dev/null 2>&1; echo $?"
+         @has_rsyncable = %x[#{ cmd }].chomp == '0'
+       end
+
+       ##
+       # Creates a new instance of Backup::Compressor::Gzip
+       def initialize(&block)
+         load_defaults!
+
+         @level ||= false
+         @rsyncable ||= false
+
+         instance_eval(&block) if block_given?
+
+         @cmd = "#{ utility(:gzip) }#{ options }"
+         @ext = '.gz'
+       end
+
+       private
+
+       def options
+         opts = ''
+         opts << " -#{ @level }" if @level
+         if self.class.has_rsyncable?
+           opts << ' --rsyncable'
+         else
+           Logger.warn Error.new(<<-EOS)
+             'rsyncable' option ignored.
+             Your system's 'gzip' does not support the `--rsyncable` option.
+           EOS
+         end if @rsyncable
+         opts
+       end
+
+     end
+   end
+ end
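
A typical configuration following the gem's documented DSL (values illustrative):

    compress_with Gzip do |compression|
      compression.level     = 6
      compression.rsyncable = true  # ignored, with a logged warning, if gzip lacks the patch
    end

Note the `has_rsyncable?` probe above runs `gzip --rsyncable --version` once and caches the result, so the warning is based on the actual binary found on the host's PATH.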