rtbackup 0.1.21

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,14 @@
1
+ # frozen_string_literal: true
2
+
3
module ServerBackups
  # Raised when a backup cannot be created. Carries the backup class and
  # the backup type (:daily, :weekly, ...) alongside the usual message.
  class BackupCreationError < StandardError
    attr_reader :backup_class, :backup_type

    # msg          - human-readable description; becomes #message
    # backup_class - the backup implementation that failed
    # backup_type  - symbolic backup kind, e.g. :daily
    def initialize(msg, backup_class, backup_type)
      super(msg)
      @backup_class = backup_class
      @backup_type = backup_type
    end
  end

  # Raised when extracting a tar archive during a restore fails.
  class RestoreTarError < BackupCreationError; end
end
@@ -0,0 +1,90 @@
1
+ # frozen_string_literal: true
2
+
3
module ServerBackups
  # Dumps one MySQL database (or every non-system database) to a gzipped
  # SQL file, using the scheduling/upload machinery inherited from
  # BackupBase.
  class MysqlBackup < BackupBase
    attr_reader :database_name

    # Schemas that ship with MySQL itself; never backed up.
    SYSTEM_DATABASES = %w[sys information_schema mysql performance_schema].freeze

    def initialize(config_file, working_directory, backup_type, database_name)
      @database_name = database_name
      super(config_file, working_directory, backup_type)
    end

    # 'all' fans out to one backup per user database; any other name is a
    # single-database backup handled by BackupBase#do_backup.
    def do_backup
      return backup_all_databases if database_name == 'all'

      super
    end

    # Connects through the 'mysql' schema to enumerate databases, then
    # runs a separate backup of the current type for each one.
    def backup_all_databases
      @database_name = 'mysql'
      all_databases.each do |database|
        self.class.send(backup_type,
                        config.config_file,
                        working_directory,
                        database).do_backup
      end
    end

    class << self
      # Factory helpers: MysqlBackup.daily(...), .weekly(...), etc.
      def daily(config_file, working_directory, database_name)
        new(config_file, working_directory, :daily, database_name)
      end

      def weekly(config_file, working_directory, database_name)
        new(config_file, working_directory, :weekly, database_name)
      end

      def monthly(config_file, working_directory, database_name)
        new(config_file, working_directory, :monthly, database_name)
      end

      # Incremental backups are implemented by a dedicated subclass.
      def incremental(config_file, working_directory, database_name)
        MysqlIncrementalBackup.new(config_file, working_directory, database_name)
      end
    end

    # Full mysqldump piped through gzip into backup_path. When binary
    # logging is enabled the logs are rotated so later incremental
    # backups can pick up from this dump.
    def create_archive_command
      command = config.mysqldump_bin + ' --quick --single-transaction --create-options '
      command += ' --flush-logs --master-data=2 --delete-master-logs ' if binary_logging?
      command + cli_options + ' | gzip > ' + backup_path
    end

    # S3 key prefix: <prefix>/mysql_backup/<database>/<type>/
    def s3_prefix
      File.join(config.prefix, self.class.name.demodulize.underscore,
                database_name, backup_type.to_s, '/')
    end

    def backup_filename
      "mysql_backup.#{backup_type}.#{timestamp}.sql.gz"
    end

    # Every database on the server except MySQL's own system schemas.
    def all_databases
      execute_sql('show databases;').reject { |db_name| db_name.in?(SYSTEM_DATABASES) }
    end

    private

    def binary_logging?
      !config.bin_log.blank?
    end

    # NOTE(review): the password is interpolated onto the command line and
    # is therefore visible in the process list; --defaults-extra-file
    # would be safer — confirm before changing.
    def cli_options
      credentials = config.password.blank? ? '' : " -p'#{config.password}' "
      credentials + " -u'#{config.user}' -h #{config.db_host} " + database_name
    end

    # Runs the given SQL through the mysql CLI; returns the output lines,
    # or nil when the command produced no output.
    # NOTE(review): $CHILD_STATUS needs `require 'English'` — confirm the
    # gem entry point loads it.
    def execute_sql(sql)
      cmd = "#{config.mysql_bin} --silent --skip-column-names -e \"#{sql}\" #{cli_options}"
      logger.debug "Executing raw SQL against #{database_name}\n#{cmd}"
      output = `#{cmd}`
      logger.debug "Returned #{$CHILD_STATUS.inspect}. STDOUT was:\n#{output}"
      output.split("\n") unless output.blank?
    end
  end
end
@@ -0,0 +1,89 @@
1
+ # frozen_string_literal: true
2
+
3
module ServerBackups
  # Ships MySQL binary logs to S3. One "incremental backup" means: flush
  # the logs so the active one is rotated out, then upload every completed
  # binlog whose rotation index has not already been stored remotely.
  class MysqlIncrementalBackup < MysqlBackup
    def initialize(config_file, working_directory, database_name)
      # Binary logs are server-wide, so 'all' degrades to connecting via
      # the 'mysql' schema.
      database_name = 'mysql' if database_name == 'all'

      super(config_file, working_directory, :incremental, database_name)
    end

    # Extracts the six-digit rotation index from a binlog file name.
    class BinlogFilename
      attr_reader :path

      def initialize(path)
        @path = path
      end

      def log_index
        /(\d{6})/.match(File.basename(path)).captures.first
      end
    end

    def do_backup
      load_resources
      flush_logs
      each_bin_log do |file|
        index = BinlogFilename.new(file).log_index
        next if index.in?(already_stored_log_indexes)

        backup_single_bin_log(file)
      end
    end

    def s3_prefix
      File.join(config.prefix, 'mysql_backup', 'incremental', '/')
    end

    # Rotates the server's binary logs so the current log is closed.
    def flush_logs
      execute_sql('flush logs;')
    end

    # Yields each completed binlog on disk, oldest first. The highest
    # numbered log is still being written to, so it is skipped.
    # NOTE(review): the sort key is the FIRST digit run in the whole path;
    # assumes the directory portion contains no digits — confirm.
    def each_bin_log(&block)
      logs = Dir.glob("#{config.bin_log}.[0-9]*").sort_by { |f| f[/\d+/].to_i }
      logs[0..-2].each(&block)
    end

    private

    # Remote binlog keys look like "<basename>.<index>.<timestamp>".
    REMOTE_FILENAME_REGEX = /(.*)\.(\d+)\.(.{15})/

    # Indexes of binlogs already uploaded, making re-runs idempotent.
    def already_stored_log_indexes
      remote_bin_logs.map do |s3object|
        _, index = REMOTE_FILENAME_REGEX.match(s3object.key).captures
        index
      end
    end

    def remote_bin_logs
      s3.bucket.objects(prefix: s3_prefix)
    end

    # Uploads a single binlog, suffixing its name with the run timestamp.
    def backup_single_bin_log(file)
      logger.debug "Backing up #{file}."
      dest_filename = File.basename(file) + '.' + timestamp
      logger.info "Storing #{file} to #{dest_filename}"
      s3.save file, File.join(s3_prefix, dest_filename)
    end
  end
end
@@ -0,0 +1,86 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'tempfile'
4
+
5
module ServerBackups
  # Rebuilds a database to a point in time: download the newest full dump
  # taken at or before the restore point, append replayed binlog SQL up to
  # that point, then feed the combined script to the mysql CLI.
  class MysqlRestore < RestoreBase
    ALL_DATABASES = 'all'

    def initialize(config_file, working_dir, restore_point, database)
      @database = database
      super config_file, working_dir, restore_point
    end

    # Most recent full dump at or before the restore point.
    def full_backup_file
      full_backup_prefix = File.join(config.prefix, 'mysql_backup', database)
      s3.get_ordered_collection(full_backup_prefix).full_backup_for(restore_point)
    end

    # Binlog uploads between that full dump and the restore point.
    def incremental_backups
      incr_backup_prefix = File.join(config.prefix, 'mysql_backup')
      s3.get_ordered_collection(incr_backup_prefix).incremental_backups_for(restore_point)
    end

    def restore_script_path
      File.join(working_dir, "#{database}.sql")
    end

    ETC_TIMEZONE = '/etc/timezone'

    # mysqlbinlog's --stop-datetime is interpreted in the DB server's own
    # time zone, hence the conversion out of the configured app zone.
    def formatted_restore_point_in_system_time_zone
      restore_point.in_time_zone(config.system_time_zone) \
                   .strftime('%Y-%m-%d %H:%M:%S')
    end

    # Downloads the full dump, appends each eligible binlog's SQL (stopping
    # at the restore point), and executes the resulting script.
    def do_restore
      full_backup_file.get response_target: (restore_script_path + '.gz')
      system "gunzip #{restore_script_path}.gz"

      incremental_backups.each do |s3object|
        file = Tempfile.new('foo')
        begin
          s3object.get response_target: file
          file.close
          system config.mysqlbinlog_bin + ' ' + file.path + \
                 " --stop-datetime='#{formatted_restore_point_in_system_time_zone}'" \
                 " --database=#{database} >> " + restore_script_path
        ensure
          file.close
          file.unlink # deletes the temp file
        end
      end

      execute_script restore_script_path
    end

    class << self
      # Restores one database, or every database when given 'all'.
      def restore(config_file, working_dir, restore_point, database)
        if database == ALL_DATABASES
          all_databases(config_file, working_dir).each do |db_name|
            new(config_file, working_dir, restore_point, db_name).do_restore
          end
        else
          new(config_file, working_dir, restore_point, database).do_restore
        end
      end

      # Database list obtained by connecting through the 'mysql' schema.
      def all_databases(config_file, working_dir)
        MysqlBackup.new(config_file, working_dir, 'daily', 'mysql').all_databases
      end
    end

    private

    # NOTE(review): unlike MysqlBackup#cli_options this omits
    # "-h #{config.db_host}", so restores always hit the default host —
    # confirm that is intentional.
    def cli_options
      credentials = config.password.blank? ? '' : " -p'#{config.password}' "
      credentials + " -u'#{config.user}' " + database
    end

    # Pipes the assembled SQL script into the mysql CLI; returns the output
    # lines, or nil when the command produced no output.
    def execute_script(path)
      cmd = "#{config.mysql_bin} --silent --skip-column-names #{cli_options}"
      logger.debug "Executing raw SQL against #{ database}\n#{cmd}"
      output = `#{cmd} < #{path}`
      logger.debug "Returned #{$CHILD_STATUS.inspect}. STDOUT was:\n#{output}"
      output.split("\n") unless output.blank?
    end
  end
end
@@ -0,0 +1,33 @@
1
+ require 'slack-notifier'
2
+
3
module ServerBackups
  # Posts backup success/failure notices to Slack when a webhook is
  # configured. All settings come from the Config object.
  class Notifier
    attr_reader :config

    def initialize(config_path)
      @config = Config.new(config_path)
    end

    # Posts a success message, mentioning the configured users.
    # No-op unless a webhook is set AND success notification is enabled.
    def notify_success
      return unless config.slack_webhook && config.notify_on_success

      notifier = Slack::Notifier.new config.slack_webhook
      message = "Backups at `#{config.prefix}` succeeded. "
      message += config.slack_mention_on_success.map { |t| "<@#{t}>" }.to_sentence
      notifier.post text: message, icon_emoji: ':100:'
    end

    # Posts a failure message with one attachment per error, each carrying
    # the error message and backtrace. No-op without a webhook.
    def notify_failure(errors)
      return unless config.slack_webhook

      notifier = Slack::Notifier.new config.slack_webhook
      message = "Backups at `#{config.prefix}` failed. "
      message += config.slack_mention_on_failure.map { |t| "<@#{t}>" }.to_sentence
      # `map` instead of the original `for` loop: `for` leaks its loop
      # variable into the enclosing scope and is non-idiomatic Ruby.
      attachments = errors.map do |error|
        { text: error.message + "\n" + error.backtrace.join("\n") }
      end
      notifier.post text: message, icon_emoji: ':bomb:', attachments: attachments
    end
  end
end
@@ -0,0 +1,54 @@
1
+ # frozen_string_literal: true
2
+
3
module ServerBackups
  # Wraps a collection of S3 backup objects and answers time-based
  # queries: which full backup governs a restore point, and which
  # incremental backups must be replayed after it. Ordering comes from
  # the timestamp embedded in each object's key.
  class OrderedBackupFileCollection
    attr_reader :s3_collection

    def initialize(s3_collection)
      @s3_collection = s3_collection
    end

    # The latest full backup taken at or before restore_point, or nil
    # when none qualifies.
    def full_backup_for(restore_point)
      sorted(full_backups).reverse.find do |file|
        get_timestamp_from_s3_object(file) <= restore_point
      end
    end

    # Incremental backups after the governing full backup, up to and
    # including the restore point, oldest first.
    def incremental_backups_for(restore_point)
      sorted eligible_incremental_backups(restore_point)
    end

    # Full vs incremental is decided purely from the object key.
    INCREMENTAL = /incremental/i

    def full_backups
      s3_collection.reject { |file| INCREMENTAL =~ file.key }
    end

    def incremental_backups
      @incremental_backups ||=
        sorted(s3_collection.select { |file| INCREMENTAL =~ file.key }).to_a
    end

    private

    # Keys embed timestamps such as "2020-01-02T0300.UTC+0000".
    TIMESTAMP_REGEXP = /(\d{4})-(\d{2})-(\d{2})T(\d{2})00\.UTC([+-]\d{4})/

    def get_timestamp_from_s3_object(file)
      year, month, day, hour, zone = TIMESTAMP_REGEXP.match(file.key).captures
      # Time.new wants a "+HH:MM" style offset and explicit minutes and
      # seconds (https://ruby-doc.org/core-2.2.0/Time.html#method-c-new).
      Time.new(year, month, day, hour, 0, 0, zone.insert(3, ':'))
    end

    def sorted(coll)
      coll.sort_by { |file| get_timestamp_from_s3_object file }
    end

    # Incrementals strictly after the governing full backup and no later
    # than the restore point.
    def eligible_incremental_backups(restore_point)
      full_backup_timestamp = get_timestamp_from_s3_object full_backup_for(restore_point)
      incremental_backups.select do |file|
        timestamp = get_timestamp_from_s3_object(file)
        timestamp > full_backup_timestamp && timestamp <= restore_point
      end
    end
  end
end
@@ -0,0 +1,47 @@
1
+ # frozen_string_literal: true
2
+
3
module ServerBackups
  # Shared plumbing for restore operations: loads configuration, resolves
  # the requested restore point (defaulting to "now"), and opens the S3
  # connection used to fetch backup artifacts.
  class RestoreBase
    # :database is populated by subclasses (e.g. MysqlRestore) before
    # calling super.
    attr_reader :config, :s3, :working_dir, :restore_point, :database

    def initialize(config_file, working_dir, restore_point)
      @working_dir = working_dir
      @config = Config.new(config_file)
      Time.zone = config.time_zone
      # A blank restore point means "restore to the latest state".
      @restore_point = restore_point.present? ? restore_point : Time.zone.now
      @s3 = S3.new(config)
      logger.debug "Initialized #{title}."
    end

    private

    # Human-readable class name, e.g. "Mysql Restore", for log messages.
    def title
      self.class.name.demodulize.titleize
    end

    # Matches timestamps embedded in backup filenames,
    # e.g. "2020-01-02T0300.UTC+0000".
    TIMESTAMP_REGEXP = /(\d{4})-(\d{2})-(\d{2})T(\d{2})00\.UTC([+-]\d{4})/
    def extract_backup_time_from_filename(filename)
      time_parts = TIMESTAMP_REGEXP.match(filename).captures
      # BUG FIX: Time.new requires a "+HH:MM" style utc_offset; the raw
      # "+0000" capture raised ArgumentError. Normalize it exactly as
      # OrderedBackupFileCollection#get_timestamp_from_s3_object does.
      time_parts[-1].insert(3, ':')
      # Add in minutes and seconds arguments
      # https://ruby-doc.org/core-2.2.0/Time.html#method-c-new
      time_parts.insert(4, 0, 0)
      Time.new(*time_parts)
    end

    def all_files
      @all_files ||= s3.get_ordered_collection(s3_prefix)
    end

    def logger
      config.logger
    end

    # NOTE(review): $CHILD_STATUS requires `require 'English'` — confirm
    # the gem entry point loads it.
    def last_command_succeeded?
      $CHILD_STATUS.exitstatus.zero?
    end
  end
end
@@ -0,0 +1,71 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'aws-sdk-s3'
4
+
5
module ServerBackups
  # Thin wrapper around the AWS S3 SDK: lazily-built client/bucket,
  # existence checks, uploads, and pruning of expired backup files.
  class S3
    PROVIDER = 'AWS'
    attr_reader :config, :logger

    def initialize(config)
      @config = config
      @logger = config.logger
    end

    # Lazily-built S3 client authenticated from the backup config.
    def client
      @client ||= begin
        Aws.config[:credentials] = Aws::Credentials.new(
          config.access_key_id, config.secret_access_key
        )
        Aws::S3::Client.new region: config.region
      end
    end

    def bucket
      @bucket ||= Aws::S3::Bucket.new(config.bucket, client: client)
    end

    def get_ordered_collection(prefix)
      OrderedBackupFileCollection.new bucket.objects(prefix: prefix)
    end

    # Deletes every object under `key` whose last-modified time is `age`
    # or older. BUG FIX: this previously passed the prefix `key` to
    # #destroy instead of each object's own key, so expired files were
    # never actually removed.
    def delete_files_not_newer_than(key, age)
      bucket.objects(prefix: key).each do |file|
        destroy file.key, true unless file.last_modified.to_datetime > age
      end
    end

    def exists?(path)
      logger.debug "Exists? #{config.bucket} #{path}"
      !bucket.objects(prefix: path).to_a.empty?
      # !!client.head_object(bucket: config.bucket, key: path)
    end

    # existence_known: pass true to skip the extra existence round-trip
    # when the caller already knows the object exists.
    def destroy(key, existence_known = false)
      return unless existence_known || exists?(key)

      client.delete_object bucket: config.bucket, key: key
    end

    # Uploads a local file. A trailing '/' in s3_key means "directory":
    # the local basename is appended. Existing objects are never
    # overwritten.
    def save(local_file_name, s3_key)
      full_path = if s3_key[-1] == '/'
                    File.join(s3_key, File.basename(local_file_name))
                  else
                    s3_key
                  end

      return if exists?(full_path)

      object = Aws::S3::Object.new(config.bucket, full_path, client: client)
      # Block form guarantees the file handle is closed (the old code
      # leaked it); the MD5 lets S3 verify upload integrity.
      File.open(local_file_name, 'rb') do |body|
        object.put(
          acl: 'private',
          body: body,
          content_md5: md5of(local_file_name),
          storage_class: 'STANDARD_IA'
        )
      end
    end

    private

    # Streams the file through MD5 instead of slurping it into memory —
    # backup archives can be large.
    def md5of(local_file_name)
      Digest::MD5.file(local_file_name).base64digest
    end
  end
end