airblade-mysql_s3_backup 0.0.2
Sign up to get free protection for your applications and to get access to all the features.
- data/README.md +57 -0
- data/Rakefile +17 -0
- data/bin/mysql_s3_backup +23 -0
- data/config/sample.yml +18 -0
- data/lib/mysql_s3_backup.rb +6 -0
- data/lib/mysql_s3_backup/backup.rb +84 -0
- data/lib/mysql_s3_backup/bucket.rb +59 -0
- data/lib/mysql_s3_backup/config.rb +30 -0
- data/lib/mysql_s3_backup/hash_ext.rb +10 -0
- data/lib/mysql_s3_backup/mysql.rb +59 -0
- data/lib/mysql_s3_backup/shell.rb +13 -0
- data/mysql_s3_backup.gemspec +17 -0
- data/spec/backup_spec.rb +51 -0
- data/spec/drop.sql +1 -0
- data/spec/insert.sql +1 -0
- data/spec/load.sql +4 -0
- data/spec/spec_helper.rb +16 -0
- metadata +106 -0
data/README.md
ADDED
@@ -0,0 +1,57 @@
|
|
1
|
+
# Send your Mysql backups to S3

A simple backup script for Mysql and S3 with incremental backups.

It's all based on Paul Dowman's blog post: http://pauldowman.com/2009/02/08/mysql-s3-backup/, so read this first.

## Configuration

To use incremental backups you need to enable binary logging by making sure that the MySQL config file (my.cnf) has the following line in it:

    log_bin = /var/db/mysql/binlog/mysql-bin

The MySQL user needs to have the RELOAD and the SUPER privileges. These can be granted with the following SQL commands (which need to be executed as the MySQL root user):

    GRANT RELOAD ON *.* TO 'user_name'@'%' IDENTIFIED BY 'password';
    GRANT SUPER ON *.* TO 'user_name'@'%' IDENTIFIED BY 'password';

## Usage

Create a YAML config file:

    mysql:
      # Database name to backup
      database: muffins_development
      # Mysql user and password to execute commands
      user: root
      password: secret
      # Path to mysql binaries, like mysql, mysqldump (optional)
      bin_path: /usr/bin/
      # Path to the binary logs, should match the bin_log option in your my.cnf
      bin_log: /var/lib/mysql/binlog/mysql-bin

    s3:
      # S3 bucket name to backup to
      bucket: db_backups
      # S3 credentials
      access_key_id: XXXXXXXXXXXXXXX
      secret_access_key: XXXXXXXXXXXXXXXXXXXXXX

Create a full backup:

    mysql_s3_backup -c=your_config.yml full

Create an incremental backup:

    mysql_s3_backup -c=your_config.yml inc

Restore the latest backup (applying incremental backups):

    mysql_s3_backup -c=your_config.yml restore

Restore a specific backup (NOT applying incremental backups):

    mysql_s3_backup -c=your_config.yml restore 20091126112233

## Running the specs

Create a config file in config/test.yml
|
data/Rakefile
ADDED
@@ -0,0 +1,17 @@
|
|
1
|
+
# RSpec 1.x rake integration (spec/rake/spectask is the pre-RSpec-2 API).
require "spec/rake/spectask"

# Running bare `rake` runs the specs.
task :default => :spec

Spec::Rake::SpecTask.new do |t|
  # -fs: "specdoc" output format; -c: coloured output.
  t.spec_opts = %w(-fs -c)
  t.spec_files = FileList["spec/**_spec.rb"]
end

# Builds the gem from the gemspec in the current directory.
task :gem do
  sh "gem build mysql_s3_backup.gemspec"
end

# Prints (does not execute) the push command for the most recent gem build.
# NOTE(review): presumably an intentional dry run — confirm before "fixing".
task :push => :gem do
  file = Dir["*.gem"].sort.last
  puts "gem push #{file}"
end
|
data/bin/mysql_s3_backup
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
#!/usr/bin/env ruby -s
# CLI entry point. The interpreter's -s switch turns "-c=foo" style switches
# into global variables, so `mysql_s3_backup -c=config.yml full` sets $c.
require "mysql_s3_backup"

config_file = $c

abort <<-USAGE unless config_file.is_a?(String) && ARGV.size > 0
usage:

Backing up:

  mysql_s3_backup -c=<config.yml> full

Incremental backups:

  mysql_s3_backup -c=<config.yml> inc

Restore a backup:

  mysql_s3_backup -c=<config.yml> restore <name>

USAGE

# Dispatches the remaining CLI words to the Backup object, e.g.
# ["restore", "20091126112233"] -> backup.restore("20091126112233").
# NOTE(review): `send` forwards *any* method name from the command line,
# including private and inherited ones — consider whitelisting full/inc/restore.
MysqlS3Backup::Config.from_yaml_file(config_file).backup.send(*ARGV)
|
data/config/sample.yml
ADDED
@@ -0,0 +1,18 @@
|
|
1
|
+
mysql:
  # Database name to backup
  database: muffins_development
  # Mysql user and password to execute commands
  user: root
  password: secret
  # Path to mysql binaries, like mysql, mysqldump (optional)
  bin_path: /usr/bin/
  # Path to the binary logs, should match the bin_log option in your my.cnf
  bin_log: /var/lib/mysql/binlog/mysql-bin

s3:
  # S3 bucket name to backup to
  bucket: db_backups
  # S3 credentials
  access_key_id: XXXXXXXXXXXXXXX
  secret_access_key: XXXXXXXXXXXXXXXXXXXXXX
|
18
|
+
|
@@ -0,0 +1,84 @@
|
|
1
|
+
require 'tempfile'
|
2
|
+
require 'lockfile'
|
3
|
+
|
4
|
+
module MysqlS3Backup
  # Orchestrates full and incremental MySQL backups to an S3 bucket, and
  # restores from them. Dumps live under "<database>/dumps/", archived binary
  # logs under "<database>/bin_logs/".
  class Backup
    attr_reader :mysql, :bucket

    # mysql  - a MysqlS3Backup::Mysql (dump/restore/bin-log operations).
    # bucket - a MysqlS3Backup::Bucket (S3 wrapper).
    def initialize(mysql, bucket)
      @mysql = mysql
      @bucket = bucket
      @bin_log_prefix = "#{@mysql.database}/bin_logs"
    end

    # Takes a full dump, uploads it under +name+ (default: a UTC timestamp)
    # and also copies it to the well-known "latest" key.
    def full(name=make_new_name)
      lock do
        # When the full backup runs it deletes any binary log files that might
        # already exist in the bucket. Otherwise the restore would try to apply
        # them even though they're older than the full backup.
        @bucket.delete_all @bin_log_prefix

        with_temp_file do |file|
          @mysql.dump(file)
          @bucket.store(dump_file_name(name), file)
          @bucket.copy(dump_file_name(name), dump_file_name("latest"))
        end
      end
    end

    # Uploads the binary logs closed since the last backup run.
    def incremental
      lock do
        @mysql.each_bin_log do |log|
          @bucket.store "#{@bin_log_prefix}/#{File.basename(log)}", log
        end
      end
    end
    alias :inc :incremental

    # Restores the named dump. When restoring "latest" (the default) the
    # archived binary logs are replayed on top; a named restore deliberately
    # ignores them.
    def restore(name="latest")
      lock do
        # restore from the dump file
        with_temp_file do |file|
          @bucket.fetch(dump_file_name(name), file)
          @mysql.restore(file)
        end

        if name == "latest"
          # Replay the archived binary log files, oldest first.
          @bucket.find("#{@bin_log_prefix}/").sort.each do |log|
            with_temp_file do |file|
              @bucket.fetch log, file
              @mysql.apply_bin_log file
            end
          end
        end
      end
    end

    private

    # Serialises backup/restore runs via an on-disk lockfile (lockfile gem);
    # :retries => 0 makes a concurrent run fail fast instead of waiting.
    def lock
      result = nil
      Lockfile("mysql_s3_backup_lock", :retries => 0) do
        result = yield
      end
      result
    end

    # S3 key for a dump, e.g. "mydb/dumps/latest.sql.gz".
    def dump_file_name(name)
      raise ArgumentError, "Need a backup name" unless name.is_a?(String)
      "#{@mysql.database}/dumps/#{name}.sql.gz"
    end

    # Minute-resolution UTC timestamp, e.g. "201005171230".
    def make_new_name
      Time.now.utc.strftime("%Y%m%d%H%M")
    end

    # Yields the path of a throw-away temp file and removes it afterwards.
    def with_temp_file
      dump_file = Tempfile.new("mysql-dump")
      yield dump_file.path
      nil
    ensure
      # Guard: if Tempfile.new itself raised, dump_file is nil and an
      # unguarded close! would raise NoMethodError here, masking the
      # original exception.
      dump_file.close! if dump_file
    end
  end
end
|
@@ -0,0 +1,59 @@
|
|
1
|
+
require "aws/s3"
|
2
|
+
|
3
|
+
module MysqlS3Backup
  # Thin wrapper around the aws-s3 gem for a single named S3 bucket.
  class Bucket
    # name    - the S3 bucket name.
    # options - aws-s3 connection options (access_key_id, secret_access_key,
    #           ...); SSL is always forced on. Connects and (idempotently)
    #           creates the bucket immediately.
    def initialize(name, options)
      @name = name
      @s3_options = options.symbolize_keys.merge(:use_ssl => true)
      connect
      create
    end

    def connect
      AWS::S3::Base.establish_connection!(@s3_options)
    end

    def create
      # It doesn't hurt to try to create a bucket that already exists
      AWS::S3::Bucket.create(@name)
    end

    # Uploads the local file at path +file+ under the key +file_name+.
    def store(file_name, file)
      # File.open (not Kernel#open) avoids the "| command" pipe behaviour on
      # attacker-influenced paths, and the block form closes the handle —
      # previously it was leaked.
      File.open(file) do |f|
        AWS::S3::S3Object.store(file_name, f, @name)
      end
    end

    def copy(file_name, new_file_name)
      AWS::S3::S3Object.copy(file_name, new_file_name, @name)
    end

    # Streams the object +file_name+ into the local file at path +file+.
    def fetch(file_name, file)
      File.open(file, 'w') do |f|
        AWS::S3::S3Object.stream(file_name, @name) do |chunk|
          f.write chunk
        end
      end
    end

    # Returns the keys of all objects whose key starts with +prefix+.
    def find(prefix)
      AWS::S3::Bucket.objects(@name, :prefix => prefix).map { |obj| obj.key }
    end

    def delete_all(prefix)
      # Wrap the objects' deletion in a loop to ensure we really do delete them all.
      #
      # This should not be necessary but, for reasons I haven't yet fathomed, the
      # `obj.delete` seems to increment some internal iteration pointer by two instead
      # of one. So instead of deleting each object in turn, it deletes the first, the
      # third, the fifth, etc.
      #
      # The `each` iterator behaves correctly; this does what you'd expect:
      #
      #   AWS::S3::Bucket.objects(@name, :prefix => prefix).each { |obj| puts obj.key }
      #
      # It's only when `delete` gets involved that things go pear shaped.
      while AWS::S3::Bucket.objects(@name, :prefix => prefix).size > 0
        AWS::S3::Bucket.objects(@name, :prefix => prefix).each { |obj| obj.delete }
      end
    end
  end
end
|
@@ -0,0 +1,30 @@
|
|
1
|
+
require "yaml"
|
2
|
+
|
3
|
+
module MysqlS3Backup
  # Reads the backup configuration (see config/sample.yml) and builds the
  # collaborating Mysql, Bucket and Backup objects from it.
  class Config
    # NOTE: :bucket removed from attr_reader — it was dead code, immediately
    # shadowed by the `bucket` method below, and would have exposed the raw
    # bucket *name* rather than a Bucket object.
    attr_reader :mysql_config, :s3_config

    # config - a hash with "mysql" and "s3" sections (string or symbol keys).
    def initialize(config)
      config = config.symbolize_keys
      @mysql_config = config[:mysql].symbolize_keys
      @s3_config = config[:s3].symbolize_keys
      # The bucket *name*; the remaining s3 options are connection credentials.
      @bucket = @s3_config.delete(:bucket)
    end

    # Builds a fresh Mysql wrapper on every call.
    def mysql
      MysqlS3Backup::Mysql.new(@mysql_config)
    end

    # Builds a fresh Bucket on every call (connects to S3 as a side effect).
    def bucket
      MysqlS3Backup::Bucket.new(@bucket, @s3_config)
    end

    def backup
      MysqlS3Backup::Backup.new(mysql, bucket)
    end

    def self.from_yaml_file(file)
      new YAML.load_file(file)
    end
  end
end
|
@@ -0,0 +1,59 @@
|
|
1
|
+
module MysqlS3Backup
  # Shells out to the MySQL command-line tools (mysql, mysqldump, mysqlbinlog)
  # for dumping, restoring and binary-log handling.
  class Mysql
    include Shell

    attr_reader :database, :bin_log_path

    # options keys (string or symbol):
    #   :user     - required.
    #   :password - optional.
    #   :database - required; the database to back up.
    #   :bin_log  - optional; path prefix of the binary logs (my.cnf log_bin).
    #   :bin_path - optional; directory (with trailing /) of mysql binaries.
    def initialize(options)
      options = options.symbolize_keys
      @user = options[:user] || raise(ArgumentError, "user required")
      @password = options[:password]
      @database = options[:database] || raise(ArgumentError, "database required")
      @bin_log_path = options[:bin_log]
      @bin_path = options[:bin_path]
    end

    # Credentials + database arguments for the mysql client.
    # NOTE(review): values are single-quoted but not escaped, so a quote in
    # user/password/database would break the command line.
    def cli_options
      cmd = "-u'#{@user}'"
      cmd += " -p'#{@password}'" if @password
      cmd += " #{@database}"
      cmd
    end

    def execute(sql)
      run %{#{@bin_path}mysql -e "#{sql}" #{cli_options}}
    end

    def execute_file(file)
      run "cat '#{file}' | #{@bin_path}mysql #{cli_options}"
    end

    # Dumps the database, gzipped, to +file+. When binary logging is enabled
    # the logs are flushed and rotated so incremental backups start cleanly.
    def dump(file)
      cmd = "#{@bin_path}mysqldump --quick --single-transaction --create-options -u'#{@user}'"
      cmd += " --flush-logs --master-data=2 --delete-master-logs" if @bin_log_path
      cmd += " -p'#{@password}'" if @password
      cmd += " #{@database} | gzip > #{file}"
      run cmd
    end

    def restore(file)
      run "gunzip -c #{file} | #{@bin_path}mysql #{cli_options}"
    end

    # Flushes the logs, yields every closed binary log (all but the newest,
    # still-active one) and then purges the yielded logs from the server.
    def each_bin_log
      execute "flush logs"
      logs = Dir.glob("#{@bin_log_path}.[0-9]*").sort
      # Guard: with no matching logs (e.g. binary logging disabled) the
      # original code crashed on File.basename(nil) below.
      return if logs.empty?
      logs_to_archive = logs[0..-2] # all logs except the last
      logs_to_archive.each do |log|
        yield log
      end
      execute "purge master logs to '#{File.basename(logs[-1])}'"
    end

    # Replays a binary log file against this database.
    def apply_bin_log(file)
      # Fix: use the configured bin_path for the mysql client here too,
      # consistently with execute/restore (previously bare "mysql" was run,
      # which fails when the binaries are not on PATH).
      cmd = "#{@bin_path}mysqlbinlog --database=#{@database} #{file} | #{@bin_path}mysql -u#{@user} "
      cmd += " -p'#{@password}' " if @password
      run cmd
    end
  end
end
|
@@ -0,0 +1,13 @@
|
|
1
|
+
module MysqlS3Backup
  # Raised when a shelled-out command exits with a non-zero status.
  class ShellCommandError < RuntimeError ; end

  # Mixin providing a single helper for running shell commands.
  module Shell
    # Runs +command+ in a subshell, echoing the command and its output when
    # $VERBOSE is set. Returns the command's chomped stdout; raises
    # ShellCommandError when the process exits non-zero.
    def run(command)
      puts command if $VERBOSE
      output = `#{command}`.chomp
      puts output if $VERBOSE
      unless $?.success?
        raise ShellCommandError, "error, process exited with status #{$?.exitstatus}: #{output}"
      end
      output
    end
  end
end
|
@@ -0,0 +1,17 @@
|
|
1
|
+
Gem::Specification.new do |s|
  s.name = "airblade-mysql_s3_backup"
  s.version = "0.0.2"

  s.authors = ["Marc-Andre Cournoyer", "Andrew Stewart"]
  s.email = "boss@airbladesoftware.com"
  # NOTE(review): Dir["**/*"] packages every file in the tree, including any
  # previously built *.gem files and local config — consider tightening.
  s.files = Dir["**/*"]
  s.homepage = "http://github.com/airblade/mysql_s3_backup"
  s.require_paths = ["lib"]
  s.bindir = "bin"
  # Every file in bin/ becomes an installed executable.
  s.executables = Dir["bin/*"].map { |f| File.basename(f) }
  s.summary = "A simple backup script for Mysql and S3 with incremental backups."
  s.test_files = Dir["spec/**"]

  # Runtime dependencies: S3 client and on-disk lock files.
  s.add_dependency "aws-s3"
  s.add_dependency "lockfile"
end
|
data/spec/backup_spec.rb
ADDED
@@ -0,0 +1,51 @@
|
|
1
|
+
require File.dirname(__FILE__) + "/spec_helper"

# Integration specs: they need a real MySQL server and real S3 credentials
# supplied via config/test.yml — see spec_helper.rb and the README.
describe MysqlS3Backup::Backup do
  after do
    # Remove everything the example uploaded under the database's S3 prefix.
    CONFIG.bucket.delete_all(CONFIG.mysql_config[:database])
  end

  it "should restore from full backup" do
    backup = CONFIG.backup

    execute_sql_file "load.sql"
    execute_sql_file "insert.sql"

    backup.full

    execute_sql_file "drop.sql"
    backup.restore

    # The row inserted before the full backup must be back after the restore.
    backup.mysql.execute("select count(*) as n from users;").should == "n\n1"
  end

  it "should restore from incremental backup" do
    backup = CONFIG.backup

    execute_sql_file "load.sql"
    backup.full

    # This insert happens *after* the full backup, so it only survives the
    # restore via the binary logs captured by the incremental backup.
    execute_sql_file "insert.sql"
    backup.inc

    execute_sql_file "drop.sql"
    backup.restore

    backup.mysql.execute("select count(*) as n from users;").should == "n\n1"
  end

  it "should restore named backup and ignore binary logs" do
    backup = CONFIG.backup

    execute_sql_file "load.sql"
    backup.full("named")

    execute_sql_file "insert.sql"
    backup.inc

    execute_sql_file "drop.sql"
    backup.restore("named")

    # A named restore skips the binary logs, so the post-backup insert is gone.
    backup.mysql.execute("select count(*) as n from users;").should == "n\n0"
  end
end
|
data/spec/drop.sql
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
-- Test fixture: remove the users table (the restore should bring it back).
DROP TABLE IF EXISTS users;
|
data/spec/insert.sql
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
-- Test fixture: add one row so `select count(*)` distinguishes backup states.
INSERT INTO users (name) VALUES ("ma");
|
data/spec/load.sql
ADDED
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,16 @@
|
|
1
|
+
require "rubygems"
# RSpec 1.x ("spec" gem).
require "spec"
$:.unshift File.dirname(__FILE__) + "/../lib"
require "mysql_s3_backup"

# Shared test configuration; config/test.yml must exist (see the README's
# "Running the specs" section) and point at a disposable database/bucket.
CONFIG = MysqlS3Backup::Config.from_yaml_file(File.dirname(__FILE__) + "/../config/test.yml")

module Helpers
  # Pipes the named SQL fixture (relative to this directory) into mysql.
  def execute_sql_file(file)
    CONFIG.mysql.execute_file File.dirname(__FILE__) + "/#{file}"
  end
end

Spec::Runner.configure do |config|
  config.include Helpers
end
|
metadata
ADDED
@@ -0,0 +1,106 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: airblade-mysql_s3_backup
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
prerelease: false
|
5
|
+
segments:
|
6
|
+
- 0
|
7
|
+
- 0
|
8
|
+
- 2
|
9
|
+
version: 0.0.2
|
10
|
+
platform: ruby
|
11
|
+
authors:
|
12
|
+
- Marc-Andre Cournoyer
|
13
|
+
- Andrew Stewart
|
14
|
+
autorequire:
|
15
|
+
bindir: bin
|
16
|
+
cert_chain: []
|
17
|
+
|
18
|
+
date: 2010-05-17 00:00:00 +01:00
|
19
|
+
default_executable:
|
20
|
+
dependencies:
|
21
|
+
- !ruby/object:Gem::Dependency
|
22
|
+
name: aws-s3
|
23
|
+
prerelease: false
|
24
|
+
requirement: &id001 !ruby/object:Gem::Requirement
|
25
|
+
requirements:
|
26
|
+
- - ">="
|
27
|
+
- !ruby/object:Gem::Version
|
28
|
+
segments:
|
29
|
+
- 0
|
30
|
+
version: "0"
|
31
|
+
type: :runtime
|
32
|
+
version_requirements: *id001
|
33
|
+
- !ruby/object:Gem::Dependency
|
34
|
+
name: lockfile
|
35
|
+
prerelease: false
|
36
|
+
requirement: &id002 !ruby/object:Gem::Requirement
|
37
|
+
requirements:
|
38
|
+
- - ">="
|
39
|
+
- !ruby/object:Gem::Version
|
40
|
+
segments:
|
41
|
+
- 0
|
42
|
+
version: "0"
|
43
|
+
type: :runtime
|
44
|
+
version_requirements: *id002
|
45
|
+
description:
|
46
|
+
email: boss@airbladesoftware.com
|
47
|
+
executables:
|
48
|
+
- mysql_s3_backup
|
49
|
+
extensions: []
|
50
|
+
|
51
|
+
extra_rdoc_files: []
|
52
|
+
|
53
|
+
files:
|
54
|
+
- bin/mysql_s3_backup
|
55
|
+
- config/sample.yml
|
56
|
+
- lib/mysql_s3_backup/backup.rb
|
57
|
+
- lib/mysql_s3_backup/bucket.rb
|
58
|
+
- lib/mysql_s3_backup/config.rb
|
59
|
+
- lib/mysql_s3_backup/hash_ext.rb
|
60
|
+
- lib/mysql_s3_backup/mysql.rb
|
61
|
+
- lib/mysql_s3_backup/shell.rb
|
62
|
+
- lib/mysql_s3_backup.rb
|
63
|
+
- mysql_s3_backup.gemspec
|
64
|
+
- Rakefile
|
65
|
+
- README.md
|
66
|
+
- spec/backup_spec.rb
|
67
|
+
- spec/drop.sql
|
68
|
+
- spec/insert.sql
|
69
|
+
- spec/load.sql
|
70
|
+
- spec/spec_helper.rb
|
71
|
+
has_rdoc: true
|
72
|
+
homepage: http://github.com/airblade/mysql_s3_backup
|
73
|
+
licenses: []
|
74
|
+
|
75
|
+
post_install_message:
|
76
|
+
rdoc_options: []
|
77
|
+
|
78
|
+
require_paths:
|
79
|
+
- lib
|
80
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
81
|
+
requirements:
|
82
|
+
- - ">="
|
83
|
+
- !ruby/object:Gem::Version
|
84
|
+
segments:
|
85
|
+
- 0
|
86
|
+
version: "0"
|
87
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
88
|
+
requirements:
|
89
|
+
- - ">="
|
90
|
+
- !ruby/object:Gem::Version
|
91
|
+
segments:
|
92
|
+
- 0
|
93
|
+
version: "0"
|
94
|
+
requirements: []
|
95
|
+
|
96
|
+
rubyforge_project:
|
97
|
+
rubygems_version: 1.3.6
|
98
|
+
signing_key:
|
99
|
+
specification_version: 3
|
100
|
+
summary: A simple backup script for Mysql and S3 with incremental backups.
|
101
|
+
test_files:
|
102
|
+
- spec/backup_spec.rb
|
103
|
+
- spec/drop.sql
|
104
|
+
- spec/insert.sql
|
105
|
+
- spec/load.sql
|
106
|
+
- spec/spec_helper.rb
|