rds-s3-backup 0.0.6 → 0.0.8

data/.rspec CHANGED
@@ -1,2 +1,2 @@
  --color
- --format progress
+ --format documentation
data/Rakefile CHANGED
@@ -1 +1,9 @@
  require "bundler/gem_tasks"
+
+ require "rspec/core/rake_task"
+ RSpec::Core::RakeTask.new do |t|
+   # spec options go in .rspec in root
+ end
+
+
+ task :default => [:spec]
data/bin/.gitignore ADDED
@@ -0,0 +1 @@
+ /#rds-s3-backup.rb#
data/lib/rds-s3-backup.rb CHANGED
@@ -14,18 +14,18 @@ module Rds

    options = thor_defaults

-   if thor_options[:config_file] && File.exists?(thor_options[:config_file])
+   if thor_options["config_file"] && File.exists?(thor_options["config_file"])
      begin
-       options = options.merge(YAML.load(File.read(thor_options[:config_file])))
+       options = options.merge(YAML.load(File.read(thor_options["config_file"])))
      rescue Exception => e
-       raise "Unable to read and parse #{thor_options[:config_file]}: #{e.class}: #{e}"
+       raise "Unable to read and parse #{thor_options["config_file"]}: #{e.class}: #{e}"
      end
    end

    options.merge!(thor_options)

    # Check for required options
-   missing_options = %w{rds_instance_id s3_bucket aws_access_key_id aws_secret_access_key mysql_database mysql_username mysql_password}.reduce([]) {|a, o| a << o unless options.has_key?(o); a}
+   missing_options = %w{rds_instance_id s3_bucket aws_access_key_id aws_secret_access_key mysql_database mysql_username mysql_password}.select {|o| o unless options.has_key?(o)}

    raise "Missing required options #{missing_options.inspect} in either configuration or command line" if missing_options.count > 0

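The rewritten missing-options check relies on select discarding nil block results. A minimal standalone sketch of the behavior, with hypothetical option values:

    options  = { "rds_instance_id" => "mydb", "s3_bucket" => "backups" }
    required = %w{rds_instance_id s3_bucket mysql_password}
    missing  = required.select { |o| o unless options.has_key?(o) }
    missing  # => ["mysql_password"] -- the block yields nil for keys that
             # are present, so select keeps only the missing names

required.reject { |o| options.has_key?(o) } would say the same thing more idiomatically.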
@@ -36,8 +36,10 @@ module Rds

    def run(thor_options,thor_defaults)

-     @options = process_options(thor_options,thor_defaults)
      $logger = Logger.new(STDOUT)
+     @options = process_options(thor_options,thor_defaults)
+     $logger.debug "Running with Options: #{@options.to_yaml}"
+
      $logger.level = set_logger_level(@options["log_level"])

      $dogger = DataDog.new(@options['data_dog_api_key'])
@@ -50,24 +52,31 @@ module Rds

      $logger.debug "#{File.basename(__FILE__)}:#{__LINE__}: Running with options:"
      debug_opts = @options.dup
-     debug_opts['aws_access_key_id'] = 'X'*10
-     debug_opts['aws_secret_access_key'] = 'Y'*15
-     debug_opts['mysql_password'] = "ZZY"*5
-     debug_opts['data_dog_api_key'] = 'XYZZY'*3
-     $logger.debug debug_opts.to_yaml
+
+     $logger.debug @options.merge(
+       'aws_access_key_id' => 'X'*10,
+       'aws_secret_access_key' => 'Y'*15,
+       'mysql_password' => "ZZY"*5,
+       'data_dog_api_key' => 'XYZZY'*3
+     ).to_yaml

      begin

        $logger.info "Creating RDS and S3 Connections"
        rds = MyRDS.new(@options)
-       s3 = MyS3.new(@options)
+       $logger.debug "rds: #{rds.to_yaml}"

-      $logger.info "Restoring Database"
+       $logger.info "Restoring Database"
        rds.restore_db()

        $logger.info "Dumping and saving original database contents"
-       real_data_file = "#{rds.server.id}-mysqldump-#{@options['timestamp']}.sql.gz"
-       s3.save_production(rds.dump(real_data_file))
+       real_data_file = "#{rds.server.id}-mysqldump-#{@options['timestamp']}.sql.gz".
+         tap{|t| $logger.debug "real_data_file: #{t}"}
+
+       real_dump_file = rds.dump(real_data_file).tap{|t| $logger.debug "rds.dump returns: #{t}"}
+       s3 = MyS3.new(@options)
+       $logger.debug "s3: #{s3.to_yaml}"
+       s3.save_production(real_dump_file)

        if @options['dump_ttl'] > 0
          $logger.info "Pruning old dumps"
@@ -20,23 +20,26 @@ module Rds::S3::Backup
    class MyRDSException < RuntimeError ; end
    class MyRDS

-     attr_accessor :rds, :server
-
      def initialize(opts)
        @opts = opts
+     end

-       @rds = get_rds_connection(:aws_access_key_id => @opts['aws_access_key_id'],
-                                 :aws_secret_access_key => @opts['aws_secret_access_key'],
-                                 :region => @opts['aws_region'])
-
-       @server = get_rds_server(@opts['rds_instance_id'])
+     def rds
+       # Memoize @rds
+       @rds ||= get_rds_connection()
+     end

+     def server
+       # Memoize @server
+       @server ||= get_rds_server(@opts['rds_instance_id'])
      end

+
+     # Restore the production database from the most recent snapshot
      def restore_db

        begin
-         @rds.restore_db_instance_from_db_snapshot(new_snap.id,
+         self.rds.restore_db_instance_from_db_snapshot(new_snap.id,
                                                    backup_server_id,
                                                    {"DBSubnetGroupName" => @opts['db_subnet_group_name'],
                                                     "DBInstanceClass" => @opts['db_instance_type'] } )
@@ -47,6 +50,7 @@ module Rds::S3::Backup
      end


+     # Dump the database to the backup file name
      def dump(backup_file_name)
        @mysqlcmds ||= ::Rds::S3::Backup::MySqlCmds.new(backup_server.endpoint['Address'],
                                                        @opts['mysql_username'],
@@ -59,15 +63,13 @@ module Rds::S3::Backup
        @mysqlcmds.dump(backup_file_path(backup_file_name)) # returns the dump file path
      end

+     # Convert personal data in the production data into random generic data
      def obfuscate
        @mysqlcmds ||= ::Rds::S3::Backup::MySqlCmds.new(backup_server.endpoint['Address'],
                                                        @opts['mysql_username'],
                                                        @opts['mysql_password'],
                                                        @opts['mysql_database'])

-
-
-
        @mysqlcmds.exec(@opts['obfuscate_sql'])
      end

@@ -76,7 +78,7 @@ module Rds::S3::Backup
      end

      def backup_server_id
-       @backup_server_id ||= "#{@server.id}-s3-dump-server-#{@opts['timestamp']}"
+       @backup_server_id ||= "#{self.server.id}-s3-dump-server-#{@opts['timestamp']}"
      end

      def new_snap
@@ -103,11 +105,11 @@ module Rds::S3::Backup
        @snap_name ||= "s3-dump-snap-#{@opts['timestamp']}"
      end

-     def get_rds_connection(opts={})
+     def get_rds_connection()
        options = {
          :aws_access_key_id => @opts['aws_access_key_id'],
          :aws_secret_access_key => @opts['aws_secret_access_key'],
-         :region => @opts['aws_region']}.merge(opts)
+         :region => @opts['aws_region']}
        Fog.timeout=@opts['fog_timeout']
        begin
          connection = Fog::AWS::RDS.new(options)
@@ -116,14 +118,15 @@ module Rds::S3::Backup
        end

        raise MyRDSException.new("Unable to make RDS connection") if connection.nil?
-       connection

+       connection

      end

      def get_rds_server(id)
-       begin
-         server = @rds.servers.get(id)
+
+       begin
+         server = self.rds.servers.get(id)
        rescue Exception => e
          raise MyRDSException.new("Error getting server in #{self.class}#get_rds_server: #{e.class}: #{e}")
        end
@@ -18,12 +18,79 @@ module Rds::S3::Backup

    def initialize(options)
      @options = options
+   end
+
+   # Lazy loaders
+   def s3
+     @s3 ||= get_storage(:aws_access_key_id => @options['aws_access_key_id'],
+                         :aws_secret_access_key => @options['aws_secret_access_key'],
+                         :region => @options['aws_s3_region'] ||= @options['aws_region'])
+   end

-     @s3 = get_storage(:aws_access_key_id => @options['aws_access_key_id'],
-                       :aws_secret_access_key => @options['aws_secret_access_key'],
-                       :region => @options['aws_s3_region'] ||= @options['aws_region'])
+   def s3_bucket
+     @s3_bucket ||= get_bucket(@options['s3_bucket'])
    end

+   def backup_bucket
+     @backup_bucket ||= get_bucket(@options['backup_bucket'])
+   end
+
+
+
+   # Save the Real version of the Production Database
+   #
+   # == Input
+   #
+   # :file_path:: the path where the production database resides to save up to S3
+   #
+   def save_production(file_path)
+     save(s3_bucket, file_path, :acl => 'private')
+   end
+
+
+   # Save the Cleaned (Obfuscated) Version of the Database
+   #
+   # == Input
+   #
+   # :file_path:: the path where the file resides to save up to S3
+   #
+   def save_clean(file_path)
+     save(backup_bucket, file_path, :acl => 'authenticated-read')
+   end
+
+
+   # Remove older files from S3
+   #
+   # == Input
+   #
+   # o -- an options hash, expecting the following keys:
+   #
+   # :prefix:: the prefix to use with the s3 bucket
+   # :keep:: the number of files to keep. Must keep at least one.
+   def prune_files(o={})
+     options = {
+       :prefix => '',
+       :keep => 1
+     }.merge(o)
+
+     raise MyS3Exception.new "Must keep at least one file. options[:keep] = #{options[:keep]}" if options[:keep] < 1 # must keep at least one, the last one!
+
+     my_files = s3_bucket.files.all('prefix' => options[:prefix])
+     return if my_files.nil?
+
+     if my_files.count > options[:keep]
+       my_files.
+         sort {|x,y| x.last_modified <=> y.last_modified}.
+         take(my_files.count - options[:keep]).
+         each do |f|
+           $logger.info "Deleting #{f.name}"
+           f.destroy
+         end
+     end
+   end
+
+
+   # Make a connection to AWS S3 using Fog::Storage
    def get_storage(o={})
      options = {
        :aws_access_key_id => nil,
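The new prune_files sorts oldest-first and deletes everything beyond the :keep newest files. A standalone sketch of the arithmetic, with plain hashes standing in for Fog file objects:

    files = [
      { :name => "a.sql.gz", :last_modified => Time.utc(2013, 1, 1) },
      { :name => "b.sql.gz", :last_modified => Time.utc(2013, 2, 1) },
      { :name => "c.sql.gz", :last_modified => Time.utc(2013, 3, 1) },
    ]
    keep = 1

    # oldest first; taking (count - keep) leaves the `keep` newest untouched
    doomed = files.sort_by { |f| f[:last_modified] }.take(files.count - keep)
    doomed.map { |f| f[:name] }   # => ["a.sql.gz", "b.sql.gz"]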
@@ -38,33 +105,30 @@ module Rds::S3::Backup
        raise MyS3Exception.new "Error establishing storage connection: #{e.class}: #{e}"
      end

+     $logger.debug "What is storage? #{storage.class}:#{storage.inspect}"
      raise MyS3Exception.new "In #{self.class}#get_storage: storage is nil!" if storage.nil?

+     storage
    end

+
+   # Retrieve a pointer to an AWS S3 Bucket
    def get_bucket(bucket)

      begin
-       bucket = @s3.directories.get(bucket)
+       bucket = self.s3.directories.get(bucket)
      rescue Exception => e
        raise MyS3Exception.new "Error getting bucket #{bucket} in S3: #{e.class}: #{e}"
      end

      raise MyS3Exception.new "In #{self.class}#get_bucket: bucket is nil!" if bucket.nil?

+     bucket
    end

-   def save_production(file_path)
-     save(s3_bucket, file_path, :acl => 'private')
-   end
-
-   def save_clean(file_path)
-     save(backup_bucket, file_path, :acl => 'authenticated-read')
-   end
-
-
-
+   # Perform the actual save from a local file_path to AWS S3
    def save(bucket, file_path, o={})
+     raise MyS3Exception.new "bucket is nil!!" if bucket.nil?
      options = {
        :key => File.join(@options['s3_prefix'], File.basename(file_path)),
        :body => File.open(file_path),
@@ -79,8 +143,9 @@ module Rds::S3::Backup

    rescue Exception => e
      if tries < 3
-       @logger.info "Retrying S3 upload after #{tries} tries"
+       $logger.info "Retrying S3 upload after #{tries} tries"
        tries += 1
+       sleep tries * 60 # progressive back off
        retry
      else
        raise MyS3Exception.new "Could not save #{File.basename(file_path)} to S3 after 3 tries: #{e.class}: #{e}"
@@ -89,40 +154,8 @@ module Rds::S3::Backup

    end

-
-   def s3_bucket
-     @s3_bucket ||= get_bucket(@options['s3_bucket'])
-   end
-
-   def backup_bucket
-     @backup_bucket ||= get_bucket(@options['backup_bucket'])
-   end
-
-
-   def prune_files(o={})
-     options = {
-       :prefix => '',
-       :keep => 1
-     }.merge(o)
-
-     return if options[:keep] < 1 # must keep at least one, the last one!
-
-     my_files = s3_bucket.files.all('prefix' => options[:prefix])
-     return if my_files.nil?
-
-     if my_files.count > options[:keep]
-       my_files.
-         sort {|x,y| x.last_modified <=> y.last_modified}.
-         take(files_by_date.count - options[:keep]).
-         each do |f|
-           logger.info "Deleting #{f.name}"
-           f.destroy
-         end
-     end
-   end
-
+   # nothing really to do here...
    def destroy
-     # nothing really to do here...
    end


data/lib/rds-s3-backup/version.rb CHANGED
@@ -22,7 +22,7 @@ module Rds
        # 2013-03-04 tamara


-       VERSION = "0.0.6"
+       VERSION = "0.0.8"
      end
    end
  end
data/spec/rds-s3-backup/datadog_spec.rb ADDED
@@ -0,0 +1,45 @@
+ require 'spec_helper'
+ require 'dogapi'
+ require 'rds-s3-backup'
+
+ module Rds::S3::Backup
+
+   describe "DataDog" do
+     it { DataDog.should respond_to(:new) }
+
+     describe "#make_dog_client" do
+
+       it "should return the client object" do
+         Dogapi::Client.stub(:new).with("apikey").and_return("apikey")
+         DataDog.new("apikey").make_dog_client.should == "apikey"
+       end
+
+       it "should raise an exception" do
+         Dogapi::Client.stub(:new) { raise RuntimeError }
+         expect { DataDog.new("apikey").make_dog_client }.to raise_error(RuntimeError)
+       end
+
+     end
+
+     describe "#send" do
+       it "should do something interesting" do
+         Dogapi::Client.stub_chain(:new, :emit_event) {"event"}
+         Dogapi::Event.stub(:new)
+         DataDog.new("apikey").send("message").should == "event"
+       end
+
+ it "should raise and exception" do
32
+ Dogapi::Client.stub_chain(:new, :emit_event) { raise RuntimeError }
33
+ expect { DataDog.new("apikey").send("message")}.to raise_error(RuntimeError)
34
+ end
35
+
36
+
37
+
38
+ end
39
+
40
+
41
+
42
+
43
+ end
44
+
45
+ end
data/spec/rds-s3-backup/myrds_spec.rb ADDED
@@ -0,0 +1,55 @@
+ require 'spec_helper'
+ require 'rds-s3-backup'
+ require 'fog'
+
+ module Rds::S3::Backup
+
+   describe "MyRDS" do
+
+     let(:myrds_options) {{'aws_access_key_id' => 'abcdef',
+                           'aws_secret_access_key' => 'xyzzy',
+                           'region' => 'us-west',
+                           'rds_instance_id' => 'honeybadger',
+                           'db_subnet_group_name' => 'test',
+                           'db_instance_type' => 'big',
+                           'mysql_username' => 'JoeBob',
+                           'mysql_password' => 'BillyBriggs',
+                           'mysql_database' => 'test',
+                           'dump_directory' => '.',
+                           'obfuscate_sql' => 'obfuscate_database.sql',
+                           'timestamp' => Time.new.strftime('%Y-%m-%d-%H-%M-%S-%Z')
+                          }}
+
+     let(:myrds) {MyRDS.new(myrds_options)}
+
+     before(:each) do
+       Fog.stub(:timeout=)
+       Fog::AWS::RDS.stub(:new) { Object.new }
+     end
+
+     it ":get_rds_connection should return an exception" do
+       Fog::AWS::RDS.stub(:new) { raise RuntimeError }
+       expect { myrds.get_rds_connection }.to raise_error(RuntimeError)
+     end
+
+     xit "execute :dump" do
+       Fog::AWS::RDS.stub_chain(:new, :servers)
+       Rds::S3::Backup::MySqlCmds.stub_chain(:new, :dump) {"dumpfile.sql.gz"}
+       myrds.dump("dumpfile.sql.gz").should == "dumpfile.sql.gz"
+     end
+
+     xit "execute :obfuscate" do
+       Rds::S3::Backup::MySqlCmds.stub_chain(:new, :exec)
+       myrds.exec(myrds_options['obfuscate_sql']).should == 0
+     end
+
+     xit "executing :obfuscate returns an error" do
+       Rds::S3::Backup::MySqlCmds.stub_chain(:new, :exec) {raise RuntimeError}
+       expect {myrds.exec(myrds_options['obfuscate_sql'])}.to raise_error(RuntimeError)
+     end
+
+
+   end
+
+
+ end
data/spec/rds-s3-backup_spec.rb CHANGED
@@ -17,10 +17,6 @@ require 'rds-s3-backup'

  module Rds::S3::Backup

-   describe "has version" do
-     it {Rds::S3::Backup::VERSION.should == '0.0.1' }
-   end
-
    describe "module methods" do

      it { Rds::S3::Backup.should respond_to(:run) }
@@ -31,54 +27,55 @@ module Rds::S3::Backup

    describe "#run method" do
      let(:options) { {
-       :rds_instance_id => 'stagingdb',
-       :s3_bucket => 'novu-backups',
-       :backup_bucket => 'novu-backups',
-       :s3_prefix => 'db_dumps',
-       :aws_access_key_id => 'ABCDE',
-       :aws_secret_access_key => '0987654321',
-       :mysql_database => 'novu_text',
-       :mysql_username => 'novurun',
-       :mysql_password => 'passw0rd',
-       :obfuscate_sql => '/usr/local/etc/obfuscate.sql',
-       :dump_ttl => 0,
-       :dump_directory => '/mnt/secure',
-       :aws_region => 'us-east-1',
-       :aws_s3_region => 'us-west-2',
-       :db_subnet_group_name => 'staging db subnet',
-       :db_instance_type => 'db.m1.small',
-       :instance_id => '',
-       :log_level => 0,
-       :quiet => false,
-       :verbose => true
+       "rds_instance_id" => 'stagingdb',
+       "s3_bucket" => 'novu-backups',
+       "backup_bucket" => 'novu-backups',
+       "s3_prefix" => 'db_dumps',
+       "aws_access_key_id" => 'ABCDE',
+       "aws_secret_access_key" => '0987654321',
+       "mysql_database" => 'novu_text',
+       "mysql_username" => 'novurun',
+       "mysql_password" => 'passw0rd',
+       "obfuscate_sql" => '/usr/local/etc/obfuscate.sql',
+       "dump_ttl" => 0,
+       "dump_directory" => '/mnt/secure',
+       "aws_region" => 'us-east-1',
+       "aws_s3_region" => 'us-west-2',
+       "db_subnet_group_name" => 'staging db subnet',
+       "db_instance_type" => 'db.m1.small',
+       "instance_id" => '',
+       "log_level" => 'debug',
+       "quiet" => false,
+       "verbose" => true
      } }


      before(:each) do
-       DataDog.stub(:new).and_return(OpenStruct.new(:send => true))
-
-       MyRDS.stub(:new).and_return(OpenStruct.new(:restore_db => true,
-                                                  :dump => Proc.new {|file| file },
-                                                  :obfuscate => true,
-                                                  :destroy => true,
-                                                  :server =>
-                                                    OpenStruct.new(:id => 1)
-                                                  ))
-       MyS3.stub(:new).and_return(OpenStruct.new(:save_production => true,
-                                                 :save_clean => true,
-                                                 :prune_files => true,
-                                                 :destroy => true))
+       DataDog.stub_chain(:new, :send)
+
+
+       MyRDS.stub_chain(:new, :restore_db)
+       MyRDS.stub_chain(:new, :destroy)
+       MyRDS.stub_chain(:new, :server, :id).and_return 1
+       MyRDS.stub_chain(:new, :obfuscate)
+       MyRDS.stub_chain(:new, :dump).and_return "dump_path"
+
+       MyS3.stub_chain(:new, :save_production)
+       MyS3.stub_chain(:new, :save_clean)
+       MyS3.stub_chain(:new, :prune_files)
+       MyS3.stub_chain(:new, :destroy)
+
      end

      it "should process options" do
-       opts = Rds::S3::Backup.process_options(options)
+       opts = Rds::S3::Backup.process_options(options,{})
        opts.should be_a(Hash)
-       opts.should have_key(:timestamp)
-       opts[:timestamp].should_not be_nil
+       opts.should have_key("timestamp")
+       opts["timestamp"].should_not be_nil
      end


-     it { Rds::S3::Backup.run(options).should == 0 }
+     it { Rds::S3::Backup.run(options,{}).should == 0 }

    end

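The before block now stubs each collaborator with RSpec 2's stub_chain instead of hand-built OpenStruct doubles. A minimal illustration of the mechanics, using the same should/stub syntax as these specs:

    # stub_chain(:new, :dump) stubs MyRDS.new to return an internal double
    # whose #dump returns the canned value
    MyRDS.stub_chain(:new, :dump).and_return "dump_path"
    MyRDS.new.dump   # => "dump_path"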
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: rds-s3-backup
  version: !ruby/object:Gem::Version
-   version: 0.0.6
+   version: 0.0.8
  prerelease:
  platform: ruby
  authors:
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2013-03-07 00:00:00.000000000 Z
+ date: 2013-04-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: thor
@@ -160,6 +160,7 @@ description: ! '"Thor script and libraries to backup an AWS RDS snapshot to AWS
  email:
  - tamouse@gmail.com
  executables:
+ - .gitignore
  - rds-s3-backup.rb
  extensions: []
  extra_rdoc_files: []
@@ -171,6 +172,7 @@ files:
  - LICENSE.txt
  - README.md
  - Rakefile
+ - bin/.gitignore
  - bin/rds-s3-backup.rb
  - lib/rds-s3-backup.rb
  - lib/rds-s3-backup/datadog.rb
@@ -179,6 +181,8 @@ files:
  - lib/rds-s3-backup/mysqlcmds.rb
  - lib/rds-s3-backup/version.rb
  - rds-s3-backup.gemspec
+ - spec/rds-s3-backup/datadog_spec.rb
+ - spec/rds-s3-backup/myrds_spec.rb
  - spec/rds-s3-backup_spec.rb
  - spec/spec_helper.rb
  - test_data/.gitignore
@@ -209,6 +213,8 @@ signing_key:
  specification_version: 3
  summary: ! '"Backup from AWS RDS snapshot to AWS S3 as mysqldump"'
  test_files:
+ - spec/rds-s3-backup/datadog_spec.rb
+ - spec/rds-s3-backup/myrds_spec.rb
  - spec/rds-s3-backup_spec.rb
  - spec/spec_helper.rb
  has_rdoc: