new_backup 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,193 @@
+ =begin
+
+ = main.rb
+
+ *Copyright*:: (C) 2013 by Novu, LLC
+ *Author(s)*:: Tamara Temple <tamara.temple@novu.com>
+ *Since*:: 2013-05-01
+ *License*:: MIT
+ *Version*:: 0.0.1
+
+ == Description
+
+ Main class/routine for new_backup.
+
+ =end
+
+ require 'yaml'
+ require 'methadone'
+ require 'new_backup/myrds'
+ require 'new_backup/mys3'
+ require 'new_backup/mysqlcmds'
+ require 'new_backup/datadog'
+
+ module NewBackup
+
+   class Main
+
+     REQUIRED_OPTIONS = %w{rds_instance_id s3_bucket aws_access_key_id aws_secret_access_key mysql_database mysql_username mysql_password timestamp}
+     DEFAULT_OPTIONS = {
+       'fog_timeout' => 1800,
+       'dump_directory' => '/tmp',
+       'dump_ttl' => 0,
+       'aws_region' => 'us-east-1',
+       'db_instance_type' => 'db.m1.small',
+       'timestamp_format' => '%Y-%m-%d-%H-%M-%S-%Z'
+     }
+
+     include Methadone::CLILogging
+     include NewBackup::DataDog
+
+     attr_accessor :options
+
+     # Initialize the Main loop, processing options and putting them in the right form
+     def initialize(opts={})
+
+       debug "#{self.class}##{__method__}:#{__LINE__}: original opts: #{opts.to_yaml}"
+
+       # Merge defaults, the optional YAML config file, and the passed-in options (highest precedence)
+       if opts["config_file"] && File.exist?(opts["config_file"])
+         converged_opts = DEFAULT_OPTIONS.merge(YAML.load(File.read(opts["config_file"]))).merge(opts)
+       else
+         converged_opts = DEFAULT_OPTIONS.merge(opts)
+       end
+
+       converged_opts["timestamp"] = Time.now.strftime(converged_opts["timestamp_format"])
+
+       debug "#{self.class}##{__method__}:#{__LINE__}: converged_opts: #{converged_opts.to_yaml}"
+
+       missing_options = REQUIRED_OPTIONS.reject {|o| converged_opts.has_key?(o)}
+       raise "Missing required options #{missing_options.inspect} in either configuration or command line" if missing_options.count > 0
+
+       @options = {
+         :fog => {
+           :timeout => converged_opts["fog_timeout"]
+         },
+         :aws => {
+           :access_key => '[HIDDEN]',
+           :secret_key => '[HIDDEN]',
+           :region => converged_opts["aws_region"]
+         },
+         :rds => {
+           :instance_id => converged_opts["rds_instance_id"],
+           :subnet_group => converged_opts["db_subnet_group_name"],
+           :instance_type => converged_opts["db_instance_type"]
+         },
+         :s3 => {
+           :raw_bucket => converged_opts["s3_bucket"],
+           :clean_bucket => converged_opts["backup_bucket"],
+           :prefix => converged_opts["s3_prefix"],
+           :region => converged_opts["aws_s3_region"] ||= converged_opts["aws_region"],
+           :dump_ttl => converged_opts["dump_ttl"]
+         },
+         :mysql => {
+           :username => converged_opts["mysql_username"],
+           :password => '[HIDDEN]',
+           :database => converged_opts["mysql_database"],
+           :obfuscate_script => converged_opts["obfuscate_script"]
+         },
+         :datadog => {
+           :api_key => '[HIDDEN]'
+         },
+         :dump_directory => converged_opts["dump_directory"],
+         :timestamp => converged_opts["timestamp"],
+         :debug => converged_opts["debug"],
+         :nords => converged_opts["nords"],
+         :nos3 => converged_opts["nos3"]
+       }
+
+       debug "Options:\n#{@options.to_yaml}"
+
+       # Fill in the hidden values after showing the options
+       @options[:aws][:access_key] = converged_opts["aws_access_key_id"]
+       @options[:aws][:secret_key] = converged_opts["aws_secret_access_key"]
+       @options[:mysql][:password] = converged_opts["mysql_password"]
+       @options[:datadog][:api_key] = converged_opts["datadog_apikey"]
+
+       debug @options.to_yaml
+
+     end
+
+     def run
+       dogger "Beginning RDS-S3-Backup"
+       begin
+
+         raise "#{self.class}##{__method__}:#{__LINE__}: Dump directory #{@options[:dump_directory]} does not exist!" unless File.directory?(@options[:dump_directory])
+
+         raw_file = File.join(File.expand_path(@options[:dump_directory]), save_file_name)
+         debug "#{self.class}##{__method__}:#{__LINE__}: raw_file: #{raw_file}"
+         clean_file = File.join(File.expand_path(@options[:dump_directory]), clean_file_name)
+         debug "#{self.class}##{__method__}:#{__LINE__}: clean_file: #{clean_file}"
+
+         if (@options[:nords])
+           info "Not running RDS"
+           File.open(raw_file,'w') do |f|
+             f.puts "default content when not running RDS"
+           end
+           File.open(clean_file,'w') do |f|
+             f.puts "default content when not running RDS"
+           end
+         else
+           NewBackup::MyRds.new(@options).restore do |db|
+             info "Dump raw database"
+             db.dump(raw_file)
+             info "Obfuscate database"
+             db.obfuscate
+             info "Dump clean database"
+             db.dump(clean_file)
+           end
+         end
+
+         if (@options[:nos3])
+           info "Not running S3"
+         else
+           s3 = NewBackup::MyS3.new(@options)
+           s3.connect do |connection|
+             s3.connect_bucket(connection, @options[:s3][:raw_bucket]) do |bucket|
+               info "Save raw db dump"
+               s3.put_file bucket, raw_file
+               info "Prune excess backups"
+               s3.prune bucket, @options[:s3][:dump_ttl]
+             end
+
+             s3.connect_bucket(connection, @options[:s3][:clean_bucket]) do |bucket|
+               info "Save cleaned db dump"
+               s3.put_file bucket, clean_file
+             end
+           end
+         end
+
+       rescue Exception => e
+         dogger "Fatal error in #{self.class}#run: #{e.class}: #{e}",
+           :type => :error,
+           :body => "Backtrace:\n#{e.backtrace.join("\n")}"
+         debug e.backtrace.join("\n")
+         raise e
+
+       ensure
+         # Clean up local dump files unless we are debugging
+         File.unlink(raw_file) if File.exist?(raw_file) && ! @options[:debug]
+         File.unlink(clean_file) if File.exist?(clean_file) && ! @options[:debug]
+       end
+
+       dogger "End RDS-S3-Backup"
+
+     end
+
+     private
+
+     def save_file_name
+       "#{@options[:rds][:instance_id]}-mysqldump-#{@options[:timestamp]}.sql.gz"
+     end
+
+     def clean_file_name
+       "clean-mysqldump.sql.gz"
+     end
+
+   end
+
+ end
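
For reference, a minimal sketch of driving NewBackup::Main directly, outside the gem's own CLI wrapper (which is defined elsewhere in the package). The option keys come from REQUIRED_OPTIONS above; the require path, bucket names, and ENV-based values are placeholders, not part of the package:

    require 'new_backup/main'   # assumed load path, mirroring the other requires

    # Hypothetical values; "timestamp" is filled in automatically from
    # "timestamp_format", the rest of REQUIRED_OPTIONS must be supplied here
    # or in the YAML file named by "config_file".
    opts = {
      "rds_instance_id"       => "my-production-db",
      "s3_bucket"             => "my-raw-backups",
      "backup_bucket"         => "my-clean-backups",
      "s3_prefix"             => "mysql",
      "aws_access_key_id"     => ENV["AWS_ACCESS_KEY_ID"],
      "aws_secret_access_key" => ENV["AWS_SECRET_ACCESS_KEY"],
      "mysql_database"        => "myapp",
      "mysql_username"        => "backup",
      "mysql_password"        => ENV["MYSQL_PASSWORD"],
      "obfuscate_script"      => "obfuscate.sql",
      "datadog_apikey"        => ENV["DATADOG_API_KEY"]
    }

    NewBackup::Main.new(opts).run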
@@ -0,0 +1,140 @@
+ =begin
+
+ = myrds.rb
+
+ *Copyright*:: (C) 2013 by Novu, LLC
+ *Author(s)*:: Tamara Temple <tamara.temple@novu.com>
+ *Since*:: 2013-05-01
+ *License*:: MIT
+ *Version*:: 0.0.1
+
+ == Description
+
+ Restore an RDS database snapshot
+
+ =end
+
+ require 'methadone'
+ require 'fog'
+ require 'new_backup/mysqlcmds'
+
+ module NewBackup
+
+   class MyRds
+
+     include Methadone::CLILogging
+
+     # Initialize the class with options
+     def initialize(options={})
+       debug "Options: #{options}"
+       @options = options
+     end
+
+     # Maximum number of tries to wait for database servers to be ready
+     MAX_TRIES = 3
+
+     # Restore a snapshot of the target database, yielding the database to the block
+     def restore(&block)
+       connect do |connection|
+         get_rds(connection) do |rds_server|
+           retrieve_snapshot(rds_server) do |snapshot|
+             restore_db(connection, snapshot) do |db|
+               yield db
+             end
+           end
+         end
+       end
+     end
+
+     # Establishes a connection to AWS RDS and yields the connection to the block
+     def connect(&block)
+       raise "no block given in #{self.class}#connect" unless block_given?
+       aws = @options[:aws]
+       debug "AWS Options: #{aws}"
+       fog_options = {
+         :aws_access_key_id => aws[:access_key],
+         :aws_secret_access_key => aws[:secret_key],
+         :region => aws[:region]}
+
+       Fog.timeout = @options[:fog][:timeout]
+       yield Fog::AWS::RDS.new(fog_options)
+     end
+
+     # Get the RDS server
+     def get_rds(connection)
+       debug "rds instance_id: #{@options[:rds][:instance_id]}"
+       debug "Connection servers: #{connection.servers}"
+       rds_server = connection.servers.get(@options[:rds][:instance_id])
+       raise "No RDS server!" if rds_server.nil?
+       yield rds_server
+     end
+
+     # Retrieve a snapshot
+     def retrieve_snapshot(rds_server, &block)
+       begin
+         rds_server.snapshots.new(:id => snap_name).save
+         new_snapshot = rds_server.snapshots.get(snap_name)
+         1.upto(MAX_TRIES) do |i|
+           debug "waiting for new snapshot, try ##{i}"
+           new_snapshot.wait_for { ready? }
+         end
+
+         yield new_snapshot
+       ensure
+         unless @options[:debug]
+           new_snapshot.destroy unless new_snapshot.nil?
+         end
+       end
+     end
+
+     # Restore the snapshot to a database
+     def restore_db(connection, snapshot, &block)
+       begin
+         connection.
+           restore_db_instance_from_db_snapshot(snapshot.id,
+             backup_server_id,
+             {"DBSubnetGroupName" => @options[:rds][:subnet_group],
+              "DBInstanceClass" => @options[:rds][:instance_type]}
+           )
+         backup_server = connection.servers.get(backup_server_id)
+         1.upto(MAX_TRIES) do |i|
+           debug "waiting for backup server, try ##{i}"
+           backup_server.wait_for { ready? }
+         end
+
+         yield MySqlCmds.new(backup_server.endpoint['Address'],
+                             @options[:mysql][:username],
+                             @options[:mysql][:password],
+                             @options[:mysql][:database],
+                             @options[:mysql][:obfuscate_script])
+
+       ensure
+         unless @options[:debug]
+           backup_server.destroy unless backup_server.nil?
+         end
+       end
+     end
+
+     # Return the snapshot name
+     def snap_name
+       "s3-dump-snap-#{@options[:timestamp]}".tap{|t| debug "Snap Name: #{t}"}
+     end
+
+     # Return the backup server id
+     def backup_server_id
+       "#{@options[:rds][:instance_id]}-s3-dump-server-#{@options[:timestamp]}".tap{|t| debug "Backup Server ID: #{t}"}
+     end
+
+   end
+
+ end
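
MyRds expects the already-normalized, symbol-keyed options hash that Main#initialize builds. A rough sketch of driving it on its own, with hypothetical values (in the gem it is only called from Main#run):

    require 'new_backup/myrds'

    options = {
      :fog   => { :timeout => 1800 },
      :aws   => { :access_key => ENV["AWS_ACCESS_KEY_ID"],
                  :secret_key => ENV["AWS_SECRET_ACCESS_KEY"],
                  :region     => "us-east-1" },
      :rds   => { :instance_id   => "my-production-db",
                  :subnet_group  => "default",
                  :instance_type => "db.m1.small" },
      :mysql => { :username => "backup",
                  :password => ENV["MYSQL_PASSWORD"],
                  :database => "myapp",
                  :obfuscate_script => "obfuscate.sql" },
      :timestamp => Time.now.strftime('%Y-%m-%d-%H-%M-%S-%Z'),
      :debug     => false
    }

    # restore creates a snapshot and a temporary instance, yields a MySqlCmds
    # handle for it, and destroys both afterwards (unless :debug is set).
    NewBackup::MyRds.new(options).restore do |db|
      db.dump('/tmp/raw-dump.sql.gz')
    end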
@@ -0,0 +1,123 @@
+ =begin
+
+ = mys3.rb
+
+ *Copyright*:: (C) 2013 by Novu, LLC
+ *Author(s)*:: Tamara Temple <tamara.temple@novu.com>
+ *Since*:: 2013-05-01
+ *License*:: MIT
+ *Version*:: 0.0.1
+
+ == Description
+
+ Upload the raw file and the clean file up to AWS S3
+
+ =end
+
+ require 'methadone'
+ require 'fog'
+ require 'debugger'
+
+ module NewBackup
+
+   class MyS3
+
+     include Methadone::CLILogging
+
+     # Maximum number of attempts to try to upload file
+     MAX_TRIES = 3
+
+     # Initialize the MyS3 object
+     #
+     # options:: pass in the program options
+     def initialize(options={})
+       @options = options
+     end
+
+     # Connect to S3 and yield the connection to the block
+     def connect(&block)
+
+       options = {
+         :aws_access_key_id => @options[:aws][:access_key],
+         :aws_secret_access_key => @options[:aws][:secret_key],
+         :region => @options[:s3][:region],
+         :provider => 'AWS',
+         :scheme => 'https'}
+
+       connection = Fog::Storage.new(options)
+
+       yield connection
+
+     end
+
+     # Connect to specific S3 bucket
+     #
+     # == inputs
+     # *connection*:: S3 connection to use
+     # *bucket_name*:: name of the bucket to use in S3
+     # *&block*:: block passed to evaluate in context of bucket
+     # (If no block given, simply return the bucket pointer.)
+     def connect_bucket(connection, bucket_name, &block)
+       debug "#{self.class}##{__method__}:#{__LINE__}: connection: #{connection.inspect}"
+       buckets = connection.directories
+       raise "No buckets!" if buckets.nil? || buckets.empty?
+       bucket = connection.directories.get(bucket_name)
+       raise "No bucket #{bucket_name}" if bucket.nil?
+
+       if block_given?
+         yield bucket
+       else
+         bucket
+       end
+
+     end
+
+     # Do the heavy lifting to put the file in the appropriate bucket
+     #
+     # bucket:: directory where to put the file
+     # fn:: name of file to upload (gzipped)
+     def put_file(bucket, fn)
+       s3_fn = File.join(@options[:s3][:prefix], File.basename(fn))
+       options = {
+         :key => s3_fn,
+         :body => File.open(fn, 'r'),
+         :acl => 'authenticated-read',
+         :encryption => 'AES256',
+         :content_type => 'application/x-gzip'}
+
+       bucket.files.create(options)
+
+       # Verify that file was uploaded
+       files = bucket.files.all.select {|f| f.key == s3_fn }
+       raise "#{fn} was not uploaded to #{s3_fn}!" unless files.count > 0
+       "s3://#{bucket.key}/#{s3_fn}"
+
+     end
+
+     # Prune extra files
+     def prune(bucket, keep=0)
+
+       if keep > 0
+         files = bucket.files.all('prefix' => @options[:s3][:prefix])
+
+         return if files.nil? || files.empty?
+
+         if files.count > keep
+           files.sort {|x,y| x.last_modified <=> y.last_modified}.
+             take(files.count - keep).
+             map(&:destroy)
+         end
+
+       end
+
+       files = bucket.files.all('prefix' => @options[:s3][:prefix])
+       files.count
+
+     end
+
+   end
+
+ end
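
As with MyRds, MyS3 consumes the symbol-keyed options assembled in Main#initialize. A rough usage sketch, with placeholder bucket names and paths rather than anything defined by the package:

    require 'new_backup/mys3'

    options = {
      :aws => { :access_key => ENV["AWS_ACCESS_KEY_ID"],
                :secret_key => ENV["AWS_SECRET_ACCESS_KEY"] },
      :s3  => { :raw_bucket => "my-raw-backups",
                :prefix     => "mysql",
                :region     => "us-east-1",
                :dump_ttl   => 7 }
    }

    s3 = NewBackup::MyS3.new(options)
    s3.connect do |connection|
      s3.connect_bucket(connection, options[:s3][:raw_bucket]) do |bucket|
        s3.put_file(bucket, '/tmp/raw-dump.sql.gz')  # uploads under the :prefix key
        s3.prune(bucket, options[:s3][:dump_ttl])    # keep only the newest 7 dumps
      end
    end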