ey_cloud_server 1.4.5 → 1.4.26

Sign up to get free protection for your applications and to get access to all the features.
Files changed (42)
  1. data/bin/binary_log_purge +10 -2
  2. data/bin/ey-snapshots +7 -1
  3. data/bin/eybackup +13 -1
  4. data/lib/ey-flex.rb +18 -7
  5. data/lib/ey-flex/big-brother.rb +2 -2
  6. data/lib/ey-flex/bucket_minder.rb +46 -160
  7. data/lib/ey-flex/ec2.rb +17 -0
  8. data/lib/ey-flex/snapshot_minder.rb +93 -171
  9. data/lib/ey_backup.rb +84 -48
  10. data/lib/ey_backup/backend.rb +34 -0
  11. data/lib/ey_backup/backup_set.rb +70 -63
  12. data/lib/ey_backup/base.rb +0 -5
  13. data/lib/ey_backup/cli.rb +26 -6
  14. data/lib/ey_backup/database.rb +48 -0
  15. data/lib/ey_backup/dumper.rb +15 -31
  16. data/lib/ey_backup/engine.rb +7 -17
  17. data/lib/ey_backup/engines/mysql_engine.rb +24 -16
  18. data/lib/ey_backup/engines/postgresql_engine.rb +26 -20
  19. data/lib/ey_backup/loader.rb +13 -33
  20. data/lib/ey_backup/processors/gpg_encryptor.rb +3 -20
  21. data/lib/ey_backup/processors/gzipper.rb +0 -29
  22. data/lib/ey_backup/processors/splitter.rb +22 -34
  23. data/lib/ey_backup/spawner.rb +7 -13
  24. data/lib/ey_cloud_server.rb +1 -1
  25. data/lib/{ey-flex → ey_cloud_server}/version.rb +1 -1
  26. data/spec/big-brother_spec.rb +12 -0
  27. data/spec/bucket_minder_spec.rb +113 -0
  28. data/spec/config-example.yml +11 -0
  29. data/spec/ey_api_spec.rb +63 -0
  30. data/spec/ey_backup/backend_spec.rb +12 -0
  31. data/spec/ey_backup/backup_spec.rb +54 -0
  32. data/spec/ey_backup/cli_spec.rb +35 -0
  33. data/spec/ey_backup/mysql_backups_spec.rb +208 -0
  34. data/spec/ey_backup/postgres_backups_spec.rb +106 -0
  35. data/spec/ey_backup/spec_helper.rb +5 -0
  36. data/spec/fakefs_hax.rb +50 -0
  37. data/spec/gpg.public +0 -0
  38. data/spec/gpg.sekrit +0 -0
  39. data/spec/helpers.rb +270 -0
  40. data/spec/snapshot_minder_spec.rb +68 -0
  41. data/spec/spec_helper.rb +31 -0
  42. metadata +286 -53
data/bin/binary_log_purge CHANGED
@@ -100,14 +100,22 @@ def run_query(host, user, password, query)
100
100
  if host == '127.0.0.1'
101
101
  options = options + ' -P13306'
102
102
  end
103
- stdin, stdout, stderr = Open3.popen3("mysql -u#{user} -p#{password} #{options} -h#{host} -N -e\"#{query}\"")
103
+ if query == 'show processlist'
104
+ stdin, stdout, stderr = Open3.popen3("mysql -u#{user} -p#{password} #{options} -h#{host} -N -e\"#{query}\"|grep 'Binlog'")
105
+ else
106
+ stdin, stdout, stderr = Open3.popen3("mysql -u#{user} -p#{password} #{options} -h#{host} -N -e\"#{query}\"")
107
+ end
104
108
  query_error = stderr.read
105
109
  if query_error.length > 0
106
110
  log_error "Error caught: #{query_error}"
107
111
  test_add_privilege(user, password, query_error)
108
112
  exit 0
109
113
  end
110
- rs = stdout.read
114
+ result = stdout.read
115
+ stdin.close
116
+ stdout.close
117
+ stderr.close
118
+ result
111
119
  end
112
120
 
113
121
  # function to test for user privilege
data/bin/ey-snapshots CHANGED
@@ -1,9 +1,15 @@
1
1
  #!/usr/bin/env ruby
2
+
3
+ # Disable verbose (when running from cron) to squash warnings output by rest_client (1.6.1)
4
+ unless STDOUT.isatty
5
+ $VERBOSE = nil
6
+ end
7
+
2
8
  require File.dirname(__FILE__) + '/../lib/ey-flex'
3
9
 
4
10
  begin
5
11
  EY::SnapshotMinder.run(ARGV)
6
12
  rescue => e
7
- EY.notify_error(e)
13
+ EY.notify_snapshot_error(e)
8
14
  raise
9
15
  end
data/bin/eybackup CHANGED
@@ -1,6 +1,18 @@
1
1
  #!/usr/bin/env ruby
2
2
 
3
+ # Disable verbose (when running from cron) to squash warnings output by rest_client (1.6.1) and also fog (0.4.0)
4
+ unless STDOUT.isatty
5
+ $VERBOSE = nil
6
+ end
7
+
3
8
  require 'rubygems'
4
9
  require File.dirname(__FILE__) + '/../lib/ey_backup'
10
+ require File.dirname(__FILE__) + '/../lib/ey-flex'
11
+
12
+ begin
13
+ EY::Backup.run(ARGV)
14
+ rescue => e
15
+ EY.notify_backup_error(e)
16
+ raise
17
+ end
5
18
 
6
- EY::Backup.run(ARGV)
data/lib/ey-flex.rb CHANGED
@@ -14,19 +14,29 @@ require 'stringio'
14
14
  require 'yaml'
15
15
  require "optparse"
16
16
  require 'open4'
17
+ require "ey_enzyme"
17
18
 
18
19
  lib_dir = File.expand_path(__FILE__ + '/../ey-flex')
19
20
 
20
21
  module EY
21
- def self.notify_error(error)
22
- enzyme_api.notify_error("user", error)
22
+ def self.notify_snapshot_error(error)
23
+ enzyme_api.notify_error("snapshot", error)
24
+ end
25
+
26
+ def self.notify_backup_error(error)
27
+ enzyme_api.notify_error("backup", error)
23
28
  end
24
29
 
25
30
  def self.enzyme_api
26
- return @enzyme_api if @enzyme_api
27
- require 'ey_enzyme'
28
- config = YAML.load_file("/etc/engineyard/dracul.yml")
29
- @enzyme_api = EY::Enzyme::API.new(config[:api], config[:instance_id], config[:token])
31
+ @enzyme_api ||= EY::Enzyme::API.new(
32
+ enzyme_config[:api],
33
+ enzyme_config[:instance_id],
34
+ enzyme_config[:token]
35
+ )
36
+ end
37
+
38
+ def self.enzyme_config
39
+ @enzyme_config ||= YAML.load_file("/etc/engineyard/dracul.yml")
30
40
  end
31
41
 
32
42
  module Flex
@@ -39,4 +49,5 @@ require lib_dir + '/big-brother'
39
49
  require lib_dir + '/bucket_minder'
40
50
  require lib_dir + '/ey-api'
41
51
  require lib_dir + '/snapshot_minder'
42
- require lib_dir + '/version'
52
+ require lib_dir + '/ec2'
53
+ require lib_dir + '/../ey_cloud_server/version'
@@ -1,7 +1,7 @@
1
1
  module EY
2
2
  class BigBrother
3
- def self.check
4
- json = JSON.parse(File.read('/etc/chef/dna.json'))
3
+ def self.check(chef_dna = '/etc/chef/dna.json')
4
+ json = JSON.parse(File.read(chef_dna))
5
5
 
6
6
  # {'skip':[
7
7
  # 'mysqld'
@@ -1,194 +1,80 @@
1
- module AWS::S3
2
- class S3Object
3
- def <=>(other)
4
- DateTime.parse(self.about['last-modified']) <=> DateTime.parse(other.about['last-modified'])
5
- end
6
- end
7
- end
8
-
9
1
  require 'open-uri'
10
2
 
11
3
  module EY
12
-
13
4
  class BucketMinder
14
- attr_accessor :bucket, :name
15
-
16
- def initialize(opts={})
17
- AWS::S3::Base.establish_connection!(
18
- :access_key_id => opts[:aws_secret_id],
19
- :secret_access_key => opts[:aws_secret_key]
20
- )
21
- @instance_id = opts[:instance_id]
22
- @type = opts[:type]
23
- @env = opts[:env]
24
- @opts = opts
25
- @bucket = find_bucket(opts[:bucket])
26
-
27
- opts[:extension] ||= "tgz"
28
- @keep = opts[:keep]
29
- @name = "#{Time.now.strftime("%Y-%m-%dT%H:%M:%S").gsub(/:/, '-')}.#{@type}.#{opts[:extension]}"
30
- end
31
-
32
- def find_bucket(buck="#{@env}-#{@type}-#{@instance_id}-#{Digest::SHA1.hexdigest(@opts[:aws_secret_id])[0..6]}")
33
- begin
34
- begin
35
- AWS::S3::Bucket.find(buck)
36
- rescue AWS::S3::NoSuchBucket
37
- begin
38
- AWS::S3::Bucket.create(buck)
39
- rescue AWS::S3::ResponseError
40
- end
41
- end
42
- buck
43
- end
44
- end
45
-
46
- def instance_id
47
- @instance_id ||= open("http://169.254.169.254/latest/meta-data/instance-id").read
48
- end
49
-
50
- def upload_object(file, printer = true)
51
- AWS::S3::S3Object.store(
52
- @name,
53
- File.open(file),
54
- bucket,
55
- :access => :private
56
- )
57
- FileUtils.rm file
58
- puts "successful upload: #{@name}" if printer
59
- true
60
- end
61
-
62
- def remove_object(key, printer = false)
63
- begin
64
- AWS::S3::S3Object.delete(key, @bucket)
65
- puts "Deleting #{key}" if printer
66
- # S3's eventual consistency sometimes causes really weird
67
- # failures.
68
- # Since cleanup happens every time and will clean up all stale
69
- # objects, we can just ignore S3-interaction failures. It'll
70
- # work next time.
71
- rescue AWS::S3::S3Exception, AWS::S3::Error
72
- nil
73
- end
5
+ def initialize(secret_id, secret_key, bucket_name, region = 'us-east-1')
6
+ @s3 = Fog::Storage.new(:provider => 'AWS',:aws_access_key_id => secret_id, :aws_secret_access_key => secret_key, :region => region)
7
+ @region = region
8
+ @bucket_name = bucket_name || "ey-backup-#{Digest::SHA1.hexdigest(secret_id)[0..11]}"
9
+
10
+ setup_bucket
74
11
  end
75
-
76
- def retrieve_object(key, printer = false)
77
- puts "Retrieving #{key}" if printer
78
- obj = AWS::S3::S3Object.find(key, @bucket)
12
+
13
+ attr_reader :bucket_name
14
+
15
+ def bucket
16
+ @bucket ||= @s3.directories.get(@bucket_name)
79
17
  end
80
-
81
- def download(index, printer = false)
82
- obj = list[index.to_i]
83
- puts "downloading: #{obj}" if printer
84
- File.open(obj.key, 'wb') do |f|
85
- print "." if printer
86
- obj.value {|chunk| f.write chunk }
87
- end
88
- puts if printer
89
- puts "finished" if printer
90
- obj.key
18
+
19
+ def files
20
+ bucket.files
91
21
  end
92
22
 
93
- def stream(key)
94
- AWS::S3::S3Object.stream(key, @bucket) do |chunk|
95
- yield chunk
23
+ def setup_bucket
24
+ unless bucket
25
+ @s3.directories.create(s3_params(:key => @bucket_name))
96
26
  end
97
27
  end
98
-
99
- def cleanup
100
- begin
101
- list[0...-(@keep)].each do |o|
102
- puts "deleting: #{o.key}"
103
- o.delete
104
- end
105
- # S3's eventual consistency sometimes causes really weird
106
- # failures.
107
- # Since cleanup happens every time and will clean up all stale
108
- # objects, we can just ignore S3-interaction failures. It'll
109
- # work next time.
110
- rescue AWS::S3::S3Exception, AWS::S3::Error
111
- nil
28
+
29
+ def s3_params(params = {})
30
+ return params if @region == 'us-east-1'
31
+ if @region == 'eu-west-1'
32
+ params.merge({:location => 'EU'})
33
+ else
34
+ params.merge({:location => @region})
112
35
  end
113
36
  end
114
-
115
- def get_current
116
- name = download(list.size - 1)
117
- File.expand_path(name)
118
- end
119
-
120
- # Removes all of the items in the current bucket
121
- def clear_bucket
122
- list.each do |o|
123
- puts "deleting: #{o.key}"
124
- o.delete
125
- end
126
- end
127
-
128
- # Deletes the most recent file from the bucket
129
- def rollback
130
- o = list.last
131
- puts "rolling back: #{o.key}"
132
- o.delete
37
+
38
+ def remove_object(key)
39
+ @s3.delete_object(bucket.key, key)
133
40
  end
134
-
135
- def empty?
136
- list.empty?
41
+
42
+ def stream(key, &block)
43
+ files.get(key, &block)
137
44
  end
138
-
139
- def list(directive = false)
140
- if directive.is_a? Hash
141
- prefix = directive[:prefix]
142
- printer = directive[:print] ||= false
143
- listing = AWS::S3::Bucket.objects(bucket, :prefix => prefix).flatten.sort
144
- objects = s3merge(listing)
145
- elsif directive.is_a? String
146
- printer = directive
147
- objects = AWS::S3::Bucket.objects(bucket).sort
148
- else
149
- puts "Input to list method must be Hash or String not: #{printer.class}"
150
- exit
151
- end
152
45
 
153
- return objects if objects.empty?
154
-
155
- puts "listing bucket #{bucket}" if printer && !objects.empty?
156
-
157
- if printer
158
- objects.each_with_index do |b, i|
159
- if b.is_a? Hash
160
- puts "#{i}:#{@env} #{b[:name]}"
161
- else
162
- puts "#{i}:#{@env} #{b.key}"
163
- end
164
- end
165
- end
166
- objects
46
+ def list(prefix)
47
+ listing = files.all(:prefix => prefix)
48
+ s3merge(listing)
167
49
  end
168
-
50
+
169
51
  # Merge s3 file listing to work with split files with naming of *.part\d\d
170
52
  def s3merge(list)
171
53
  return list if list.empty?
172
- bucketfiles=Array.new()
54
+ distinct_files=Array.new()
55
+
173
56
  list.each do |item|
174
57
  fname = item.key.gsub(/.part\d+$/,'')
175
58
  match = false
176
- bucketfiles.each_with_index do |b, i|
59
+ distinct_files.each_with_index do |b, i|
177
60
  if b[:name] == fname
178
- bucketfiles[i][:keys] << item.key
61
+ distinct_files[i][:keys] << item.key
179
62
  match = true
180
63
  end
181
64
  end
182
-
65
+
183
66
  if not match
184
67
  path = Array.new()
185
68
  path << item.key
186
69
  file = {:name => fname, :keys => path}
187
- bucketfiles << file
70
+ distinct_files << file
188
71
  end
189
72
  end
190
- bucketfiles
73
+ distinct_files
74
+ end
75
+
76
+ def put(filename, contents)
77
+ files.create(:key => filename, :body => contents)
191
78
  end
192
-
193
79
  end
194
- end
80
+ end
@@ -0,0 +1,17 @@
1
+ module EY
2
+ class EC2
3
+ def initialize(opts = {})
4
+ @ec2 = RightAws::Ec2.new(opts[:aws_secret_id], opts[:aws_secret_key], :logger => Logger.new("/dev/null"))
5
+ end
6
+
7
+ def method_missing(method, *a, &b)
8
+ @ec2.send(method, *a, &b)
9
+ rescue RightAws::AwsError => e
10
+ retries ||= 10
11
+ retries -= 1
12
+ raise e if retries == 0
13
+ sleep 30
14
+ retry
15
+ end
16
+ end
17
+ end
@@ -1,11 +1,64 @@
1
+ require 'ey_instance_api_client'
1
2
  module EY
2
3
  class SnapshotMinder
3
- def self.run(args)
4
- defaults = {:config => '/etc/.mysql.backups.yml',
5
- :command => :list_snapshots,
6
- :keep => 5}
7
4
 
8
- options = {}
5
+ class Backend
6
+ class Mock
7
+ def initialize
8
+ @commands_called = []
9
+ @raise_count = 0
10
+ @total_wait_time = 0
11
+ end
12
+
13
+ attr_reader :commands_called, :total_wait_time
14
+ attr_accessor :raise_count
15
+
16
+ def wait(seconds)
17
+ @total_wait_time += seconds
18
+ end
19
+
20
+ def snapshot
21
+ if @raise_count > 0
22
+ @raise_count -= 1
23
+ raise EY::InstanceAPIClient::Connection::UnexpectedStatus.new("")
24
+ else
25
+ @commands_called << "request_snapshot"
26
+ end
27
+ end
28
+
29
+ def list
30
+ @commands_called << "list_snapshots"
31
+ [EY::InstanceAPIClient::Snapshot.new({
32
+ 'state' => 'in progres',
33
+ 'progress' => '0%',
34
+ 'volume_type' => 'db',
35
+ 'snapshot_id' => 12,
36
+ 'created_at' => Time.now.to_s
37
+ })]
38
+ end
39
+ end
40
+
41
+ class Real
42
+ def initialize
43
+ @snapshots = EY::InstanceAPIClient::Snapshots.new
44
+ end
45
+
46
+ def wait(seconds)
47
+ sleep(seconds)
48
+ end
49
+
50
+ def snapshot
51
+ @snapshots.request
52
+ end
53
+
54
+ def list
55
+ @snapshots.list
56
+ end
57
+ end
58
+ end
59
+
60
+ def self.run(args)
61
+ options = {:command => :list_snapshots}
9
62
  # Build a parser for the command line arguments
10
63
  opts = OptionParser.new do |opts|
11
64
  opts.version = "0.0.1"
@@ -18,16 +71,15 @@ module EY
18
71
  options[:command] = :list_snapshots
19
72
  end
20
73
 
21
- opts.on("-c", "--config CONFIG", "Use config file.") do |config|
22
- options[:config] = config
74
+ opts.on("-c", "--config CONFIG", "DEPRECATED, does nothing") do |config|
75
+ STDERR.puts "WARNING: --config is DEPRECATED and has no effect"
23
76
  end
24
77
 
25
- opts.on("-i", "--instance-id ID", "specify the instance id to work with(only needed if you are running this from ourside of ec2)") do |iid|
26
- options[:instance_id] = iid
78
+ opts.on("-i", "--instance-id ID", "DEPRECATED, does nothing") do |iid|
79
+ STDERR.puts "WARNING: --instance-id is DEPRECATED and has no effect"
27
80
  end
28
81
 
29
-
30
- opts.on("--snapshot", "take snapshots of both of your volumes(only runs on your ec2 instance)") do
82
+ opts.on("--snapshot", "request a snapshot of the volumes attached to this instance") do
31
83
  options[:command] = :snapshot_volumes
32
84
  end
33
85
 
@@ -38,186 +90,56 @@ module EY
38
90
  end
39
91
 
40
92
  opts.parse!(args)
41
-
42
- ey = nil
43
- if File.exist?(config = File.expand_path(defaults[:config]))
44
- ey = new(options = defaults.merge(YAML::load(File.read(config))).merge(options))
45
- else
46
- abort "You need to have an /etc/.mysql.backups.yml file with your credentials in it to use this tool.\nOr point it at a yaml file with -c .mysql.backups.yml"
47
- end
48
-
49
- ey.send(options[:command])
50
- ey.clean_snapshots(options[:keep])
93
+ new(options).send(options[:command])
51
94
  end
52
95
 
53
- def initialize(opts={})
54
- @opts = opts
55
- @instance_id = opts[:instance_id]
56
- @db = Mysql.new('root', opts[:dbpass], opts[:lock_wait_timeout]) rescue nil
57
- @ec2 = RightAws::Ec2.new(opts[:aws_secret_id], opts[:aws_secret_key], :logger => Logger.new("/dev/null"))
58
- get_instance_id
59
- silence_stream($stderr) { find_volume_ids }
96
+ def self.backend
97
+ @backend ||= Backend::Real.new
60
98
  end
61
99
 
62
- def find_volume_ids
63
- @volume_ids = {}
64
- @ec2.describe_volumes.each do |volume|
65
- if volume[:aws_instance_id] == @instance_id
66
- if volume[:aws_device] == "/dev/sdz1"
67
- @volume_ids[:data] = volume[:aws_id]
68
- elsif volume[:aws_device] == "/dev/sdz2"
69
- @volume_ids[:db] = volume[:aws_id]
70
- end
71
- end
72
- end
73
- say "Volume IDs are #{@volume_ids.inspect}"
74
- @volume_ids
100
+ def self.enable_mock!
101
+ @backend = Backend::Mock.new
75
102
  end
76
103
 
77
- def list_snapshots
78
- @snapshot_ids = {}
79
- @ec2.describe_snapshots.sort { |a,b| b[:aws_started_at] <=> a[:aws_started_at] }.each do |snapshot|
80
- @volume_ids.each do |mnt, vol|
81
- if snapshot[:aws_volume_id] == vol
82
- (@snapshot_ids[mnt] ||= []) << snapshot[:aws_id]
83
- end
84
- end
85
- end
86
- say "Snapshots #{@snapshot_ids.inspect}"
87
- @snapshot_ids
88
- end
89
-
90
- def clean_snapshots(keep=5)
91
- list_snapshots
92
- @snapshot_ids.each do |mnt, ids|
93
- snaps = []
94
- @ec2.describe_snapshots(ids).sort { |a,b| b[:aws_started_at] <=> a[:aws_started_at] }.each do |snapshot|
95
- snaps << snapshot
96
- end
97
- (snaps[keep..-1]||[]).each do |snapshot|
98
- say "deleting snapshot of /#{mnt}: #{snapshot[:aws_id]}"
99
- @ec2.delete_snapshot(snapshot[:aws_id])
100
- end
101
- end
102
- list_snapshots
104
+ def initialize(opts={})
105
+ @opts = opts
103
106
  end
104
107
 
105
108
  def snapshot_volumes
106
- snaps = []
107
- @volume_ids.each do |vol, vid|
108
- case vol
109
- when :data
110
- sync_filesystem_buffers
111
- snaps << create_snapshot(vid)
112
- when :db
113
- @db.flush_tables_with_read_lock
114
- say "Read lock acquired. Writing master binlog info to #{@master_status_file} and syncing filesystem buffers."
115
- @db.write_master_status
116
- sync_filesystem_buffers
117
- snaps << create_snapshot(vid)
118
- @db.unlock_tables
109
+ retry_count = 0
110
+ begin
111
+ SnapshotMinder.backend.snapshot
112
+ say "Snapshot requested."
113
+ rescue EY::InstanceAPIClient::Connection::UnexpectedStatus => e
114
+ retry_count += 1
115
+ if retry_count <= 10
116
+ retry_in = backoff(retry_count)
117
+ say "failed with #{e.inspect}, retrying in #{retry_in} seconds"
118
+ SnapshotMinder.backend.wait(retry_in)
119
+ retry
120
+ else
121
+ raise e
119
122
  end
120
123
  end
121
- snaps
122
- end
123
-
124
- def get_instance_id
125
- return @instance_id if @instance_id
126
-
127
- open('http://169.254.169.254/latest/meta-data/instance-id') do |f|
128
- @instance_id = f.gets
129
- end
130
- abort "Cannot find instance id!" unless @instance_id
131
- say "Instance ID is #{@instance_id}"
132
- @instance_id
133
124
  end
134
125
 
135
- def sync_filesystem_buffers
136
- sync_cmd = "sync && sync && sync"
137
- system(sync_cmd)
126
+ def backoff(nth_time)
127
+ 60*nth_time + rand((60 / (nth_time+1)) + 5) #see tests
138
128
  end
139
129
 
140
- def create_snapshot(volume_id)
141
- retries = 0
142
- begin
143
- snap = @ec2.create_snapshot(volume_id)
144
- rescue RightAws::AwsError
145
- retries += 1
146
- raise if retries > 10
147
- sleep retries * retries
148
- retry
130
+ def list_snapshots
131
+ snapshots = SnapshotMinder.backend.list
132
+ say "#{snapshots.size} Snapshots available:"
133
+ snapshots.each do |s|
134
+ say "#{s.id} - #{s.volume} #{s.state} #{s.progress} #{s.created_at}"
149
135
  end
150
- say "Created snapshot of #{volume_id} as #{snap[:aws_id]}"
151
- snap
152
136
  end
153
137
 
154
138
  private
155
- def say(msg, newline = true)
156
- return if @opts[:quiet]
157
- print("#{msg}#{"\n" if newline}")
158
- end
159
-
160
- def silence_stream(stream)
161
- old_stream = stream.dup
162
- stream.reopen("/dev/null")
163
- stream.sync = true
164
- yield
165
- ensure
166
- stream.reopen(old_stream)
167
- end
168
- end
169
-
170
- class Mysql
171
-
172
- attr_accessor :dbh
173
-
174
- def initialize(username, password, lock_wait_timeout)
175
- @username = username
176
- @password = password
177
- @read_lock_pid = nil
178
- @lock_wait_timeout = lock_wait_timeout.nil? ? 5 : lock_wait_timeout
179
- @master_status_file = "/db/mysql/.snapshot_backup_master_status.txt"
180
- end
181
-
182
- def waiting_read_lock_thread
183
- thread_cmd = "mysql -p#{@password} -u #{@username} -N -e 'show full processlist;' | grep 'flush tables with read lock' | awk '{print $1}'"
184
- %x{#{thread_cmd}}
185
- end
186
-
187
- def write_master_status
188
- master_status_cmd = "mysql -p#{@password} -u #{@username} -e'SHOW MASTER STATUS\\G' > #{@master_status_file}"
189
- system(master_status_cmd)
190
- end
191
-
192
- def flush_tables_with_read_lock
193
- pipe = IO.popen("mysql -u #{@username} -p#{@password}", 'w')
194
- @read_lock_pid = pipe.pid
195
-
196
- pipe.puts('flush tables with read lock;')
197
- sleep(@lock_wait_timeout)
198
-
199
- if (thread_id = waiting_read_lock_thread) != ''
200
- Process.kill('TERM', @read_lock_pid)
201
-
202
- # after killing the process the mysql thread is still hanging out, need to kill it directly
203
- kill_thread_cmd = "mysql -u #{@username} -p#{@password} -e'kill #{thread_id};'"
204
- system(kill_thread_cmd)
205
- abort "Read lock not acquired after #{@lock_wait_timeout} second timeout. Killed request and aborting backup."
206
- end
207
-
208
- true
209
- end
210
-
211
- def unlock_tables
212
- # technically we don't actually have to do anything here since the spawned
213
- # process that has the read lock will die with this one but it doesn't hurt
214
- # to be safe
215
- Process.kill('TERM', @read_lock_pid)
216
- true
217
- end
218
139
 
219
- def disconnect
220
- @dbh.disconnect
140
+ def say(msg, newline = true)
141
+ return if @opts[:quiet]
142
+ print("#{msg}#{"\n" if newline}")
221
143
  end
222
144
  end
223
145
  end