ey_cloud_server 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +20 -0
- data/README.rdoc +25 -0
- data/bin/ey-agent +5 -0
- data/bin/ey-monitor +5 -0
- data/bin/ey-snapshots +5 -0
- data/bin/eybackup +5 -0
- data/lib/ey-flex/backups.rb +226 -0
- data/lib/ey-flex/big-brother.rb +80 -0
- data/lib/ey-flex/bucket_minder.rb +119 -0
- data/lib/ey-flex/ey-api.rb +21 -0
- data/lib/ey-flex/mysql_database.rb +23 -0
- data/lib/ey-flex/postgresql_database.rb +14 -0
- data/lib/ey-flex/snapshot_minder.rb +205 -0
- data/lib/ey-flex/stonith.rb +190 -0
- data/lib/ey-flex/version.rb +3 -0
- data/lib/ey-flex.rb +37 -0
- metadata +129 -0
data/LICENSE
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2009 Engine Yard Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc
ADDED
@@ -0,0 +1,25 @@
+== ey
+
+= Development
+
+You need to have the bundler gem installed:
+
+  gem install bundler
+
+Then bundle everything up:
+
+  gem bundle -- --with-mysql_config
+
+To run the specs, just jump into a "bundled" shell:
+
+  gem exec bash
+
+Copy the example file to spec/config.yml:
+
+Then you can run specs:
+
+  spec -c spec
+
+Running the bin files is easy too:
+
+  bin/ey-recipes
data/bin/ey-agent
ADDED
data/bin/ey-monitor
ADDED
data/bin/ey-snapshots
ADDED
data/bin/eybackup
ADDED
data/lib/ey-flex/backups.rb
ADDED
@@ -0,0 +1,226 @@
+module AWS::S3
+  class S3Object
+    def <=>(other)
+      DateTime.parse(self.about['last-modified']) <=> DateTime.parse(other.about['last-modified'])
+    end
+  end
+end
+
+module EY::Flex
+  class DatabaseEngine
+    def self.register_as(name)
+      EY::Flex::Backups::ENGINES[name] = self
+    end
+
+    def initialize(backups)
+      @backups = backups
+    end
+
+    def dump_database(name)
+      raise "Implement #dump_database in #{self.class}"
+    end
+
+    def dbuser
+      @backups.config[:dbuser]
+    end
+
+    def dbpass
+      @backups.config[:dbpass]
+    end
+  end
+
+  class Backups
+    class BackupNotFound < EY::Flex::Error; end
+
+    def self.run(args)
+      options = {:command => :new_backup}
+
+      # Build a parser for the command line arguments
+      opts = OptionParser.new do |opts|
+        opts.version = "0.0.1"
+
+        opts.banner = "Usage: eybackup [-flag] [argument]"
+        opts.define_head "eybackup: backing up your shit since way back when..."
+        opts.separator '*'*80
+
+        opts.on("-l", "--list-backup DATABASE", "List mysql backups for DATABASE") do |db|
+          options[:db] = (db || 'all')
+          options[:command] = :list
+        end
+
+        opts.on("-n", "--new-backup", "Create new mysql backup") do
+          options[:command] = :new_backup
+        end
+
+        opts.on("-c", "--config CONFIG", "Use config file.") do |config|
+          options[:config] = config
+        end
+
+        opts.on("-d", "--download BACKUP_INDEX", "download the backup specified by index. Run eybackup -l to get the index.") do |index|
+          options[:command] = :download
+          options[:index] = index
+        end
+
+        opts.on("-e", "--engine DATABASE_ENGINE", "The database engine. ex: mysql, postgres.") do |engine|
+          options[:engine] = engine
+        end
+
+        opts.on("-r", "--restore BACKUP_INDEX", "Download and apply the backup specified by index WARNING! will overwrite the current db with the backup. Run eybackup -l to get the index.") do |index|
+          options[:command] = :restore
+          options[:index] = index
+        end
+
+      end
+
+      opts.parse!(args)
+
+      options[:engine] ||= 'mysql'
+      options[:config] ||= "/etc/.#{options[:engine]}.backups.yml"
+
+      eyb = new(options)
+
+      case options[:command]
+      when :list
+        eyb.list options[:db], true
+      when :new_backup
+        eyb.new_backup
+      when :download
+        eyb.download(options[:index])
+      when :restore
+        eyb.restore(options[:index])
+      end
+      eyb.cleanup
+    rescue EY::Flex::Error => e
+      $stderr.puts e.message
+      exit 1
+    end
+
+    ENGINES = {}
+
+    def initialize(options = {})
+      engine_klass = ENGINES[options[:engine]] || raise("Invalid database engine: #{options[:engine].inspect}")
+      @engine = engine_klass.new(self)
+
+      load_config(options[:config])
+
+      AWS::S3::Base.establish_connection!(
+        :access_key_id => config[:aws_secret_id],
+        :secret_access_key => config[:aws_secret_key]
+      )
+      @databases = config[:databases]
+      @keep = config[:keep]
+      @bucket = "ey-backup-#{Digest::SHA1.hexdigest(config[:aws_secret_id])[0..11]}"
+      @tmpname = "#{Time.now.strftime("%Y-%m-%dT%H:%M:%S").gsub(/:/, '-')}.sql.gz"
+      @env = config[:env]
+      FileUtils.mkdir_p '/mnt/backups'
+      FileUtils.mkdir_p '/mnt/tmp'
+      begin
+        AWS::S3::Bucket.find(@bucket)
+      rescue AWS::S3::NoSuchBucket
+        AWS::S3::Bucket.create(@bucket)
+      end
+
+      FileUtils.mkdir_p self.backup_dir
+    end
+    attr_reader :config
+
+    def load_config(filename)
+      if File.exist?(filename)
+        @config = YAML::load(File.read(filename))
+      else
+        $stderr.puts "You need to have a backup file at #{filename}"
+        exit 1
+      end
+    end
+
+    def new_backup
+      @databases.each do |db|
+        backup_database(db)
+      end
+    end
+
+    def backup_database(database)
+      File.open("#{self.backup_dir}/#{database}.#{@tmpname}", "w") do |f|
+        puts "doing database: #{database}"
+        @engine.dump_database(database, f)
+      end
+
+      File.open("#{self.backup_dir}/#{database}.#{@tmpname}") do |f|
+        path = "#{@env}.#{database}/#{database}.#{@tmpname}"
+        AWS::S3::S3Object.store(path, f, @bucket, :access => :private)
+        puts "successful backup: #{database}.#{@tmpname}"
+      end
+    end
+
+    def download(index)
+      idx, db = index.split(":")
+      raise Error, "You didn't specify a database name: e.g. 1:rails_production" unless db
+
+      if obj = list(db)[idx.to_i]
+        filename = normalize_name(obj)
+        puts "downloading: #{filename}"
+        File.open(filename, 'wb') do |f|
+          print "."
+          obj.value {|chunk| f.write chunk }
+        end
+        puts
+        puts "finished"
+        [db, filename]
+      else
+        raise BackupNotFound, "No backup found for database #{db.inspect}: requested index: #{idx}"
+      end
+    end
+
+    def restore(index)
+      db, filename = download(index)
+      File.open(filename) do |f|
+        @engine.restore_database(db, f)
+      end
+    end
+
+    def cleanup
+      begin
+        list('all',false)[0...-(@keep*@databases.size)].each do |o|
+          puts "deleting: #{o.key}"
+          o.delete
+        end
+      rescue AWS::S3::S3Exception, AWS::S3::Error
+        nil # see bucket_minder cleanup note regarding S3 consistency
+      end
+    end
+
+    def normalize_name(obj)
+      obj.key.gsub(/^.*?\//, '')
+    end
+
+    def find_obj(name)
+      AWS::S3::S3Object.find name, @bucket
+    end
+
+    def list(database='all', printer = false)
+      puts "Listing database backups for #{database}" if printer
+      backups = []
+      if database == 'all'
+        @databases.each do |db|
+          backups << AWS::S3::Bucket.objects(@bucket, :prefix => "#{@env}.#{db}")
+        end
+        backups = backups.flatten.sort
+      else
+        backups = AWS::S3::Bucket.objects(@bucket, :prefix => "#{@env}.#{database}").sort
+      end
+      if printer
+        puts "#{backups.size} backup(s) found"
+        backups.each_with_index do |b,i|
+          puts "#{i}:#{database} #{normalize_name(b)}"
+        end
+      end
+      backups
+    end
+
+    protected
+    def backup_dir
+      "/mnt/tmp"
+    end
+
+  end
+end
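
Note: the bin/ stubs listed above are five-line files whose bodies are not shown in this diff; based on the option parser here, the eybackup executable presumably hands ARGV to EY::Flex::Backups.run. A minimal sketch of equivalent programmatic use, assuming a readable /etc/.mysql.backups.yml containing the aws_secret_id, aws_secret_key, dbuser, dbpass, databases, keep and env keys that Backups#initialize reads:

    require 'ey-flex'

    # Equivalent to `eybackup -n`: dump every database named in config[:databases]
    # and upload each gzipped dump to the ey-backup-* S3 bucket.
    EY::Flex::Backups.run(%w[-n])

    # Equivalent to `eybackup -l all` followed by `eybackup -d 0:mydb`
    # ("mydb" is a hypothetical database name):
    EY::Flex::Backups.run(%w[-l all])      # print an indexed listing per database
    EY::Flex::Backups.run(%w[-d 0:mydb])   # download backup index 0 of mydb
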
data/lib/ey-flex/big-brother.rb
ADDED
@@ -0,0 +1,80 @@
+module EY
+  class BigBrother
+    def self.check
+      json = JSON.parse(File.read('/etc/chef/dna.json'))
+
+      # {'skip':[
+      #  'mysqld'
+      # ],
+      # 'check':[
+      #  'ttsrv'
+      # ]}
+
+      skips = JSON.parse(File.read('/etc/ey-alerts.json')) rescue {}
+
+      new(json.merge(skips)).check
+    end
+
+    def initialize(dna)
+      @dna = dna
+      @result = {}
+    end
+
+    def nginx_or_apache
+      server = ''
+      @dna['applications'].each do |name, app_data|
+        if app_data['recipes'].detect { |r| r == 'nginx' }
+          server = 'nginx'
+        end
+
+        if app_data['recipes'].detect { |r| r == 'passenger' }
+          server = 'apache2'
+        end
+      end
+      server
+    end
+
+    def skip?(name)
+      (@dna['skip']||[]).include?(name)
+    end
+
+    def check
+      case @dna['instance_role']
+      when 'solo'
+        check_process(nginx_or_apache) unless skip?(nginx_or_apache)
+        check_mysql unless skip?('mysqld')
+      when 'app', 'app_master'
+        check_process(nginx_or_apache) unless skip?(nginx_or_apache)
+        check_process('haproxy') unless skip?('haproxy')
+      when 'db_master', 'db_slave'
+        check_mysql unless skip?('mysqld')
+      when 'util'
+      end
+      (@dna['check']||[]).each do |check|
+        check_process(check)
+      end
+      @result.to_json
+    end
+
+    def check_mysql
+      check_process('mysqld')
+      DBI.connect("DBI:Mysql:mysql:#{@dna['db_host']}", 'root', @dna['users'].first['password'])
+    rescue DBI::DatabaseError => e
+      @result['mysqld'] = 'down'
+    end
+
+    def check_process(name)
+      return if name == ''
+      pids = `pgrep #{name}`.split("\n")
+      if pids.empty?
+        @result[name] = 'down'
+      else
+        if pids.detect {|p| `kill -0 #{p}; echo $?`.chomp.to_i != 0}
+          @result[name] = 'down'
+        else
+          @result[name] = 'up'
+        end
+      end
+    end
+  end
+end
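
An illustrative note on BigBrother: it normally merges /etc/chef/dna.json with an optional /etc/ey-alerts.json (whose skip/check shape is sketched in the comment above) and reports process health as JSON. A minimal sketch exercising it with an in-memory DNA hash; the application and process names here are placeholders, not values taken from the gem:

    dna = {
      'instance_role' => 'app_master',
      'applications'  => { 'myapp' => { 'recipes' => ['nginx'] } },  # hypothetical app entry
      'skip'          => ['haproxy'],                                # suppress the haproxy check
      'check'         => ['memcached']                               # extra process to verify with pgrep
    }
    puts EY::BigBrother.new(dna).check   # e.g. {"nginx":"up","memcached":"down"}
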
data/lib/ey-flex/bucket_minder.rb
ADDED
@@ -0,0 +1,119 @@
+module AWS::S3
+  class S3Object
+    def <=>(other)
+      DateTime.parse(self.about['last-modified']) <=> DateTime.parse(other.about['last-modified'])
+    end
+  end
+end
+
+require 'open-uri'
+
+module EY
+
+  class BucketMinder
+
+    def initialize(opts={})
+      AWS::S3::Base.establish_connection!(
+        :access_key_id => opts[:aws_secret_id],
+        :secret_access_key => opts[:aws_secret_key]
+      )
+      @instance_id = opts[:instance_id]
+      @type = opts[:type]
+      @env = opts[:env]
+      @opts = opts
+      opts[:extension] ||= "tgz"
+      @keep = opts[:keep]
+      @name = "#{Time.now.strftime("%Y-%m-%dT%H:%M:%S").gsub(/:/, '-')}.#{@type}.#{opts[:extension]}"
+    end
+
+    def bucket
+      @bucket ||= begin
+        buck = "#{@env}-#{@type}-#{instance_id}-#{Digest::SHA1.hexdigest(@opts[:aws_secret_id])[0..6]}"
+        begin
+          AWS::S3::Bucket.create buck
+        rescue AWS::S3::ResponseError
+        end
+        buck
+      end
+    end
+
+    def instance_id
+      @instance_id ||= open("http://169.254.169.254/latest/meta-data/instance-id").read
+    end
+
+    def upload_object(file)
+      AWS::S3::S3Object.store(
+        @name,
+        open(file),
+        bucket,
+        :access => :private
+      )
+      FileUtils.rm file
+      puts "successful upload: #{@name}"
+      true
+    end
+
+    def download(index, printer = false)
+      obj = list[index.to_i]
+      puts "downloading: #{obj}" if printer
+      File.open(obj.key, 'wb') do |f|
+        print "." if printer
+        obj.value {|chunk| f.write chunk }
+      end
+      puts if printer
+      puts "finished" if printer
+      obj.key
+    end
+
+    def cleanup
+      begin
+        list[0...-(@keep)].each do |o|
+          puts "deleting: #{o.key}"
+          o.delete
+        end
+      # S3's eventual consistency sometimes causes really weird
+      # failures.
+      # Since cleanup happens every time and will clean up all stale
+      # objects, we can just ignore S3-interaction failures. It'll
+      # work next time.
+      rescue AWS::S3::S3Exception, AWS::S3::Error
+        nil
+      end
+    end
+
+    def get_current
+      name = download(list.size - 1)
+      File.expand_path(name)
+    end
+
+    def clear_bucket
+      list.each do |o|
+        puts "deleting: #{o.key}"
+        o.delete
+      end
+    end
+
+    def rollback
+      o = list.last
+      puts "rolling back: #{o.key}"
+      o.delete
+    end
+
+    def empty?
+      list.empty?
+    end
+
+    def list(printer = false)
+      objects = AWS::S3::Bucket.objects(bucket).sort
+      puts "listing bucket #{bucket}" if printer && !objects.empty?
+      if printer
+        objects.each_with_index do |b,i|
+          puts "#{i}:#{@env} #{b.key}"
+        end
+      end
+      objects
+    end
+
+  end
+
+end
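
A sketch of how BucketMinder appears meant to be driven, based only on the options its initializer reads. Every value below is a placeholder, and :instance_id can be omitted on EC2 because the instance metadata service is used as a fallback:

    minder = EY::BucketMinder.new(
      :aws_secret_id  => 'AKIA...',            # placeholder credentials
      :aws_secret_key => 'secret',
      :env            => 'myapp_production',   # hypothetical environment name
      :type           => 'config',             # becomes part of the bucket and object names
      :keep           => 5                     # how many objects cleanup leaves behind
    )
    minder.upload_object('/tmp/config.tgz')    # stores the file privately, then deletes the local copy
    minder.list(true)                          # print an indexed listing of the bucket
    minder.cleanup                             # trim everything but the newest :keep objects
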
data/lib/ey-flex/ey-api.rb
ADDED
@@ -0,0 +1,21 @@
+module EyApi
+  def call_api(path, opts={})
+    JSON.parse(@rest["/api/#{path}"].post(@keys.merge(opts), {"Accept" => "application/json"}))
+  rescue RestClient::RequestFailed => e
+    case e.http_code
+    when 503
+      sleep 10 # Nanite, save us...
+      retry
+    else
+      raise "API call to Engine Yard failed with status #{e.http_code}."
+    end
+  end
+
+  def get_envs
+    @_envs ||= call_api("environments")
+  end
+
+  def get_json(instance_id)
+    call_api("json_for_instance", :instance_id => instance_id)
+  end
+end
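
EyApi is a mixin with two implicit requirements on the including class: a @rest RestClient::Resource pointed at the API host and a @keys hash of credentials that gets merged into every POST (Stonith, later in this diff, sets both). A minimal hypothetical includer, with placeholder URL and credentials:

    class ApiClient
      include EyApi

      def initialize(api_url, secret_id, secret_key)
        @rest = RestClient::Resource.new(api_url)   # e.g. the :api value Stonith reads from /etc/.ey-cloud.yml
        @keys = { :aws_secret_id => secret_id, :aws_secret_key => secret_key }
      end
    end

    client = ApiClient.new('https://api.example.com', 'AKIA...', 'secret')  # placeholder values
    client.get_envs   # POSTs to /api/environments and parses the JSON response
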
data/lib/ey-flex/mysql_database.rb
ADDED
@@ -0,0 +1,23 @@
+module EY::Flex
+  class MysqlDatabase < DatabaseEngine
+    register_as 'mysql'
+
+    def dump_database(name, io)
+      single_transaction = db_has_myisam?(name) ? '' : '--single-transaction'
+      Open4.spawn ["mysqldump -u#{dbuser} #{password_option} #{single_transaction} #{name} | gzip -c"], :stdout => io
+    end
+
+    def db_has_myisam?(name)
+      query = "SELECT 1 FROM information_schema.tables WHERE table_schema='#{name}' AND engine='MyISAM' LIMIT 1;"
+      %x{mysql -u #{dbuser} #{password_option} -N -e"#{query}"}.strip == '1'
+    end
+
+    def restore_database(name, io)
+      Open4.spawn ["gzip -dc | mysql -u#{dbuser} #{password_option} #{name}"], :stdin => io
+    end
+
+    def password_option
+      dbpass.blank? ? "" : "-p'#{dbpass}'"
+    end
+  end
+end
data/lib/ey-flex/postgresql_database.rb
ADDED
@@ -0,0 +1,14 @@
+module EY::Flex
+  class PostgresqlDatabase < DatabaseEngine
+    register_as 'postgresql'
+
+    def dump_database(name, io)
+      Open4.spawn ["PGPASSWORD='#{dbpass}' pg_dump -h localhost --clean --no-owner --no-privileges -U#{dbuser} #{name}"], :stdout => io
+    end
+
+    def restore_database(name, io)
+      Open4.spawn ["PGPASSWORD='#{dbpass}' createdb -h localhost -U#{dbuser} #{name}"], :stdin => io, :raise => false
+      Open4.spawn ["PGPASSWORD='#{dbpass}' psql -h localhost -U#{dbuser} #{name}"], :stdin => io
+    end
+  end
+end
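
The two engine classes above show the full extension point: DatabaseEngine.register_as stores the subclass in EY::Flex::Backups::ENGINES under the key that `eybackup -e NAME` later looks up, and each engine only needs dump_database(name, io) and restore_database(name, io). A hypothetical third engine, not part of this gem, would follow the same shape:

    module EY::Flex
      class SqliteDatabase < DatabaseEngine   # illustrative only
        register_as 'sqlite'

        def dump_database(name, io)
          Open4.spawn ["sqlite3 #{name} .dump | gzip -c"], :stdout => io
        end

        def restore_database(name, io)
          Open4.spawn ["gzip -dc | sqlite3 #{name}"], :stdin => io
        end
      end
    end
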
data/lib/ey-flex/snapshot_minder.rb
ADDED
@@ -0,0 +1,205 @@
+module EY
+  class SnapshotMinder
+    def self.run(args)
+      defaults = {:config => '/etc/.mysql.backups.yml',
+                  :command => :list_snapshots,
+                  :keep => 5}
+
+      options = {}
+      # Build a parser for the command line arguments
+      opts = OptionParser.new do |opts|
+        opts.version = "0.0.1"
+
+        opts.banner = "Usage: ey-snapshots [-flag] [argument]"
+        opts.define_head "ey-snapshots: managing your snapshots..."
+        opts.separator '*'*80
+
+        opts.on("-l", "--list-snapshots", "list snapshots") do
+          options[:command] = :list_snapshots
+        end
+
+        opts.on("-c", "--config CONFIG", "Use config file.") do |config|
+          options[:config] = config
+        end
+
+        opts.on("-i", "--instance-id ID", "specify the instance id to work with (only needed if you are running this from outside of ec2)") do |iid|
+          options[:instance_id] = iid
+        end
+
+
+        opts.on("--snapshot", "take snapshots of both of your volumes (only runs on your ec2 instance)") do
+          options[:command] = :snapshot_volumes
+        end
+
+
+      end
+
+      opts.parse!(args)
+
+      ey = nil
+      if File.exist?(config = File.expand_path(defaults[:config]))
+        ey = new(options = defaults.merge(YAML::load(File.read(config))).merge(options))
+      else
+        puts "You need to have an /etc/.mysql.backups.yml file with your credentials in it to use this tool.\nOr point it at a yaml file with -c .mysql.backups.yml"
+        exit 1
+      end
+
+      ey.send(options[:command])
+      ey.clean_snapshots(options[:keep])
+    end
+
+    def initialize(opts={})
+      @opts = opts
+      @instance_id = opts[:instance_id]
+      @db = Mysql.new('root', opts[:dbpass], opts[:lock_wait_timeout]) rescue nil
+      @ec2 = RightAws::Ec2.new(opts[:aws_secret_id], opts[:aws_secret_key])
+      get_instance_id
+      find_volume_ids
+    end
+
+    def find_volume_ids
+      @volume_ids = {}
+      @ec2.describe_volumes.each do |volume|
+        if volume[:aws_instance_id] == @instance_id
+          if volume[:aws_device] == "/dev/sdz1"
+            @volume_ids[:data] = volume[:aws_id]
+          elsif volume[:aws_device] == "/dev/sdz2"
+            @volume_ids[:db] = volume[:aws_id]
+          end
+        end
+      end
+      puts("Volume IDs are #{@volume_ids.inspect}")
+      @volume_ids
+    end
+
+    def list_snapshots
+      @snapshot_ids = {}
+      @ec2.describe_snapshots.sort { |a,b| b[:aws_started_at] <=> a[:aws_started_at] }.each do |snapshot|
+        @volume_ids.each do |mnt, vol|
+          if snapshot[:aws_volume_id] == vol
+            (@snapshot_ids[mnt] ||= []) << snapshot[:aws_id]
+          end
+        end
+      end
+      puts("Snapshots #{@snapshot_ids.inspect}")
+      @snapshot_ids
+    end
+
+    def clean_snapshots(keep=5)
+      list_snapshots
+      @snapshot_ids.each do |mnt, ids|
+        snaps = []
+        @ec2.describe_snapshots(ids).sort { |a,b| b[:aws_started_at] <=> a[:aws_started_at] }.each do |snapshot|
+          snaps << snapshot
+        end
+        (snaps[keep..-1]||[]).each do |snapshot|
+          puts "deleting snapshot of /#{mnt}: #{snapshot[:aws_id]}"
+          @ec2.delete_snapshot(snapshot[:aws_id])
+        end
+      end
+      list_snapshots
+    end
+
+    def snapshot_volumes
+      snaps = []
+      @volume_ids.each do |vol, vid|
+        case vol
+        when :data
+          sync_filesystem_buffers
+          snaps << create_snapshot(vid)
+        when :db
+          @db.flush_tables_with_read_lock
+          puts("Read lock acquired. Writing master binlog info to #{@master_status_file} and syncing filesystem buffers.")
+          @db.write_master_status
+          sync_filesystem_buffers
+          snaps << create_snapshot(vid)
+          @db.unlock_tables
+        end
+      end
+      snaps
+    end
+
+    def get_instance_id
+      return @instance_id if @instance_id
+
+      open('http://169.254.169.254/latest/meta-data/instance-id') do |f|
+        @instance_id = f.gets
+      end
+      raise "Cannot find instance id!" unless @instance_id
+      puts("Instance ID is #{@instance_id}")
+      @instance_id
+    end
+
+    def sync_filesystem_buffers
+      sync_cmd = "sync && sync && sync"
+      system(sync_cmd)
+    end
+
+    def create_snapshot(volume_id)
+      snap = @ec2.create_snapshot(volume_id)
+      puts("Created snapshot of #{volume_id} as #{snap[:aws_id]}")
+      snap
+    end
+
+  end
+
+  class Mysql
+
+    attr_accessor :dbh
+
+    def initialize(username, password, lock_wait_timeout)
+      @username = username
+      @password = password
+      @read_lock_pid = nil
+      @lock_wait_timeout = lock_wait_timeout.nil? ? 5 : lock_wait_timeout
+      @master_status_file = "/db/mysql/.snapshot_backup_master_status.txt"
+    end
+
+    def waiting_read_lock_thread
+      thread_cmd = "mysql -p#{@password} -u #{@username} -N -e 'show full processlist;' | grep 'flush tables with read lock' | awk '{print $1}'"
+      %x{#{thread_cmd}}
+    end
+
+    def write_master_status
+      master_status_cmd = "mysql -p#{@password} -u #{@username} -e'SHOW MASTER STATUS\\G' > #{@master_status_file}"
+      system(master_status_cmd)
+    end
+
+    def flush_tables_with_read_lock
+      puts("Requesting read lock for snapshot.")
+
+      pipe = IO.popen("mysql -u #{@username} -p#{@password}", 'w')
+      @read_lock_pid = pipe.pid
+
+      pipe.puts('flush tables with read lock;')
+      sleep(@lock_wait_timeout)
+
+      if (thread_id = waiting_read_lock_thread) != ''
+        Process.kill('TERM', @read_lock_pid)
+
+        # after killing the process the mysql thread is still hanging out, need to kill it directly
+        kill_thread_cmd = "mysql -u #{@username} -p#{@password} -e'kill #{thread_id};'"
+        system(kill_thread_cmd)
+        raise "Read lock not acquired after #{@lock_wait_timeout} second timeout. Killed request and aborting backup."
+      end
+
+      true
+    end
+
+    def unlock_tables
+      puts("Unlocking tables")
+      # technically we don't actually have to do anything here since the spawned
+      # process that has the read lock will die with this one but it doesn't hurt
+      # to be safe
+      Process.kill('TERM', @read_lock_pid)
+      true
+    end
+
+    def disconnect
+      puts("Disconnecting from MySQL")
+      @dbh.disconnect
+    end
+
+  end
+
+end
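
Illustrative invocations, equivalent to what the ey-snapshots executable (whose five-line body is not shown in this diff) would presumably pass through to SnapshotMinder.run; AWS credentials and the database password are read from /etc/.mysql.backups.yml:

    EY::SnapshotMinder.run(%w[--snapshot])        # lock MySQL, sync, snapshot the /data and /db EBS volumes
    EY::SnapshotMinder.run(%w[-l])                # list snapshots for this instance's volumes
    EY::SnapshotMinder.run(%w[-i i-12345678 -l])  # hypothetical instance id, for use from outside EC2
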
data/lib/ey-flex/stonith.rb
ADDED
@@ -0,0 +1,190 @@
+module EY
+  class Log
+    def self.write(str)
+      puts str
+      File.open("/root/ey-monitor2.log", "a") do |f|
+        f.write("#{str}\n")
+      end
+    end
+  end
+
+  class Stonith
+    include EyApi
+
+    def self.run
+      opts = YAML::load(File.read("/etc/.mysql.backups.yml"))
+      opts.merge!(YAML::load(File.read("/etc/.ey-cloud.yml")))
+      EventMachine.run {
+        EY::Stonith.new(opts.merge(:heartbeat => 10))
+      }
+    end
+
+    def initialize(opts={})
+      Log.write "Starting up"
+      @opts = opts
+      @rest = RestClient::Resource.new(opts[:api])
+      @keys = {:aws_secret_id => @opts[:aws_secret_id], :aws_secret_key => @opts[:aws_secret_key]}
+      @bad_checks = 0
+      @seen_good_check = false
+      @ec2 = RightAws::Ec2.new(@opts[:aws_secret_id], @opts[:aws_secret_key])
+      @taking_over = false
+      get_local_json
+      get_master_from_json
+      setup_traps
+      start
+      am_i_master?
+    end
+
+    def setup_traps
+      trap("HUP") { cancel_master_check_timer; Log.write "timer canceled, not monitoring until you wake me up again"}
+      trap("USR1") { EM.add_timer(600) { setup_master_check_timer unless am_i_master? }; Log.write "woke up, starting monitoring again in 10 minutes"}
+    end
+
+    def get_mysql_handle
+      DBI.connect("DBI:Mysql:engineyard:#{@json['db_host']}", 'root', @opts[:dbpass])
+    end
+
+    def try_lock(nodename)
+      Log.write("Trying to grab the lock for: #{nodename}")
+      db = get_mysql_handle
+      db.execute("begin")
+      res = db.execute("select master_lock from locks for update")
+      master = res.fetch[0]
+      res.finish
+      got_lock = false
+      if master == @master
+        got_lock = true
+        @master = "http://#{private_dns_name}/haproxy/monitor"
+        db.do("update locks set master_lock = '#{@master}'")
+      else
+        # new master, don't start monitoring till it comes up
+        @seen_good_check = false
+        @taking_over = false
+        @master = master
+        Log.write("Failed to grab lock, relenting: #{nodename}\nmaster is: #{@master}")
+        EM.add_timer(600) { Log.write "restarting monitoring"; setup_master_check_timer }
+      end
+      db.do("commit")
+      db.disconnect
+      got_lock
+    end
+
+    def start
+      setup_master_check_timer
+      EM.add_periodic_timer(300) { get_local_json }
+    end
+
+    def setup_master_check_timer
+      cancel_master_check_timer
+      unless self_monitor_url == @master
+        @check_master_timer = EventMachine::PeriodicTimer.new(@opts[:heartbeat]) { check_master }
+      end
+    end
+
+    def cancel_master_check_timer
+      @check_master_timer && @check_master_timer.cancel
+    end
+
+    def check_master
+      http = EventMachine::HttpRequest.new(@master).get :timeout => 10
+
+      http.callback {
+        unless http.response_header.status == 200
+          take_over_as_master
+        else
+          @seen_good_check = true
+          @bad_checks = 0
+        end
+        http.response_header.status
+      }
+      http.errback { |msg, err|
+        take_over_as_master
+      }
+    end
+
+    def take_over_as_master
+      Log.write("Got a bad check: seen good check is #{@seen_good_check.inspect}")
+      @bad_checks += 1
+      if @bad_checks > 5 && @seen_good_check && !@taking_over
+        Log.write "I'm trying to take over!"
+        @taking_over = true
+        cancel_master_check_timer
+        if try_lock(private_dns_name)
+          Log.write("I got the lock!")
+          steal_ip
+          unless notify_awsm
+            timer = EventMachine::PeriodicTimer.new(5) { timer.cancel if notify_awsm }
+          end
+        end
+      end
+    end
+
+    def am_i_master?
+      res = false
+      @ec2.describe_addresses.each do |desc|
+        res = true if (desc[:public_ip] == public_ip && desc[:instance_id] == instance_id)
+      end
+      if res
+        db = get_mysql_handle
+        db.execute("begin")
+        rows = db.execute("select master_lock from locks for update")
+        master = rows.fetch[0]
+        rows.finish
+        db.do("update locks set master_lock = '#{self_monitor_url}'")
+        @master = self_monitor_url
+        cancel_master_check_timer
+        db.do("commit")
+      end
+      res
+    end
+
+    def notify_awsm
+      Log.write "Notifying awsm that I won"
+      res = call_api("promote_instance_to_master", :instance_id => instance_id)
+      case res['status']
+      when 'ok'
+        true
+      when 'already_promoted'
+        am_i_master?
+        EM.add_timer(600) { setup_master_check_timer unless am_i_master? }
+        true
+      else
+        false
+      end
+    end
+
+    def instance_id
+      @instance_id ||= open("http://169.254.169.254/latest/meta-data/instance-id").read
+    end
+
+    def private_dns_name
+      @private_dns_name ||= open("http://169.254.169.254/latest/meta-data/local-hostname").read
+    end
+
+    def self_monitor_url
+      "http://#{private_dns_name}/haproxy/monitor"
+    end
+
+    def steal_ip
+      if @ec2.disassociate_address(public_ip)
+        @ec2.associate_address(instance_id, public_ip)
+      end
+    end
+
+    def public_ip
+      @public_ip ||= @json['master_app_server']['public_ip']
+    end
+
+    def get_local_json
+      @json = JSON.parse(File.read("/etc/chef/dna.json"))
+    end
+
+    def get_master_from_json
+      if host = @json['master_app_server']['private_dns_name']
+        @master = "http://#{host}/haproxy/monitor"
+      end
+    end
+
+  end
+end
+
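
A note on how Stonith fits together, read off the code above: it assumes a `locks` table with a `master_lock` column in the `engineyard` MySQL database (used via SELECT ... FOR UPDATE as the failover lock), polls the current master's /haproxy/monitor URL every :heartbeat seconds, and after more than five consecutive bad checks (once it has seen at least one good one) it grabs the lock, reassociates the elastic IP, and calls promote_instance_to_master. Sending HUP pauses monitoring; USR1 resumes it ten minutes later. It is presumably booted by one of the bin/ stubs (likely ey-monitor); starting it directly would just be:

    require 'ey-flex'

    # Reads /etc/.mysql.backups.yml and /etc/.ey-cloud.yml, then runs the
    # EventMachine reactor with a 10-second heartbeat against the master.
    EY::Stonith.run
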
data/lib/ey-flex.rb
ADDED
@@ -0,0 +1,37 @@
+require 'json'
+require 'aws/s3'
+require 'date'
+require 'digest'
+require 'net/http'
+require 'fileutils'
+require 'eventmachine'
+require 'em-http'
+require 'json/ext'
+require 'right_aws'
+require 'open-uri'
+require 'rest_client'
+require 'dbi'
+require 'zlib'
+require 'stringio'
+require 'yaml'
+require "optparse"
+require 'open4'
+
+lib_dir = File.expand_path(__FILE__ + '/../ey-flex')
+
+module EY
+  module Flex
+    class Error < StandardError; end
+  end
+  module CloudServer; end
+end
+
+require lib_dir + '/big-brother'
+require lib_dir + '/backups'
+require lib_dir + '/bucket_minder'
+require lib_dir + '/ey-api'
+require lib_dir + '/mysql_database'
+require lib_dir + '/postgresql_database'
+require lib_dir + '/snapshot_minder'
+require lib_dir + '/stonith'
+require lib_dir + '/version'
metadata
ADDED
@@ -0,0 +1,129 @@
+--- !ruby/object:Gem::Specification
+name: ey_cloud_server
+version: !ruby/object:Gem::Version
+  version: 1.0.1
+platform: ruby
+authors:
+- Ezra Zygmuntowicz
+autorequire:
+bindir: bin
+cert_chain: []
+
+date: 2010-01-20 00:00:00 -08:00
+default_executable:
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: json
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: right_aws
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: open4
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: aws-s3
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: igrigorik-em-http-request
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: rest-client
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+description: Server side components for Engine Yard's cloud
+email: awsmdev@engineyard.com
+executables:
+- eybackup
+- ey-snapshots
+- ey-monitor
+- ey-agent
+extensions: []
+
+extra_rdoc_files:
+- README.rdoc
+- LICENSE
+files:
+- LICENSE
+- README.rdoc
+- lib/ey-flex/backups.rb
+- lib/ey-flex/big-brother.rb
+- lib/ey-flex/bucket_minder.rb
+- lib/ey-flex/ey-api.rb
+- lib/ey-flex/mysql_database.rb
+- lib/ey-flex/postgresql_database.rb
+- lib/ey-flex/snapshot_minder.rb
+- lib/ey-flex/stonith.rb
+- lib/ey-flex/version.rb
+- lib/ey-flex.rb
+has_rdoc: true
+homepage: http://engineyard.com/cloud
+licenses: []
+
+post_install_message:
+rdoc_options: []
+
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+requirements: []
+
+rubyforge_project:
+rubygems_version: 1.3.5
+signing_key:
+specification_version: 3
+summary: Server side components for Engine Yard's cloud
+test_files: []
+