judo 0.0.9 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Rakefile +1 -2
- data/VERSION +1 -1
- data/bin/judo +102 -212
- data/lib/judo.rb +18 -0
- data/lib/judo/base.rb +199 -0
- data/lib/judo/commandline_helpers.rb +151 -0
- data/lib/judo/config.rb +133 -0
- data/lib/judo/group.rb +221 -0
- data/lib/judo/server.rb +513 -0
- data/lib/judo/setup.rb +114 -0
- data/spec/base.rb +1 -1
- data/spec/base_spec.rb +9 -0
- metadata +16 -26
- data/lib/all.rb +0 -12
- data/lib/config.rb +0 -105
- data/lib/group.rb +0 -185
- data/lib/server.rb +0 -491
- data/lib/setup.rb +0 -114
data/lib/judo/server.rb
ADDED
@@ -0,0 +1,513 @@
### NEEDED for new gem launch

### [ ] return right away.. (1 hr)
### [ ] two phase delete (1 hr)
### [-] refactor availability_zone (2 hrs)
### [ ] pick availability zone from config "X":"Y" or "X":["Y","Z"]
### [ ] assign to state on creation ( could delay till volume creation )
### [ ] implement auto security_group creation and setup (6 hrs)
### [ ] write some examples - simple postgres/redis/couchdb server (5hrs)
### [ ] write new README (4 hrs)
### [ ] bind kuzushi gem version version
### [ ] release new gem! (1 hr)

### [ ] should be able to do ALL actions except commit without the repo!
### [ ] store git commit hash with commit to block a judo commit if there is newer material stored
### [ ] remove the tarball - store files a sha hashes in the bucket - makes for faster commits if the files have not changed

### [ ] use a logger service (1 hr)
### [ ] write specs (5 hr)

### Error Handling
### [ ] no availability zone before making disks
### [ ] security group does not exists

### Do Later
### [ ] use amazon's new conditional write tools so we never have problems from concurrent updates
### [ ] is thor really what we want to use here?
### [ ] need to be able to pin a config to a version of kuzushi - gem updates can/will break a lot of things
### [ ] I want a "judo monitor" command that will make start servers if they go down, and poke a listed port to make sure a service is listening, would be cool if it also detects wrong ami, wrong security group, missing/extra volumes, missing/extra elastic_ip - might not want to force a reboot quite yet in these cases
### [ ] Implement "judo snapshot [NAME]" to take a snapshot of the ebs's blocks
### [ ] ruby 1.9.1 support
### [ ] find a good way to set the hostname or prompt to :name
### [ ] remove fog/s3 dependency
### [ ] enforce template files end in .erb to make room for other possible templates as defined by the extensions
### [ ] zerigo integration for automatic DNS setup
### [ ] How cool would it be if this was all reimplemented in eventmachine and could start lots of boxes in parallel? Would need to evented AWS api calls... Never seen a library to do that - would have to write our own... "Fog Machine?"

module Judo
  # Represents one managed EC2 server. State (name, group, instance id,
  # elastic ip, attached volumes, config version, boot secret) is persisted
  # in SimpleDB under the "judo_servers" domain and cached locally via
  # @base.servers_state. All AWS calls go through @base.ec2 / @base.sdb.
  class Server
    attr_accessor :name

    # base  - the Judo::Base environment (provides ec2/sdb handles, caches)
    # name  - server name, or nil to auto-number within the group on create
    # group - name of the group this server belongs to
    def initialize(base, name, group)
      @base = base
      @name = name
      @group_name = group
    end

    # Registers a new server record in SimpleDB and allocates its
    # EC2 resources (EBS volumes / elastic ip) per the group config.
    # Raises JudoError when no group was given or the name is taken.
    def create
      raise JudoError, "no group specified" unless @group_name

      if @name.nil?
        # Auto-name as "<group>.<n>" using one more than the highest
        # numeric suffix seen across existing servers.
        index = @base.servers.map { |s| (s.name =~ /^#{s.group.name}.(\d*)$/); $1.to_i }.sort.last.to_i + 1
        @name = "#{group.name}.#{index}"
      end

      raise JudoError, "there is already a server named #{name}" if @base.servers.detect { |s| s.name == @name and s != self}

      task("Creating server #{name}") do
        # "secret" is a random token baked into user_data so the booted
        # instance can authenticate back; "virgin" marks first boot.
        update "name" => name, "group" => @group_name, "virgin" => true, "secret" => rand(2 ** 128).to_s(36)
        @base.sdb.put_attributes("judo_config", "groups", @group_name => name)
      end

      allocate_resources

      self
    end

    # The Judo::Group object this server belongs to (memoized).
    def group
      @group ||= @base.groups.detect { |g| g.name == @group_name }
    end

    # Reads this server's attributes straight from SimpleDB (no cache).
    def fetch_state
      @base.sdb.get_attributes(self.class.domain, name)[:attributes]
    end

    # Cached SimpleDB state; populated lazily, invalidated by #reload.
    def state
      @base.servers_state[name] ||= fetch_state
    end

    # Fetches a single-valued attribute (SimpleDB stores multi-valued
    # attributes as arrays, so unwrap to the first element).
    def get(key)
      state[key] && [state[key]].flatten.first
    end

    def instance_id
      get "instance_id"
    end

    def elastic_ip
      get "elastic_ip"
    end

    # Human-readable instance size; shows "actual/configured" when the
    # running instance type differs from the group config.
    def size_desc
      if not running? or ec2_instance_type == instance_size
        instance_size
      else
        "#{ec2_instance_type}/#{instance_size}"
      end
    end

    # Human-readable config version; shows "running/committed" when the
    # server was launched from an older config than the group's latest.
    def version_desc
      return "" unless running?
      if version == group.version
        "v#{version}"
      else
        "v#{version}/#{group.version}"
      end
    end

    def version
      get("version").to_i
    end

    def virgin?
      get("virgin").to_s == "true" ## I'm going to set it to true and it will come back from the db as "true" -> could be "false" or false or nil also
    end

    def secret
      get "secret"
    end

    # Attached EBS volumes as { device => volume_id }; persisted in
    # SimpleDB as "device:volume_id" strings.
    def volumes
      Hash[ (state["volumes"] || []).map { |a| a.split(":") } ]
    end

    def self.domain
      "judo_servers"
    end

    # Writes attrs to SimpleDB (replacing existing values) and keeps the
    # local cache in sync.
    def update(attrs)
      @base.sdb.put_attributes(self.class.domain, name, attrs, :replace)
      state.merge! attrs
    end

    # Appends a value to a multi-valued SimpleDB attribute.
    def add(key, value)
      @base.sdb.put_attributes(self.class.domain, name, { key => value })
      (state[key] ||= []) << value
    end

    # Removes one value from a multi-valued attribute, or the whole
    # attribute when no value is given. Mirrors the change locally.
    def remove(key, value = nil)
      if value
        @base.sdb.delete_attributes(self.class.domain, name, key => value)
        # FIX: was `state[key] - [value]`, which discarded the result and
        # left the stale value in the local cache.
        state[key] = state[key] - [value]
      else
        @base.sdb.delete_attributes(self.class.domain, name, [ key ])
        state.delete(key)
      end
    end

    # Unregisters the server from its group and deletes its SimpleDB record.
    def delete
      group.delete_server(self)
      @base.sdb.delete_attributes(self.class.domain, name)
    end

    ######## end simple DB access #######

    def instance_size
      config["instance_size"]
    end

    # Effective config comes from the group.
    def config
      group.config
    end

    def to_s
      "#{name}:#{@group_name}"
    end

    # Creates any EBS volumes and elastic ip required by the group config
    # that this server does not yet have. Idempotent: existing volumes/ip
    # are skipped. An AddressLimitExceeded error becomes a JudoInvalid.
    def allocate_resources
      if config["volumes"]
        [config["volumes"]].flatten.each do |volume_config|
          device = volume_config["device"]
          if volume_config["media"] == "ebs"
            size = volume_config["size"]
            if not volumes[device]
              task("Creating EC2 Volume #{device} #{size}") do
                ### EC2 create_volume
                volume_id = @base.ec2.create_volume(nil, size, config["availability_zone"])[:aws_id]
                add_volume(volume_id, device)
              end
            else
              puts "Volume #{device} already exists."
            end
          else
            puts "device #{device || volume_config["mount"]} is not of media type 'ebs', skipping..."
          end
        end
      end

      begin
        if config["elastic_ip"] and not elastic_ip
          ### EC2 allocate_address
          task("Adding an elastic ip") do
            ip = @base.ec2.allocate_address
            add_ip(ip)
          end
        end
      rescue Aws::AwsError => e
        if e.message =~ /AddressLimitExceeded/
          invalid "Failed to allocate ip address: Limit Exceeded"
        else
          raise
        end
      end
    end

    # Delegates progress-printing to the base environment.
    def task(msg, &block)
      @base.task(msg, &block)
    end

    # Class-level variant of #task: prints "---> msg... done (N.Ns)"
    # around the block and returns the block's result.
    def self.task(msg, &block)
      printf "---> %-24s ", "#{msg}..."
      STDOUT.flush
      start = Time.now
      result = block.call
      result = "done" unless result.is_a? String
      finish = Time.now
      time = sprintf("%0.1f", finish - start)
      puts "#{result} (#{time}s)"
      result
    end

    def has_ip?
      !!elastic_ip
    end

    def has_volumes?
      not volumes.empty?
    end

    # EC2-side descriptions of this server's volumes ([] when none).
    def ec2_volumes
      return [] if volumes.empty?
      @base.ec2.describe_volumes( volumes.values )
    end

    # Releases the elastic ip (best-effort) and forgets it in SimpleDB.
    def remove_ip
      @base.ec2.release_address(elastic_ip) rescue nil
      remove "elastic_ip"
    end

    # Full teardown: stop the instance, release ip, delete volumes,
    # then remove the SimpleDB record.
    def destroy
      stop if running?
      ### EC2 release_address
      task("Deleting Elastic Ip") { remove_ip } if has_ip?
      volumes.each { |dev,v| remove_volume(v,dev) }
      task("Destroying server #{name}") { delete }
    end

    # EC2 instance state string, or "offline" when there is no instance.
    def ec2_state
      ec2_instance[:aws_state] rescue "offline"
    end

    # The cached EC2 describe_instances record for this server ({} if none).
    def ec2_instance
      ### EC2 describe_instances
      @base.ec2_instances.detect { |e| e[:aws_instance_id] == instance_id } or {}
    end

    def running?
      ## other options are "terminated" and "nil"
      ["pending", "running", "shutting_down", "degraded"].include?(ec2_state)
    end

    # Launches the instance and (re)attaches its elastic ip and volumes.
    # Requires a committed group config (version > 0).
    def start
      invalid "Already running" if running?
      invalid "No config has been commited yet, type 'judo commit'" unless group.version > 0
      task("Starting server #{name}") { launch_ec2 }
      task("Wait for server") { wait_for_running } if elastic_ip or has_volumes?
      task("Attaching ip") { attach_ip } if elastic_ip
      task("Attaching volumes") { attach_volumes } if has_volumes?
    end

    def restart
      stop if running?
      start
    end

    # True when the name matches the auto-generated "<group>.<n>" pattern.
    def generic_name?
      # FIX: was /^#{group}[.]\d*$/ which interpolated the Group object
      # itself; #create builds names from group.name, so match on that.
      name =~ /^#{group.name}[.]\d*$/
    end

    # A server with no volumes, no ip, and an auto-generated name carries
    # no state worth preserving.
    def generic?
      volumes.empty? and not has_ip? and generic_name?
    end

    def invalid(str)
      raise JudoInvalid, str
    end

    # Terminates the instance, waits for volumes to detach, and clears
    # the stored instance_id.
    def stop
      invalid "not running" unless running?
      ## EC2 terminate_instances
      task("Terminating instance") { @base.ec2.terminate_instances([ instance_id ]) }
      task("Wait for volumes to detach") { wait_for_volumes_detached } if volumes.size > 0
      remove "instance_id"
    end

    # Boots a fresh EC2 instance from the group config and records the
    # instance id, clears the virgin flag, and pins the config version.
    def launch_ec2
      # validate

      ## EC2 launch_instances
      ud = user_data
      debug(ud)
      result = @base.ec2.launch_instances(ami,
        :instance_type => config["instance_size"],
        :availability_zone => config["availability_zone"],
        :key_name => config["key_name"],
        :group_ids => security_groups,
        :user_data => ud).first

      update "instance_id" => result[:aws_instance_id], "virgin" => false, "version" => group.version
    end

    # Prints str wrapped in debug tags when JUDO_DEBUG=1.
    def debug(str)
      return unless ENV['JUDO_DEBUG'] == "1"
      puts "<JUDO_DEBUG>#{str}</JUDO_DEBUG>"
    end

    def security_groups
      [ config["security_group"] ].flatten
    end

    def console_output
      invalid "not running" unless running?
      @base.ec2.get_console_output(instance_id)[:aws_output]
    end

    # Selects the 32- or 64-bit AMI based on the instance size.
    def ami
      ia32? ? config["ami32"] : config["ami64"]
    end

    def ia32?
      ["m1.small", "c1.medium"].include?(instance_size)
    end

    def ia64?
      not ia32?
    end

    def hostname
      ec2_instance[:dns_name] == "" ? nil : ec2_instance[:dns_name]
    end

    # Polls (1s interval) until EC2 reports the instance as "running".
    def wait_for_running
      loop do
        return if ec2_state == "running"
        reload
        sleep 1
      end
    end

    # Polls until the instance has a public dns name; returns it.
    def wait_for_hostname
      loop do
        reload
        return hostname if hostname
        sleep 1
      end
    end

    # Polls until every EBS volume reports "available" (i.e. detached).
    def wait_for_volumes_detached
      ## FIXME - force if it takes too long
      loop do
        break if ec2_volumes.reject { |v| v[:aws_status] == "available" }.empty?
        sleep 2
      end
    end

    def wait_for_termination
      loop do
        reload
        break if ec2_instance[:aws_state] == "terminated"
        sleep 1
      end
    end

    # Polls until port 22 on the instance accepts a TCP connection.
    def wait_for_ssh
      invalid "not running" unless running?
      loop do
        begin
          reload
          Timeout::timeout(4) do
            TCPSocket.new(hostname, 22)
            return
          end
        rescue SocketError, Timeout::Error, Errno::ECONNREFUSED, Errno::EHOSTUNREACH
        end
      end
    end

    # Records the elastic ip and associates it when already running.
    def add_ip(public_ip)
      update "elastic_ip" => public_ip
      attach_ip
    end

    def attach_ip
      return unless running? and elastic_ip
      ### EC2 associate_address
      @base.ec2.associate_address(instance_id, elastic_ip)
    end

    # Reverse-DNS name of the elastic ip (shells out to dig).
    def dns_name
      return nil unless elastic_ip
      `dig +short -x #{elastic_ip}`.strip
    end

    def attach_volumes
      return unless running?
      volumes.each do |device,volume_id|
        ### EC2 attach_volume
        @base.ec2.attach_volume(volume_id, instance_id, device)
      end
    end

    # Deletes the EBS volume and removes it from the stored volume list.
    def remove_volume(volume_id, device)
      task("Deleting #{device} #{volume_id}") do
        ### EC2 delete_volume
        @base.ec2.delete_volume(volume_id)
        remove "volumes", "#{device}:#{volume_id}"
      end
    end

    # Records a volume on a device (one per device) and attaches it when
    # the instance is running. Returns the volume_id.
    def add_volume(volume_id, device)
      invalid("Server already has a volume on that device") if volumes[device]

      add "volumes", "#{device}:#{volume_id}"

      @base.ec2.attach_volume(volume_id, instance_id, device) if running?

      volume_id
    end

    def connect_ssh
      wait_for_ssh
      system "chmod 600 #{group.keypair_file}"
      system "ssh -i #{group.keypair_file} #{config["user"]}@#{hostname}"
    end

    # Legacy couchdb-based commit of group config + attachments.
    # NOTE(review): references the old Config module; appears vestigial
    # after the Judo::Base refactor - confirm before relying on it.
    def self.commit
      ## FIXME
      Config.group_dirs.each do |group_dir|
        group = File.basename(group_dir)
        next if Config.group and Config.group != group
        puts "commiting #{group}"
        doc = Config.couchdb.get(group) rescue {}
        config = Config.read_config(group)
        config['_id'] = group
        config['_rev'] = doc['_rev'] if doc.has_key?('_rev')
        response = Config.couchdb.save_doc(config)
        doc = Config.couchdb.get(response['id'])

        # walk subdirs and save as _attachments
        ['files', 'templates', 'packages', 'scripts'].each { |subdir|
          Dir["#{group_dir}/#{subdir}/*"].each do |f|
            puts "storing attachment #{f}"
            doc.put_attachment("#{subdir}/#{File.basename(f)}", File.read(f))
          end
        }
      end
    end

    def ec2_instance_type
      ec2_instance[:aws_instance_type] rescue nil
    end

    def ip
      hostname || config["state_ip"]
    end

    # Drops cached EC2 and SimpleDB state so the next read re-fetches.
    def reload
      @base.reload_ec2_instances
      @base.servers_state.delete(name)
    end

    # Cloud-init style boot script: installs ruby + kuzushi and runs
    # "kuzushi init" on first boot ("start" thereafter) against the
    # group's S3 config url, exporting the per-server secret.
    def user_data
      <<USER_DATA
#!/bin/sh

export DEBIAN_FRONTEND="noninteractive"
export DEBIAN_PRIORITY="critical"
export SECRET='#{secret}'
apt-get update
apt-get install ruby rubygems ruby-dev irb libopenssl-ruby libreadline-ruby -y
gem install kuzushi --no-rdoc --no-ri
GEM_BIN=`ruby -r rubygems -e "puts Gem.bindir"`
echo "$GEM_BIN/kuzushi #{virgin? && "init" || "start"} '#{url}'" > /var/log/kuzushi.log
$GEM_BIN/kuzushi #{virgin? && "init" || "start"} '#{url}' >> /var/log/kuzushi.log 2>&1
USER_DATA
    end

    def url
      @url ||= group.s3_url
    end

    # Ensures the security group and keypair referenced by the config
    # exist, creating the default "judo" keypair on demand.
    def validate
      ### EC2 create_security_group
      @base.create_security_group

      ### EC2 describe_key_pairs
      k = @base.ec2.describe_key_pairs.detect { |kp| kp[:aws_key_name] == config["key_name"] }

      if k.nil?
        if config["key_name"] == "judo"
          @base.create_keypair
        else
          raise "cannot use key_pair #{config["key_name"]} b/c it does not exist"
        end
      end
    end

    # Sort servers by group name, then server name.
    def <=>(s)
      [group.name, name] <=> [s.group.name, s.name]
    end

  end
end
|