judo 0.0.1
Sign up to get free protection for your applications and to get access to all the features.
- data/README.rdoc +90 -0
- data/Rakefile +27 -0
- data/VERSION +1 -0
- data/bin/judo +212 -0
- data/lib/all.rb +17 -0
- data/lib/config.rb +233 -0
- data/lib/group.rb +166 -0
- data/lib/server.rb +462 -0
- data/spec/base.rb +21 -0
- data/spec/server_spec.rb +9 -0
- metadata +104 -0
data/README.rdoc
ADDED
@@ -0,0 +1,90 @@
|
|
1
|
+
= Tired of wrestling with server provisioning? Sumo!
|
2
|
+
|
3
|
+
Want to launch and manage persistent utility servers on EC2? ec2-run-instances got you down? Try this:
|
4
|
+
|
5
|
+
$ sumo create mybox
|
6
|
+
Created server mybox
|
7
|
+
$ sumo list
|
8
|
+
mybox i-8a3d05e2 m1.small ami-1515f67c
|
9
|
+
$ sumo launch mybox
|
10
|
+
---> Launch server mybox... done (1.9s)
|
11
|
+
---> Acquire hostname... ec2-75-101-254-61.compute-1.amazonaws.com (34.0s)
|
12
|
+
---> Wait for ssh... done (3.1s)
|
13
|
+
|
14
|
+
Logging you in via ssh. Type 'exit' or Ctrl-D to return to your local system.
|
15
|
+
------------------------------------------------------------------------------
|
16
|
+
...
|
17
|
+
root@domU-12-31-39-04-31-37:~#
|
18
|
+
|
19
|
+
== Setup
|
20
|
+
|
21
|
+
Dependencies:
|
22
|
+
|
23
|
+
$ sudo gem install thor aws right_aws
|
24
|
+
|
25
|
+
Then create ~/.sumo/config.yml containing:
|
26
|
+
|
27
|
+
---
|
28
|
+
access_id: <your amazon access key id>
|
29
|
+
access_secret: <your amazon secret access key>
|
30
|
+
|
31
|
+
You'll need Bacon and Mocha if you want to run the specs, and Jeweler if you want to create gems.
|
32
|
+
|
33
|
+
== Usage
|
34
|
+
|
35
|
+
Create a named persistent server:
|
36
|
+
|
37
|
+
$ sumo create mybox
|
38
|
+
Created server mybox
|
39
|
+
|
40
|
+
See config vars on the server record:
|
41
|
+
|
42
|
+
$ sumo info mybox
|
43
|
+
mybox:
|
44
|
+
availability_zone:"us-east-1d"
|
45
|
+
ami32:"ami-1515f67c"
|
46
|
+
user:"ubuntu"
|
47
|
+
key_name:"sumo"
|
48
|
+
ami64:"ami-ab15f6c2"
|
49
|
+
user_data:""
|
50
|
+
security_group:"sumo"
|
51
|
+
instance_size:"m1.small"
|
52
|
+
|
53
|
+
Set config vars before launch:
|
54
|
+
|
55
|
+
$ sumo set mybox --instance-size c1.medium
|
56
|
+
|
57
|
+
Create a static IP address and/or volumes:
|
58
|
+
|
59
|
+
$ sumo attach_ip mybox
|
60
|
+
$ sumo attach_volume mybox 50 /dev/sdh1
|
61
|
+
|
62
|
+
Launch an instance of the server:
|
63
|
+
|
64
|
+
$ sumo launch mybox
|
65
|
+
---> Launch server mybox...
|
66
|
+
|
67
|
+
You may want to spin down the server if it's not in use:
|
68
|
+
|
69
|
+
$ sumo stop mybox
|
70
|
+
|
71
|
+
...and bring it up again later:
|
72
|
+
|
73
|
+
$ sumo start mybox
|
74
|
+
|
75
|
+
== Some details you might want to know
|
76
|
+
|
77
|
+
Sumo creates its own keypair named sumo, which is stored in ~/.ssh/keypair.pem. Amazon doesn't let you upload your own ssh public key, which is lame, so this is the best option for making the launch-and-connect process a single step.
|
78
|
+
|
79
|
+
It will also create an Amazon security group called sumo, so that it can lower the firewall for services you configure via cookbook roles.
|
80
|
+
|
81
|
+
== Meta
|
82
|
+
|
83
|
+
Created by Adam Wiggins and Orion Henry
|
84
|
+
|
85
|
+
Patches contributed by Blake Mizerany, Jesse Newland, Gert Goet, and Tim Lossen
|
86
|
+
|
87
|
+
Released under the MIT License: http://www.opensource.org/licenses/mit-license.php
|
88
|
+
|
89
|
+
http://github.com/adamwiggins/sumo
|
90
|
+
|
data/Rakefile
ADDED
@@ -0,0 +1,27 @@
|
|
1
|
+
require 'jeweler'
|
2
|
+
|
3
|
+
Jeweler::Tasks.new do |s|
|
4
|
+
s.name = "judo"
|
5
|
+
s.description = "A no-hassle way to launch one-off EC2 instances from the command line"
|
6
|
+
s.summary = s.description
|
7
|
+
s.author = "Orion Henry"
|
8
|
+
s.email = "orion@heroku.com"
|
9
|
+
s.homepage = "http://github.com/orionz/judo"
|
10
|
+
s.rubyforge_project = "judo"
|
11
|
+
s.files = FileList["[A-Z]*", "{bin,lib,spec}/**/*"]
|
12
|
+
s.executables = %w(judo)
|
13
|
+
s.add_dependency "uuidtools"
|
14
|
+
s.add_dependency "aws"
|
15
|
+
s.add_dependency "thor"
|
16
|
+
s.add_dependency "json"
|
17
|
+
end
|
18
|
+
|
19
|
+
Jeweler::RubyforgeTasks.new
|
20
|
+
|
21
|
+
desc 'Run specs'
|
22
|
+
task :spec do
|
23
|
+
sh 'bacon -s spec/*_spec.rb'
|
24
|
+
end
|
25
|
+
|
26
|
+
task :default => :spec
|
27
|
+
|
data/VERSION
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
0.0.1
|
data/bin/judo
ADDED
@@ -0,0 +1,212 @@
|
|
1
|
+
#!/usr/bin/env ruby
|
2
|
+
|
3
|
+
require File.dirname(__FILE__) + '/../lib/all'
|
4
|
+
|
5
|
+
#Sumo::Config.connect
|
6
|
+
|
7
|
+
require 'thor'
|
8
|
+
|
9
|
+
class CLI < Thor
|
10
|
+
desc "start [NAMES]", "start one or more servers"
|
11
|
+
def start(*names)
|
12
|
+
servers(*names) do |server|
|
13
|
+
server.start
|
14
|
+
end
|
15
|
+
end
|
16
|
+
|
17
|
+
desc "ssh [NAMES]", "ssh to a specified server or first available"
|
18
|
+
def ssh(*names)
|
19
|
+
servers(*names) do |server|
|
20
|
+
server.reload
|
21
|
+
server.wait_for_ssh
|
22
|
+
server.connect_ssh
|
23
|
+
end
|
24
|
+
end
|
25
|
+
|
26
|
+
desc "launch NAME", "create and start a persistent server"
|
27
|
+
def launch(name)
|
28
|
+
create(name, true)
|
29
|
+
end
|
30
|
+
|
31
|
+
desc "create NAME", "create a persistent server"
|
32
|
+
def create(name, start = false)
|
33
|
+
group = Sumo::Group.current
|
34
|
+
if name =~ /^[+](\d*)/
|
35
|
+
n = $1.to_i
|
36
|
+
fail "woah nelly - that's too many - 5 or less pls" if n > 5
|
37
|
+
fail "woah nelly - that's not enough" if n < 1
|
38
|
+
top_counter = group.servers.map { |s| (s.name =~ /^#{s.group}.(\d*)$/); $1.to_i }.sort.last.to_i
|
39
|
+
n.times do |i|
|
40
|
+
top_counter += 1
|
41
|
+
server = group.create_server "#{group}.#{top_counter}"
|
42
|
+
server.allocate_resources
|
43
|
+
server.start if start
|
44
|
+
end
|
45
|
+
else
|
46
|
+
server = group.create_server name
|
47
|
+
server.allocate_resources
|
48
|
+
server.start if start
|
49
|
+
end
|
50
|
+
end
|
51
|
+
|
52
|
+
desc "restart NAME", "restart a running server"
|
53
|
+
def restart(*names)
|
54
|
+
servers(*names) do |server|
|
55
|
+
server.restart
|
56
|
+
end
|
57
|
+
end
|
58
|
+
|
59
|
+
desc "stop [NAMES]", "stop a persistent server"
|
60
|
+
def stop(*names)
|
61
|
+
servers(*names) do |server|
|
62
|
+
server.stop
|
63
|
+
server.destroy if server.generic?
|
64
|
+
end
|
65
|
+
end
|
66
|
+
|
67
|
+
desc "destroy NAMES", "destroy a persistent server"
|
68
|
+
def destroy(*names)
|
69
|
+
raise "Must specify names of servers to destroy" if names.empty?
|
70
|
+
servers(*names) do |server|
|
71
|
+
server.destroy
|
72
|
+
end
|
73
|
+
end
|
74
|
+
|
75
|
+
desc "info [NAMES]", "show server config"
|
76
|
+
def info(*names)
|
77
|
+
servers(*names) do |server|
|
78
|
+
require 'pp'
|
79
|
+
puts "#{server}"
|
80
|
+
if server.ec2_instance and not server.ec2_instance.empty?
|
81
|
+
puts "\t[ EC2 ]"
|
82
|
+
[:aws_instance_id, :ssh_key_name, :aws_availability_zone, :aws_state, :aws_image_id, :dns_name, :aws_instance_type, :private_dns_name, :aws_launch_time, :aws_groups ].each do |k|
|
83
|
+
printf "\t %-24s: %s\n",k, server.ec2_instance[k]
|
84
|
+
end
|
85
|
+
end
|
86
|
+
puts "\t[ VOLUMES ]"
|
87
|
+
server.ec2_volumes.each do |v|
|
88
|
+
printf "\t %-13s %-10s %-10s %4d %-10s %-8s\n",
|
89
|
+
v[:aws_id],
|
90
|
+
v[:aws_status],
|
91
|
+
v[:zone],
|
92
|
+
v[:aws_size],
|
93
|
+
v[:aws_attachment_status],
|
94
|
+
v[:aws_device]
|
95
|
+
end
|
96
|
+
# pp ({ :name => server.name , :group => server.group, :volumes => server.volumes, :hostname => server.hostname })
|
97
|
+
# pp server.volumes.inspect
|
98
|
+
# puts server.state.inspect
|
99
|
+
## EC2 describe_volumes
|
100
|
+
# puts " #{dev.to_s}:#{Sumo::Config.ec2.describe_volumes([vol_id])}"
|
101
|
+
# end
|
102
|
+
end
|
103
|
+
end
|
104
|
+
|
105
|
+
desc "list [NAMES]", "list all servers"
|
106
|
+
def list(*names)
|
107
|
+
if group = Sumo::Group.current
|
108
|
+
servers(*names) do |s|
|
109
|
+
printf "%-18s %-11s %-11s %-13s %-10s %-10s %s\n", s.name, s.state["instance_id"], s.instance_size, s.ami, s.ec2_state, "#{s.volumes.keys.size} volumes", s.ip
|
110
|
+
end
|
111
|
+
else
|
112
|
+
printf " SERVER GROUPS\n"
|
113
|
+
Sumo::Group.all.each do |g|
|
114
|
+
printf "%-18s %s servers\n", g.name, g.servers.size
|
115
|
+
end
|
116
|
+
# printf " UNGROUPED SERVERS\n"
|
117
|
+
# servers.each do |s|
|
118
|
+
# printf "%-18s %-11s %-11s %-13s %-10s %-10s %s\n", s.name, s.state["instance_id"], s.state["security_group"], s.ami, s.ec2_state, "#{s.volumes.keys.size} volumes", s.ip
|
119
|
+
# end
|
120
|
+
else
|
121
|
+
end
|
122
|
+
end
|
123
|
+
|
124
|
+
desc "console [NAMES]", "get console output for server or first available"
|
125
|
+
def console(*names)
|
126
|
+
servers(*names) do |server|
|
127
|
+
if server.running?
|
128
|
+
puts "Not running: #{server.name}"
|
129
|
+
else
|
130
|
+
puts server.console_output if server.running?
|
131
|
+
end
|
132
|
+
end
|
133
|
+
end
|
134
|
+
|
135
|
+
desc "init", "create a new sumo repository in the current directory"
|
136
|
+
def init
|
137
|
+
Sumo::Config.init
|
138
|
+
end
|
139
|
+
|
140
|
+
desc "volumes", "list all volumes"
|
141
|
+
def volumes
|
142
|
+
format = "%13s %6s %12s %-10s %-16s %-16s\n"
|
143
|
+
printf format, "AWS_ID", "SIZE", "AWS_STATUS", "AWS_DEVICE", "ATTACHED TO", "CONFIGURED FOR"
|
144
|
+
printf "%s\n", ("-" * 80)
|
145
|
+
### EC2 describe_volumes
|
146
|
+
Sumo::Config.ec2.describe_volumes.map do |volume|
|
147
|
+
[ volume[:aws_id], volume[:aws_size], volume[:aws_status], volume[:aws_device] || "", instance_id_to_sumo(volume[:aws_instance_id]) || volume[:aws_instance_id] || "", volume_id_to_sumo(volume[:aws_id]) ]
|
148
|
+
end.sort { |a,b| [ a[5].to_s, a[3].to_s ] <=> [ b[5].to_s, b[3].to_s ] }.each do |d|
|
149
|
+
printf format, *d
|
150
|
+
end
|
151
|
+
end
|
152
|
+
|
153
|
+
desc "ips", "list all ips"
|
154
|
+
def ips
|
155
|
+
format = "%15s %20s %20s\n"
|
156
|
+
printf format, "IP", "ATTACHED TO", "CONFIGURED FOR"
|
157
|
+
printf "%s\n", ("-"*57)
|
158
|
+
## EC2 describe_addresses
|
159
|
+
Sumo::Config.ec2.describe_addresses.map do |ip|
|
160
|
+
[ ip[:public_ip], instance_id_to_sumo(ip[:instance_id]) || ip[:instance_id], ip_to_sumo(ip[:public_ip]) ]
|
161
|
+
end.sort { |a,b| a[2].to_s <=> b[2].to_s }.each do |d|
|
162
|
+
printf format, *d
|
163
|
+
end
|
164
|
+
end
|
165
|
+
|
166
|
+
desc "commit", "push configs and files to couchdb"
|
167
|
+
def commit
|
168
|
+
# group = Sumo::Config.group
|
169
|
+
Sumo::Server.commit
|
170
|
+
end
|
171
|
+
|
172
|
+
no_tasks do
|
173
|
+
def servers(*names, &block)
|
174
|
+
group = Sumo::Group.current
|
175
|
+
good_servers = group.servers.select { |s| names.empty? or names.include?(s.name) }
|
176
|
+
bad_names = (names - good_servers.map(&:name))
|
177
|
+
fail "bad server name: #{bad_names.join(', ')}" unless bad_names.empty?
|
178
|
+
good_servers.each do |server|
|
179
|
+
begin
|
180
|
+
block.call(server)
|
181
|
+
rescue Object => e
|
182
|
+
puts "Error on #{server.name}: #{e.message}"
|
183
|
+
end
|
184
|
+
end
|
185
|
+
end
|
186
|
+
|
187
|
+
def task(msg, &block)
|
188
|
+
printf "---> %-24s ", "#{msg}..."
|
189
|
+
start = Time.now
|
190
|
+
result = block.call || 'done'
|
191
|
+
result = "done" unless result.is_a? String
|
192
|
+
finish = Time.now
|
193
|
+
time = sprintf("%0.1f", finish - start)
|
194
|
+
puts "#{result} (#{time}s)"
|
195
|
+
result
|
196
|
+
end
|
197
|
+
|
198
|
+
def volume_id_to_sumo(volume)
|
199
|
+
Sumo::Server.all.detect { |s| s.volumes.invert[volume] }
|
200
|
+
end
|
201
|
+
|
202
|
+
def ip_to_sumo(ip)
|
203
|
+
Sumo::Server.all.detect { |s| s.state["elastic_ip"] == ip }
|
204
|
+
end
|
205
|
+
|
206
|
+
def instance_id_to_sumo(instance_id)
|
207
|
+
Sumo::Server.all.detect { |s| s.state["instance_id"] and s.state["instance_id"] == instance_id }
|
208
|
+
end
|
209
|
+
end
|
210
|
+
end
|
211
|
+
|
212
|
+
CLI.start
|
data/lib/all.rb
ADDED
@@ -0,0 +1,17 @@
|
|
1
|
+
require 'rubygems'
|
2
|
+
require 'aws'
|
3
|
+
require 'right_aws'
|
4
|
+
require 'sdb/active_sdb' ##
|
5
|
+
require 'yaml'
|
6
|
+
require 'socket'
|
7
|
+
require 'json'
|
8
|
+
require 'couchrest' ##
|
9
|
+
require 'fileutils'
|
10
|
+
|
11
|
+
require 'fog'
|
12
|
+
require 'fog/aws/s3'
|
13
|
+
|
14
|
+
require File.dirname(__FILE__) + '/config'
|
15
|
+
require File.dirname(__FILE__) + '/group'
|
16
|
+
require File.dirname(__FILE__) + '/server'
|
17
|
+
require File.dirname(__FILE__) + '/couchrest_hacks'
|
data/lib/config.rb
ADDED
@@ -0,0 +1,233 @@
|
|
1
|
+
module Judo
|
2
|
+
module Config
|
3
|
+
extend self
|
4
|
+
|
5
|
+
## FIXME - maybe write these to defaults.json and dont have them hidden here in the code...
|
6
|
+
def defaults
|
7
|
+
defaults_file = "#{judo_dir}/defaults.json"
|
8
|
+
unless File.exists? defaults_file
|
9
|
+
File.open(defaults_file, "w") { |f| f.write(default_config.to_json) }
|
10
|
+
end
|
11
|
+
JSON.parse File.read(defaults_file)
|
12
|
+
end
|
13
|
+
|
14
|
+
def default_config
|
15
|
+
{
|
16
|
+
:key_name => "judo",
|
17
|
+
:instance_size => "m1.small",
|
18
|
+
:ami32 => "ami-bb709dd2", ## public ubuntu 9.10 ami - 32 bit
|
19
|
+
:ami64 => "ami-55739e3c", ## public ubuntu 9.10 ami - 64 bit
|
20
|
+
:user => "ubuntu",
|
21
|
+
:security_group => { "name" => "judo", "public_ports" => ["22"] },
|
22
|
+
:availability_zone => "us-east-1d" ## FIXME -- this should be state not config -- what if they differ - how to set?
|
23
|
+
}
|
24
|
+
end
|
25
|
+
|
26
|
+
# def merged_config(name)
|
27
|
+
# stack = load_config_stack(name)
|
28
|
+
# stack.reverse.inject({}) { |sum,conf| sum.merge(conf) }
|
29
|
+
# end
|
30
|
+
|
31
|
+
# def load_config_stack(name, all = [])
|
32
|
+
# return (all << defaults) if name.nil?
|
33
|
+
# conf = read_config(name)
|
34
|
+
# load_config_stack(conf["import"], all << conf)
|
35
|
+
# end
|
36
|
+
|
37
|
+
def repo_dir
|
38
|
+
judo_config["repo"] || File.dirname(judo_dir)
|
39
|
+
end
|
40
|
+
|
41
|
+
# def group_dirs
|
42
|
+
# Dir["#{repo_dir}/*/config.json"].map { |d| File.dirname(d) }
|
43
|
+
# end
|
44
|
+
|
45
|
+
# def group_dir(name)
|
46
|
+
# group_dirs.select { |d| File.basename(d) == name }
|
47
|
+
# end
|
48
|
+
|
49
|
+
# def groups
|
50
|
+
# group_dirs.map { |g| File.basename(g) }
|
51
|
+
# end
|
52
|
+
|
53
|
+
# def group
|
54
|
+
# File.basename(group_dirs.detect { |d| Dir.pwd == d or Dir.pwd =~ /^#{d}\// }) rescue nil
|
55
|
+
# end
|
56
|
+
|
57
|
+
# def read_config(name)
|
58
|
+
# begin
|
59
|
+
# JSON.parse(File.read("#{group_dir(name)}/config.json"))
|
60
|
+
# rescue Errno::ENOENT
|
61
|
+
# {}
|
62
|
+
# end
|
63
|
+
# end
|
64
|
+
|
65
|
+
def access_id
|
66
|
+
judo_config["access_id"] || ENV["AWS_ACCESS_KEY_ID"] || (raise "please define access_id in #{judo_config_file} or in the env as AWS_ACCESS_KEY_ID")
|
67
|
+
end
|
68
|
+
|
69
|
+
def access_secret
|
70
|
+
judo_config["access_secret"] || ENV["AWS_SECRET_ACCESS_KEY"] || (raise "please define access_secet in #{judo_config_file} or in the env as AWS_SECRET_ACCESS_KEY")
|
71
|
+
end
|
72
|
+
|
73
|
+
def ec2
|
74
|
+
@ec2 ||= Aws::Ec2.new(access_id, access_secret, :logger => Logger.new(nil))
|
75
|
+
end
|
76
|
+
|
77
|
+
### REMOVE
|
78
|
+
def couchdb
|
79
|
+
@couchdb ||= CouchRest.database!(couch_url)
|
80
|
+
end
|
81
|
+
|
82
|
+
def couch_url
|
83
|
+
judo_config["couch_url"] || "http://127.0.0.1:5984/judo"
|
84
|
+
end
|
85
|
+
|
86
|
+
## FIXME
|
87
|
+
def keypair_file
|
88
|
+
judo_config["keypair_file"] || "#{judo_dir}/keypair.pem"
|
89
|
+
end
|
90
|
+
|
91
|
+
# def connect
|
92
|
+
# @@con = Aws::ActiveSdb.establish_connection(Config.access_id, Config.access_secret, :logger => Logger.new(nil))
|
93
|
+
# one_time_setup unless setup?
|
94
|
+
# end
|
95
|
+
|
96
|
+
## FIXME
|
97
|
+
def one_time_setup
|
98
|
+
puts "ONE TIME SETUP"
|
99
|
+
Sumo::Server.create_domain
|
100
|
+
end
|
101
|
+
|
102
|
+
# def setup?
|
103
|
+
# Sumo::Server.connection.list_domains[:domains].include? Sumo::Server.domain
|
104
|
+
# end
|
105
|
+
|
106
|
+
## FIXME
|
107
|
+
def create_keypair
|
108
|
+
## EC2 create_key_pair
|
109
|
+
material = ec2.create_key_pair("judo")[:aws_material]
|
110
|
+
File.open(keypair_file, 'w') { |f| f.write material }
|
111
|
+
File.chmod 0600, keypair_file
|
112
|
+
end
|
113
|
+
|
114
|
+
## FIXME - this seems... lame
|
115
|
+
def create_security_group
|
116
|
+
## EC2 create_security_group
|
117
|
+
ec2.create_security_group('judo', 'Sumo')
|
118
|
+
## EC2 authorize_security_group
|
119
|
+
ec2.authorize_security_group_IP_ingress("judo", 22, 22,'tcp','0.0.0.0/0')
|
120
|
+
rescue Aws::AwsError
|
121
|
+
end
|
122
|
+
|
123
|
+
def judo_config
|
124
|
+
@config ||= read_judo_config
|
125
|
+
end
|
126
|
+
|
127
|
+
def judo_config_file
|
128
|
+
"#{judo_dir}/config.yml"
|
129
|
+
end
|
130
|
+
|
131
|
+
def judo_dir
|
132
|
+
@judo_dir ||= find_judo_dir(Dir.pwd) || abort("fatal: Not a judo repository (or any of the parent directories): .judo\nrun commands from the judo repository or type 'judo init' to setup the current directory as a new judo repository")
|
133
|
+
end
|
134
|
+
|
135
|
+
def find_judo_dir(check)
|
136
|
+
if check == "/"
|
137
|
+
if File.exists?("#{ENV['HOME']}/.judo")
|
138
|
+
"#{ENV['HOME']}/.judo"
|
139
|
+
else
|
140
|
+
nil
|
141
|
+
end
|
142
|
+
else
|
143
|
+
File.exists?(check + "/.judo") ? check + "/.judo" : find_judo_dir(File.dirname(check))
|
144
|
+
end
|
145
|
+
end
|
146
|
+
|
147
|
+
def read_judo_config
|
148
|
+
YAML.load File.read(judo_config_file)
|
149
|
+
rescue Errno::ENOENT
|
150
|
+
{}
|
151
|
+
end
|
152
|
+
|
153
|
+
def sdb
|
154
|
+
@sdb = Aws::SdbInterface.new(access_id, access_secret, :logger => Logger.new(nil))
|
155
|
+
end
|
156
|
+
|
157
|
+
def s3
|
158
|
+
# @s3 ||= RightAws::S3.new(access_id, access_secret, :logger => Logger.new(nil))
|
159
|
+
# @s3 ||= Aws::S3.new(access_id, access_secret)
|
160
|
+
@s3 = Fog::AWS::S3.new( :aws_access_key_id => access_id, :aws_secret_access_key => access_secret)
|
161
|
+
end
|
162
|
+
|
163
|
+
def s3_url(k)
|
164
|
+
s3.get_object_url(judo_config["s3_bucket"], k,Time.now.to_i + 100_000_000)
|
165
|
+
end
|
166
|
+
|
167
|
+
def s3_put(k, file)
|
168
|
+
s3.put_object(judo_config["s3_bucket"], k, file)
|
169
|
+
end
|
170
|
+
|
171
|
+
def collect(keys, prompt, &blk)
|
172
|
+
k = keys.detect do |k|
|
173
|
+
printf "Looking in your environment for #{k}..."
|
174
|
+
printf "found!" if ENV[k]
|
175
|
+
printf "\n"
|
176
|
+
ENV[k]
|
177
|
+
end
|
178
|
+
value = ENV[k]
|
179
|
+
retries = 3
|
180
|
+
begin
|
181
|
+
unless value
|
182
|
+
printf "#{prompt}: "
|
183
|
+
value = STDIN.readline
|
184
|
+
end
|
185
|
+
blk.call(value) if blk
|
186
|
+
value
|
187
|
+
rescue *[Interrupt, EOFError]
|
188
|
+
puts "\nGoodbye!"
|
189
|
+
exit(0)
|
190
|
+
rescue Object => e
|
191
|
+
fail "too many retries" if retries == 0
|
192
|
+
puts "There was an error: #{e.class}:#{e.message}"
|
193
|
+
puts "Try again or hit CTRL-C"
|
194
|
+
value = nil
|
195
|
+
retries -= 1
|
196
|
+
retry
|
197
|
+
end
|
198
|
+
|
199
|
+
end
|
200
|
+
|
201
|
+
def init
|
202
|
+
### sooooo ugly
|
203
|
+
require 'pp'
|
204
|
+
fail "you are already inside a judo repository" if find_judo_dir(Dir.pwd)
|
205
|
+
fail "./.git not found - judo configurations must be kept in a git repo. type 'git init' to setup the git repo." unless File.exists? "./.git"
|
206
|
+
aws_id = collect(['AWS_ACCESS_KEY_ID', 'AMAZON_ACCESS_KEY_ID'], "Please enter your AWS access key")
|
207
|
+
aws_secret = collect(['AWS_SECRET_ACCESS_KEY', 'AMAZON_SECRET_ACCESS_KEY'], "Please enter your AWS secret key") do |aws_secret|
|
208
|
+
puts "Trying to connect to SimpleDB with #{aws_id}:#{aws_secret}"
|
209
|
+
@sdb = Aws::SdbInterface.new(aws_id, aws_secret, :logger => Logger.new(nil))
|
210
|
+
@sdb.create_domain("judo_servers")
|
211
|
+
@sdb.create_domain("judo_config")
|
212
|
+
end
|
213
|
+
|
214
|
+
puts "setting up an s3 bucket"
|
215
|
+
s3_bucket = ENV['SUMO_S3_BUCKET'] || "judo_#{rand(2**128).to_s(16)}"
|
216
|
+
Fog::AWS::S3.new( :aws_access_key_id => aws_id, :aws_secret_access_key => aws_secret).put_bucket(s3_bucket)
|
217
|
+
|
218
|
+
puts "setting up an .judo/config.yml"
|
219
|
+
system "mkdir .judo"
|
220
|
+
File.open(".judo/config.yml","w") { |f| f.write({ "access_id" => aws_id, "access_secret" => aws_secret, "s3_bucket" => s3_bucket }.to_yaml) }
|
221
|
+
|
222
|
+
puts "Setting up default config and keypair"
|
223
|
+
system "mkdir default"
|
224
|
+
keypair = "judo_#{rand(2**64).to_s(16)}"
|
225
|
+
@ec2 = Aws::Ec2.new(access_id, access_secret, :logger => Logger.new(nil))
|
226
|
+
material = @ec2.create_key_pair(keypair)[:aws_material]
|
227
|
+
File.open("default/#{keypair}.pem", 'w') { |f| f.write material }
|
228
|
+
File.chmod 0600, "default/#{keypair}.pem"
|
229
|
+
File.open("default/config.json","w") { |f| f.write default_config.merge({ "keypair" => keypair }) }
|
230
|
+
puts "congratulations! - you're ready to go!"
|
231
|
+
end
|
232
|
+
end
|
233
|
+
end
|
data/lib/group.rb
ADDED
@@ -0,0 +1,166 @@
|
|
1
|
+
module Judo
|
2
|
+
class Group
|
3
|
+
attr_accessor :name, :dir
|
4
|
+
|
5
|
+
def self.dirs
|
6
|
+
Dir["#{Judo::Config.repo_dir}/*/config.json"].map { |d| File.dirname(d) }
|
7
|
+
end
|
8
|
+
|
9
|
+
def self.all
|
10
|
+
@@all ||= (dirs.map { |d| new(d) } << Group.new(Judo::Config.judo_dir, "default"))
|
11
|
+
end
|
12
|
+
|
13
|
+
def self.find(name)
|
14
|
+
all.detect { |d| d.name == name }
|
15
|
+
end
|
16
|
+
|
17
|
+
def self.[](name)
|
18
|
+
find(name)
|
19
|
+
end
|
20
|
+
|
21
|
+
def self.current
|
22
|
+
all.detect { |d| Dir.pwd == d.dir or Dir.pwd =~ /^#{d.dir}\// } || find("default")
|
23
|
+
end
|
24
|
+
|
25
|
+
def initialize(dir, name = File.basename(dir))
|
26
|
+
@name = name
|
27
|
+
@dir = dir
|
28
|
+
end
|
29
|
+
|
30
|
+
def create_server(server_name)
|
31
|
+
abort("Server needs a name") if server_name.nil?
|
32
|
+
# abort("Already a server named #{server_name}") if Judo::Server.find_by_name(attrs[:name]) ## FIXME
|
33
|
+
# Judo::Config.read_config(attrs[:group]) ## make sure the config is valid ## FIXME
|
34
|
+
|
35
|
+
server = Judo::Server.new server_name, self
|
36
|
+
server.task("Creating server #{server_name}") do
|
37
|
+
server.update "name" => server_name, "group" => name, "virgin" => true, "secret" => rand(2 ** 128).to_s(36)
|
38
|
+
Judo::Config.sdb.put_attributes("judo_config", "groups", name => server_name)
|
39
|
+
end
|
40
|
+
server
|
41
|
+
end
|
42
|
+
|
43
|
+
def config
|
44
|
+
@config ||= self.class.load_all(self)
|
45
|
+
end
|
46
|
+
|
47
|
+
def server_names
|
48
|
+
Judo::Config.sdb.get_attributes("judo_config", "groups", @name)[:attributes][@name] || []
|
49
|
+
end
|
50
|
+
|
51
|
+
def servers
|
52
|
+
server_names.map { |n| Judo::Server.new(n, self) }
|
53
|
+
end
|
54
|
+
|
55
|
+
def compile
|
56
|
+
tmpdir = "/tmp/kuzushi/#{name}"
|
57
|
+
FileUtils.rm_rf(tmpdir)
|
58
|
+
FileUtils.mkdir_p(tmpdir)
|
59
|
+
Dir.chdir(tmpdir) do |d|
|
60
|
+
attachments.each do |to,from|
|
61
|
+
FileUtils.mkdir_p(File.dirname(to))
|
62
|
+
FileUtils.cp(from,to)
|
63
|
+
end
|
64
|
+
File.open("config.json", "w") { |f| f.write(config.to_json) }
|
65
|
+
Dir.chdir("..") do
|
66
|
+
system "tar czvf #{tar_file} #{name}"
|
67
|
+
puts "Uploading to s3..."
|
68
|
+
Judo::Config.s3_put(tar_file, File.new(tar_file))
|
69
|
+
end
|
70
|
+
end
|
71
|
+
end
|
72
|
+
|
73
|
+
def tar_file
|
74
|
+
"#{name}.tar.gz"
|
75
|
+
end
|
76
|
+
|
77
|
+
def s3_url
|
78
|
+
@url = Judo::Config.s3_url(tar_file)
|
79
|
+
end
|
80
|
+
|
81
|
+
def cp_file
|
82
|
+
FileUtil.mkdir_p(tmpdir)
|
83
|
+
end
|
84
|
+
|
85
|
+
def parent
|
86
|
+
self.class.find(config["import"])
|
87
|
+
end
|
88
|
+
|
89
|
+
def extract_file(type, name, files)
|
90
|
+
path = "#{dir}/#{type}s/#{name}"
|
91
|
+
found = Dir[path]
|
92
|
+
if not found.empty?
|
93
|
+
found.each { |f| files["#{type}s/#{File.basename(f)}"] = f }
|
94
|
+
elsif parent
|
95
|
+
parent.extract_file(type, name, files)
|
96
|
+
else
|
97
|
+
raise "Cannot find file #{name} of type #{type}"
|
98
|
+
end
|
99
|
+
end
|
100
|
+
|
101
|
+
def extract(config, files)
|
102
|
+
config.each do |key,value|
|
103
|
+
[value].flatten.each do |v| ### cover "packages" : ["a","b"], "packages" : "a", "packages":[{ "file" : "foo.pkg"}]
|
104
|
+
extract(v, files) if v.is_a? Hash
|
105
|
+
case key
|
106
|
+
when *[ "init", "before", "after" ]
|
107
|
+
extract_file(:script, v, files) unless v =~ /^#!/
|
108
|
+
when "local_packages"
|
109
|
+
extract_file(:package, "#{v}*", files)
|
110
|
+
when "template"
|
111
|
+
extract_file(:template, v, files)
|
112
|
+
when "source"
|
113
|
+
extract_file(:file, v, files) unless config["template"]
|
114
|
+
when "file"
|
115
|
+
extract_file(:file, File.basename(v), files) unless config["template"] or config["source"]
|
116
|
+
end
|
117
|
+
end
|
118
|
+
end
|
119
|
+
files
|
120
|
+
end
|
121
|
+
|
122
|
+
def keypair_file
|
123
|
+
(attachments.select { |a| File.basename(config["keypair"]) }) || fail("no keypair_file specified")
|
124
|
+
end
|
125
|
+
|
126
|
+
def attachments
|
127
|
+
extract(config, {})
|
128
|
+
end
|
129
|
+
|
130
|
+
def self.load_all(group, configs = [])
|
131
|
+
return configs.reverse.inject(Judo::Config.defaults) { |sum,conf| sum.merge(conf) } unless group
|
132
|
+
raw_config = group.read_config
|
133
|
+
load_all(find(raw_config["import"]), configs << raw_config)
|
134
|
+
end
|
135
|
+
|
136
|
+
def config_file
|
137
|
+
return "#{dir}/defaults.json" if name == "default"
|
138
|
+
"#{dir}/config.json"
|
139
|
+
end
|
140
|
+
|
141
|
+
def read_config
|
142
|
+
begin
|
143
|
+
JSON.parse(File.read(config_file))
|
144
|
+
rescue Errno::ENOENT
|
145
|
+
{}
|
146
|
+
end
|
147
|
+
end
|
148
|
+
|
149
|
+
def delete_server(server)
|
150
|
+
sdb.delete_attributes("judo_config", "groups", name => server.name)
|
151
|
+
end
|
152
|
+
|
153
|
+
def default?
|
154
|
+
false
|
155
|
+
end
|
156
|
+
|
157
|
+
def to_s
|
158
|
+
name
|
159
|
+
end
|
160
|
+
|
161
|
+
def sdb
|
162
|
+
Judo::Config.sdb
|
163
|
+
end
|
164
|
+
end
|
165
|
+
end
|
166
|
+
|
data/lib/server.rb
ADDED
@@ -0,0 +1,462 @@
|
|
1
|
+
### NEEDED for new gem launch
|
2
|
+
|
3
|
+
### 32 hrs to go - 12:00am Feb 26th - expected completion Mar 2
|
4
|
+
### [ ] judo init (2 hrs)
|
5
|
+
### [ ] implement real default config - remove special case code (3 hrs)
|
6
|
+
### [ ] complete slug compile - load into s3 (4 hrs)
|
7
|
+
### [ ] refactor availability_zone (2 hrs)
|
8
|
+
### [ ] refactor keypair.pem setup (3 hrs)
|
9
|
+
### [ ] implement auto security_group creation and setup (6 hrs)
|
10
|
+
### [ ] version in the db - require upgrade of gem if db version ahead (1 hr)
|
11
|
+
### [ ] write some examples - simple postgres/redis/couchdb server (5hrs)
|
12
|
+
### [ ] two phase delete (1 hr)
|
13
|
+
### [ ] write new README (4 hrs)
|
14
|
+
### [ ] realase new gem! (1 hr)
|
15
|
+
|
16
|
+
### [ ] user a logger service (1 hr)
|
17
|
+
### [ ] write 4 simple specs (1 hr)
|
18
|
+
|
19
|
+
### Do Later
|
20
|
+
### [ ] use amazon's new conditional write tools so we never have problems from concurrent updates
|
21
|
+
### [ ] is thor really what we want to use here?
|
22
|
+
### [ ] need to be able to pin a config to a version of kuzushi - gem updates can/will break a lot of things
|
23
|
+
### [ ] I want a "judo monitor" command that will make start servers if they go down, and poke a listed port to make sure a service is listening, would be cool if it also detects wrong ami, wrong secuirity group, missing/extra volumes, missing/extra elastic_ip - might not want to force a reboot quite yet in these cases
|
24
|
+
### [ ] Implement "judo snapshot [NAME]" to take a snapshot of the ebs's blocks
|
25
|
+
### [ ] ruby 1.9.1 support
|
26
|
+
### [ ] find a good way to set the hostname or prompt to :name
|
27
|
+
### [ ] remove fog/s3 dependancy
|
28
|
+
### [ ] enforce template files end in .erb to make room for other possible templates as defined by the extensions
|
29
|
+
### [ ] zerigo integration for automatic DNS setup
|
30
|
+
### [ ] How cool would it be if this was all reimplemented in eventmachine and could start lots of boxes in parallel? Would need to evented AWS api calls... Never seen a library to do that - would have to write our own... "Fog Machine?"
|
31
|
+
|
32
|
+
module Judo
|
33
|
+
class Server
|
34
|
+
attr_accessor :name, :group
|
35
|
+
|
36
|
+
def initialize(name, group)
|
37
|
+
@name = name
|
38
|
+
@group = group
|
39
|
+
end
|
40
|
+
|
41
|
+
### Getting things in and out of SimpleDB - would be nice if it were a plugin so we could use another store if need db (couchdb/mongo/sql)
|
42
|
+
|
43
|
+
def self.migrate
|
44
|
+
require 'pp'
|
45
|
+
x = {}
|
46
|
+
Judo::Config.sdb.delete_domain("judo_servers")
|
47
|
+
Judo::Config.sdb.create_domain("judo_servers")
|
48
|
+
Judo::Config.sdb.delete_domain("judo_config")
|
49
|
+
Judo::Config.sdb.create_domain("judo_config")
|
50
|
+
Judo::Config.sdb.select("SELECT * FROM sumo_server").items.each do |chunk|
|
51
|
+
chunk.each do |uid,item|
|
52
|
+
name = item["name"] || "missing_#{rand(2**32).to_s(36)}"
|
53
|
+
group = (item["group"] || "default")
|
54
|
+
data = {
|
55
|
+
"group" => (item["group"] || "default" ), "name" => name,
|
56
|
+
"secret" => item["secret"], "elastic_ip" => item["elastic_ip"],
|
57
|
+
"volumes" => (item["volumes_flat"] || JSON.parse(item["volumes_json"] || "[]").map { |k,v| "#{k}:#{v}" }),
|
58
|
+
"instance_id" => item["instance_id"], "virgin" => item["virgin"]
|
59
|
+
}.delete_if { |k,v| v == [] or v == nil or v == [nil] }
|
60
|
+
pp data
|
61
|
+
Judo::Config.sdb.put_attributes("judo_servers", name, data, :replace)
|
62
|
+
Judo::Config.sdb.put_attributes("judo_config", "groups", group => name)
|
63
|
+
end
|
64
|
+
end
|
65
|
+
"ok"
|
66
|
+
end
|
67
|
+
|
68
|
+
def domain
|
69
|
+
"judo_servers"
|
70
|
+
end
|
71
|
+
|
72
|
+
def sdb
|
73
|
+
Judo::Config.sdb
|
74
|
+
end
|
75
|
+
|
76
|
+
def fetch_state
|
77
|
+
Judo::Config.sdb.get_attributes(domain, name)[:attributes]
|
78
|
+
end
|
79
|
+
|
80
|
+
def super_state
|
81
|
+
@@state ||= {}
|
82
|
+
end
|
83
|
+
|
84
|
+
def state
|
85
|
+
super_state[name] ||= fetch_state
|
86
|
+
end
|
87
|
+
|
88
|
+
def get(key)
|
89
|
+
state[key] && [state[key]].flatten.first
|
90
|
+
end
|
91
|
+
|
92
|
+
def instance_id
|
93
|
+
get "instance_id"
|
94
|
+
end
|
95
|
+
|
96
|
+
def elastic_ip
|
97
|
+
get "elastic_ip"
|
98
|
+
end
|
99
|
+
|
100
|
+
def virgin?
|
101
|
+
get("virgin").to_s == "true" ## I'm going to set it to true and it will come back from the db as "true" -> could be "false" or false or nil also
|
102
|
+
end
|
103
|
+
|
104
|
+
def secret
|
105
|
+
get "secret"
|
106
|
+
end
|
107
|
+
|
108
|
+
def volumes
|
109
|
+
Hash[ (state["volumes"] || []).map { |a| a.split(":") } ]
|
110
|
+
end
|
111
|
+
|
112
|
+
def update(attrs)
|
113
|
+
sdb.put_attributes(domain, name, attrs, :replace)
|
114
|
+
state.merge! attrs
|
115
|
+
end
|
116
|
+
|
117
|
+
def add(key, value)
|
118
|
+
sdb.put_attributes(domain, name, { key => value })
|
119
|
+
(state[key] ||= []) << value
|
120
|
+
end
|
121
|
+
|
122
|
+
def remove(key, value = nil)
|
123
|
+
if value
|
124
|
+
sdb.delete_attributes(domain, name, key => value)
|
125
|
+
state[key] - [value]
|
126
|
+
else
|
127
|
+
sdb.delete_attributes(domain, name, [ key ])
|
128
|
+
state.delete(key)
|
129
|
+
end
|
130
|
+
end
|
131
|
+
|
132
|
+
def delete
|
133
|
+
group.delete_server(self)
|
134
|
+
sdb.delete_attributes(domain, name)
|
135
|
+
end
|
136
|
+
|
137
|
+
######## end simple DB access #######
|
138
|
+
|
139
|
+
def instance_size
|
140
|
+
config["instance_size"]
|
141
|
+
end
|
142
|
+
|
143
|
+
def config
|
144
|
+
group.config
|
145
|
+
end
|
146
|
+
|
147
|
+
def to_s
|
148
|
+
"#{group}:#{name}"
|
149
|
+
end
|
150
|
+
|
151
|
+
# Provision the AWS resources this server's config calls for:
# one EBS volume per "ebs" entry under config["volumes"] (devices that
# already have a volume are skipped), plus an elastic IP when requested
# and not yet held.
def allocate_resources
  (config["volumes"] || []).each do |volume_config|
    device = volume_config["device"]
    unless volume_config["media"] == "ebs"
      puts "device #{device || volume_config["mount"]} is not of media type 'ebs', skipping..."
      next
    end
    if volumes[device]
      puts "Volume #{device} already exists."
      next
    end
    size = volume_config["size"]
    task("Creating EC2 Volume #{device} #{size}") do
      ### EC2 create_volume
      volume_id = Judo::Config.ec2.create_volume(nil, size, config["availability_zone"])[:aws_id]
      add_volume(volume_id, device)
    end
  end

  begin
    if config["elastic_ip"] && !elastic_ip
      ### EC2 allocate_address
      task("Adding an elastic ip") do
        add_ip(Judo::Config.ec2.allocate_address)
      end
    end
  rescue Aws::AwsError => e
    # Running out of elastic IPs is a user-facing condition; anything
    # else is unexpected and should propagate.
    raise unless e.message =~ /AddressLimitExceeded/
    abort "Failed to allocate ip address: Limit Exceeded"
  end
end
|
188
|
+
|
189
|
+
# Instance-side convenience wrapper around the class-level task helper.
def task(msg, &block)
  self.class.task(msg, &block)
end
|
192
|
+
|
193
|
+
# Print "---> <msg>... ", run the block, then report its result together
# with the elapsed wall-clock time. A non-string result is reported (and
# returned) as "done"; a string result is passed through unchanged.
def self.task(msg, &block)
  printf "---> %-24s ", "#{msg}..."
  STDOUT.flush
  started_at = Time.now
  result = block.call
  result = "done" unless result.is_a? String
  elapsed = sprintf("%0.1f", Time.now - started_at)
  puts "#{result} (#{elapsed}s)"
  result
end
|
204
|
+
|
205
|
+
# Whether an elastic IP is recorded for this server.
def has_ip?
  !!elastic_ip
end
|
208
|
+
|
209
|
+
# Whether any EBS volumes are registered on this server.
def has_volumes?
  !volumes.empty?
end
|
212
|
+
|
213
|
+
# EC2 descriptions of this server's registered volumes ([] when none,
# avoiding a pointless API call).
def ec2_volumes
  volume_ids = volumes.values
  return [] if volume_ids.empty?
  Judo::Config.ec2.describe_volumes(volume_ids)
end
|
217
|
+
|
218
|
+
# Release the elastic IP back to AWS (best-effort -- the rescue is
# deliberate so a failed release still clears local state) and forget it.
def remove_ip
  Judo::Config.ec2.release_address(elastic_ip) rescue nil
  remove "elastic_ip"
end
|
222
|
+
|
223
|
+
# Tear the server down completely: terminate the instance, release the
# elastic IP, delete every EBS volume, then drop the SDB record.
def destroy
  stop if running?
  ### EC2 release_address
  task("Deleting Elastic Ip") { remove_ip } if has_ip?
  volumes.each { |device, volume_id| remove_volume(volume_id, device) }
  task("Destroying server #{name}") { delete }
end
|
230
|
+
|
231
|
+
# EC2-reported state string for the instance; "offline" when the lookup
# fails (e.g. no instance id recorded).
def ec2_state
  ec2_instance[:aws_state] rescue "offline"
end
|
234
|
+
|
235
|
+
# Raw EC2 description hash for this server's instance, looked up in a
# class-wide memoized describe_instances listing. Returns {} when the
# instance is not present in the listing.
def ec2_instance
  ### EC2 describe_instances
  @@ec2_list ||= Config.ec2.describe_instances
  @@ec2_list.detect { |desc| desc[:aws_instance_id] == instance_id } || {}
end
|
240
|
+
|
241
|
+
# True while EC2 reports the instance in any live state. The remaining
# possibilities are "terminated" and the nil/"offline" fallback.
def running?
  %w[pending running shutting_down degraded].include?(ec2_state)
end
|
245
|
+
|
246
|
+
# Launch the instance and bring it fully online: wait for a hostname and
# ssh, then attach the elastic IP and volumes when configured.
def start
  abort "Already running" if running?
  task("Starting server #{name}") { launch_ec2 }
  task("Acquire hostname")        { wait_for_hostname }
  task("Wait for ssh")            { wait_for_ssh }
  task("Attaching ip")            { attach_ip } if elastic_ip
  task("Attaching volumes")       { attach_volumes } if has_volumes?
end
|
254
|
+
|
255
|
+
# Stop the instance when it is running, then start it fresh.
def restart
  stop if running?
  start
end
|
259
|
+
|
260
|
+
# Whether the name looks auto-generated ("<group>.<digits>").
def generic_name?
  name =~ /^#{group}[.]\d*$/
end
|
263
|
+
|
264
|
+
# A "generic" server owns no volumes, holds no elastic IP, and carries
# an auto-generated name -- i.e. it is freely replaceable.
def generic?
  volumes.empty? && !has_ip? && generic_name?
end
|
267
|
+
|
268
|
+
# Terminate the EC2 instance backing this server, wait for its EBS
# volumes to detach, and forget the instance id.
def stop
  abort "not running" unless running?
  ## EC2 terminate_instances
  task("Terminating instance") { Config.ec2.terminate_instances([ instance_id ]) }
  task("Wait for volumes to detach") { wait_for_volumes_detached } unless volumes.empty?
  remove "instance_id"
end
|
275
|
+
|
276
|
+
# Boot a fresh EC2 instance for this server and record its id. Also
# clears the virgin flag so subsequent boots run kuzushi "start"
# rather than "init" (see user_data).
def launch_ec2
  validate

  ## EC2 launch_instances
  launch_options = {
    :instance_type     => config["instance_size"],
    :availability_zone => config["availability_zone"],
    :key_name          => config["key_name"],
    :group_ids         => [config["security_group"]],
    :user_data         => user_data
  }
  result = Config.ec2.launch_instances(ami, launch_options).first

  update "instance_id" => result[:aws_instance_id], "virgin" => false
end
|
290
|
+
|
291
|
+
# Raw boot console output from EC2 for this server's instance.
def console_output
  ### EC2 get_console_output
  Config.ec2.get_console_output(instance_id)[:aws_output]
end
|
295
|
+
|
296
|
+
# The group's AMI matching this instance size's architecture.
def ami
  ia32? ? config["ami32"] : config["ami64"]
end
|
299
|
+
|
300
|
+
# 32-bit architecture? Only these two instance types are i386.
def ia32?
  %w[m1.small c1.medium].include?(instance_size)
end
|
303
|
+
|
304
|
+
# 64-bit architecture? Simply the complement of ia32?.
def ia64?
  !ia32?
end
|
307
|
+
|
308
|
+
# Public DNS name from EC2, or nil while it is unassigned (EC2 reports
# "" before assignment; a missing instance yields nil directly).
def hostname
  dns = ec2_instance[:dns_name]
  dns == "" ? nil : dns
end
|
311
|
+
|
312
|
+
# Poll (1s interval, refreshing cached EC2 data each pass) until EC2
# assigns a public DNS name; returns that name.
def wait_for_hostname
  loop do
    reload
    host = hostname
    return host if host
    sleep 1
  end
end
|
319
|
+
|
320
|
+
# Poll (2s interval) until every registered EBS volume reports the
# "available" status, i.e. has detached from the old instance.
def wait_for_volumes_detached
  sleep 2 until ec2_volumes.all? { |vol| vol[:aws_status] == "available" }
end
|
326
|
+
|
327
|
+
# Poll (1s interval, refreshing cached EC2 data each pass) until EC2
# reports the instance terminated.
def wait_for_termination
  loop do
    reload
    break if ec2_instance[:aws_state] == "terminated"
    sleep 1
  end
end
|
334
|
+
|
335
|
+
# Block until port 22 on the instance accepts a TCP connection. Each
# probe is capped at 4 seconds; expected connection failures simply
# trigger another attempt.
def wait_for_ssh
  abort "not running" unless running?
  loop do
    begin
      Timeout::timeout(4) do
        TCPSocket.new(hostname, 22)
        return
      end
    rescue SocketError, Timeout::Error, Errno::ECONNREFUSED, Errno::EHOSTUNREACH
      # sshd not reachable yet -- keep polling
    end
  end
end
|
347
|
+
|
348
|
+
# Record a freshly allocated elastic IP and attach it immediately.
def add_ip(public_ip)
  update "elastic_ip" => public_ip
  attach_ip
end
|
352
|
+
|
353
|
+
# Point the elastic IP at the current instance. No-op unless both a
# running instance and an elastic IP exist.
def attach_ip
  return unless running? && elastic_ip
  ### EC2 associate_address
  Config.ec2.associate_address(instance_id, elastic_ip)
end
|
358
|
+
|
359
|
+
# Reverse-DNS name of the elastic IP (resolved by shelling out to dig);
# nil when no elastic IP is held.
def dns_name
  return nil unless elastic_ip
  `dig +short -x #{elastic_ip}`.strip
end
|
363
|
+
|
364
|
+
# Attach every registered EBS volume to the running instance at its
# recorded device. No-op while stopped.
def attach_volumes
  return unless running?
  volumes.each do |device, volume_id|
    ### EC2 attach_volume
    Config.ec2.attach_volume(volume_id, instance_id, device)
  end
end
|
371
|
+
|
372
|
+
# Delete the EBS volume attached at +device+ and drop it from state.
def remove_volume(volume_id, device)
  task("Deleting #{device} #{volume_id}") do
    ### EC2 delete_volume
    Judo::Config.ec2.delete_volume(volume_id)
    remove "volumes", "#{device}:#{volume_id}"
  end
end
|
379
|
+
|
380
|
+
# Register an EBS volume on +device+ (at most one volume per device)
# and attach it immediately when the server is running.
# Returns the volume_id.
def add_volume(volume_id, device)
  abort("Server already has a volume on that device") if volumes[device]

  add "volumes", "#{device}:#{volume_id}"

  Config.ec2.attach_volume(volume_id, instance_id, device) if running?

  volume_id
end
|
389
|
+
|
390
|
+
# Open an interactive ssh session to the running server, authenticating
# with the group's keypair file as the configured user.
def connect_ssh
  abort "not running" unless running?
  system "ssh -i #{group.keypair_file} #{config["user"]}@#{hostname}"
end
|
394
|
+
|
395
|
+
# Push every group's config document up to CouchDB (preserving the
# existing _rev for updates), then store the group's files, templates,
# packages and scripts as attachments on that document. When
# Config.group is set, only that group is committed.
def self.commit
  Config.group_dirs.each do |group_dir|
    group = File.basename(group_dir)
    next if Config.group && Config.group != group
    puts "commiting #{group}"
    existing = Config.couchdb.get(group) rescue {}
    config = Config.read_config(group)
    config['_id'] = group
    config['_rev'] = existing['_rev'] if existing.has_key?('_rev')
    response = Config.couchdb.save_doc(config)
    doc = Config.couchdb.get(response['id'])

    # Walk the group's subdirectories and save each file as an attachment.
    %w[files templates packages scripts].each do |subdir|
      Dir["#{group_dir}/#{subdir}/*"].each do |path|
        puts "storing attachment #{path}"
        doc.put_attachment("#{subdir}/#{File.basename(path)}", File.read(path))
      end
    end
  end
end
|
416
|
+
|
417
|
+
# Best-known address for the server: the live EC2 hostname, falling back
# to a configured value. NOTE(review): the fallback key is "state_ip" --
# possibly a typo for "static_ip"; confirm against the group config schema
# before renaming.
def ip
  hostname || config["state_ip"]
end
|
420
|
+
|
421
|
+
# Drop all cached EC2 and SimpleDB data for this server so the next
# read re-fetches fresh values.
def reload
  @@ec2_list = nil
  super_state.delete(name)
end
|
425
|
+
|
426
|
+
# Shell script handed to EC2 as user-data and executed on first boot:
# it installs ruby + the kuzushi gem and runs kuzushi against this
# group's CouchDB config. The interpolations (secret, virgin?, url)
# are the only dynamic parts; a virgin server runs "init", otherwise
# "start". The heredoc body is delivered verbatim to the instance.
def user_data
  <<USER_DATA
#!/bin/sh

export DEBIAN_FRONTEND="noninteractive"
export DEBIAN_PRIORITY="critical"
export SECRET='#{secret}'
apt-get update
apt-get install ruby rubygems ruby-dev irb libopenssl-ruby libreadline-ruby -y
gem install kuzushi --no-rdoc --no-ri
GEM_BIN=`ruby -r rubygems -e "puts Gem.bindir"`
$GEM_BIN/kuzushi #{virgin? && "init" || "start"} #{url}
USER_DATA
end
|
440
|
+
|
441
|
+
# CouchDB URL of this server's group document (consumed by kuzushi).
def url
  "#{Judo::Config.couch_url}/#{group}"
end
|
444
|
+
|
445
|
+
# Ensure the AWS prerequisites for launching exist: the judo security
# group, and the configured keypair. The "judo" keypair is created on
# demand; any other missing keypair is an error.
def validate
  ### EC2 create_security_group
  Judo::Config.create_security_group

  ### EC2 describe_key_pairs
  keypair = Judo::Config.ec2.describe_key_pairs.detect do |kp|
    kp[:aws_key_name] == config["key_name"]
  end
  return if keypair

  if config["key_name"] == "judo"
    Judo::Config.create_keypair
  else
    raise "cannot use key_pair #{config["key_name"]} b/c it does not exist"
  end
end
|
460
|
+
|
461
|
+
end
|
462
|
+
end
|
data/spec/base.rb
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
require File.dirname(__FILE__) + '/../lib/all'
|
2
|
+
|
3
|
+
require 'bacon'
|
4
|
+
require 'mocha/standalone'
|
5
|
+
require 'mocha/object'
|
6
|
+
|
7
|
+
# Reopen Bacon's Context to wire Mocha into every spec: mocha_setup runs
# before each spec, mocha_verify + mocha_teardown after.
class Bacon::Context
  include Mocha::API

  def initialize(name, &block)
    @name   = name
    @before = [lambda { mocha_setup }]
    @after  = [lambda { mocha_verify ; mocha_teardown }]
    @block  = block
  end

  # Disabled spec: declared but never executed.
  def xit(desc, &bk)
  end
end
|
data/spec/server_spec.rb
ADDED
metadata
ADDED
@@ -0,0 +1,104 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: judo
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.0.1
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- Orion Henry
|
8
|
+
autorequire:
|
9
|
+
bindir: bin
|
10
|
+
cert_chain: []
|
11
|
+
|
12
|
+
date: 2010-03-02 00:00:00 -08:00
|
13
|
+
default_executable: judo
|
14
|
+
dependencies:
|
15
|
+
- !ruby/object:Gem::Dependency
|
16
|
+
name: uuidtools
|
17
|
+
type: :runtime
|
18
|
+
version_requirement:
|
19
|
+
version_requirements: !ruby/object:Gem::Requirement
|
20
|
+
requirements:
|
21
|
+
- - ">="
|
22
|
+
- !ruby/object:Gem::Version
|
23
|
+
version: "0"
|
24
|
+
version:
|
25
|
+
- !ruby/object:Gem::Dependency
|
26
|
+
name: aws
|
27
|
+
type: :runtime
|
28
|
+
version_requirement:
|
29
|
+
version_requirements: !ruby/object:Gem::Requirement
|
30
|
+
requirements:
|
31
|
+
- - ">="
|
32
|
+
- !ruby/object:Gem::Version
|
33
|
+
version: "0"
|
34
|
+
version:
|
35
|
+
- !ruby/object:Gem::Dependency
|
36
|
+
name: thor
|
37
|
+
type: :runtime
|
38
|
+
version_requirement:
|
39
|
+
version_requirements: !ruby/object:Gem::Requirement
|
40
|
+
requirements:
|
41
|
+
- - ">="
|
42
|
+
- !ruby/object:Gem::Version
|
43
|
+
version: "0"
|
44
|
+
version:
|
45
|
+
- !ruby/object:Gem::Dependency
|
46
|
+
name: json
|
47
|
+
type: :runtime
|
48
|
+
version_requirement:
|
49
|
+
version_requirements: !ruby/object:Gem::Requirement
|
50
|
+
requirements:
|
51
|
+
- - ">="
|
52
|
+
- !ruby/object:Gem::Version
|
53
|
+
version: "0"
|
54
|
+
version:
|
55
|
+
description: A no-hassle way to launch one-off EC2 instances from the command line
|
56
|
+
email: orion@heroku.com
|
57
|
+
executables:
|
58
|
+
- judo
|
59
|
+
extensions: []
|
60
|
+
|
61
|
+
extra_rdoc_files:
|
62
|
+
- README.rdoc
|
63
|
+
files:
|
64
|
+
- README.rdoc
|
65
|
+
- Rakefile
|
66
|
+
- VERSION
|
67
|
+
- bin/judo
|
68
|
+
- lib/all.rb
|
69
|
+
- lib/config.rb
|
70
|
+
- lib/group.rb
|
71
|
+
- lib/server.rb
|
72
|
+
- spec/base.rb
|
73
|
+
- spec/server_spec.rb
|
74
|
+
has_rdoc: true
|
75
|
+
homepage: http://github.com/orionz/judo
|
76
|
+
licenses: []
|
77
|
+
|
78
|
+
post_install_message:
|
79
|
+
rdoc_options:
|
80
|
+
- --charset=UTF-8
|
81
|
+
require_paths:
|
82
|
+
- lib
|
83
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
84
|
+
requirements:
|
85
|
+
- - ">="
|
86
|
+
- !ruby/object:Gem::Version
|
87
|
+
version: "0"
|
88
|
+
version:
|
89
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
90
|
+
requirements:
|
91
|
+
- - ">="
|
92
|
+
- !ruby/object:Gem::Version
|
93
|
+
version: "0"
|
94
|
+
version:
|
95
|
+
requirements: []
|
96
|
+
|
97
|
+
rubyforge_project: judo
|
98
|
+
rubygems_version: 1.3.5
|
99
|
+
signing_key:
|
100
|
+
specification_version: 3
|
101
|
+
summary: A no-hassle way to launch one-off EC2 instances from the command line
|
102
|
+
test_files:
|
103
|
+
- spec/base.rb
|
104
|
+
- spec/server_spec.rb
|