iscale 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +4 -0
- data/Gemfile +4 -0
- data/README.md +149 -0
- data/Rakefile +1 -0
- data/bin/iScale +331 -0
- data/config/iScale.yml +5 -0
- data/config/ssh_config +5 -0
- data/iscale.gemspec +20 -0
- data/lib/iScale.rb +5 -0
- data/lib/iscale/version.rb +3 -0
- data/open_iterm_tab.sh +16 -0
- data/open_iterm_window.sh +8 -0
- metadata +100 -0
data/.gitignore
ADDED
data/Gemfile
ADDED
data/README.md
ADDED
@@ -0,0 +1,149 @@
# iScale

Tool to manage large clusters at Scalarium from the command line.

While Scalarium offers a powerful web dashboard to manage your cluster, you are usually a lot faster working from the command line. iScale provides a role-based approach for displaying data or opening shells for a single instance, all instances of a role, or multiple roles in a single command.

## Installation

* Download the iScale files to a local directory (used as `DIR` below).
* `mv DIR/config/iScale.yml ~/.iScale`
* Edit `~/.iScale` to add your Scalarium username, your Scalarium token and shortcuts for your cluster names (see the example below).
* `cat DIR/config/ssh_config >> ~/.ssh/config`
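
The `~/.iScale` file is plain YAML (it is read with `YAML.load_file`). A minimal sketch could look like this; the keys `username`, `token` and `shortcuts` are the ones `bin/iScale` reads, and all values below are placeholders:

    username: your-scalarium-login
    token: your-scalarium-api-token
    shortcuts:
      staging: Name of your staging cluster
      production: Name of your production cluster

Each entry under `shortcuts` maps an alias to a full cluster name; a cluster argument that matches no shortcut is used as the full cluster name directly.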

### Dependencies

iScale depends on these gems being available:

* rest-client
* json
* yaml
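
If they are missing, the first two can be installed via RubyGems (`yaml` ships with Ruby's standard library):

    gem install rest-client json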

The `open` command requires that iScale runs on a Mac that has [iTerm](http://iterm.sourceforge.net/) installed; [AppleScript](http://iterm.sourceforge.net/scripting.shtml) is used to open the shells.
All other commands run on any Linux or Unix machine.

## Commands

The command pattern is always `iScale <cluster shortcut> <command> [<names>]`, with one exception: the `deploy` command is called without specifying a `cluster shortcut` (see below).

Cluster shortcuts can be defined in the `.iScale` configuration file; otherwise specify the full name of the Scalarium cluster to use.

### roles

Syntax: `roles`

Displays all roles of a cluster.

    PROMPT$ iScale.rb staging roles
    db-master: 6 instances
    db-slave: 0 instances
    hudson-slave: 1 instances
    lb: 1 instances
    monitoring: 1 instances
    monitoring-master: 1 instances
    rails-app: 0 instances
    redis-masters: 5 instances
    redis-slaves: 0 instances

### load

Syntax: `load <roles>|all`

Executes `uptime` on the specified servers and displays load information as a list. `<roles>` can be a single role or a list of roles.

    PROMPT$ iScale.rb staging load db-master redis-masters
    db-master
    mws-xdb-m01: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.07, 0.02, 0.00
    mws-xdb-m02: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.07, 0.02, 0.00
    mws-xdb-m03: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.07, 0.02, 0.00
    mws-xdb-m04: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.01, 0.01, 0.00
    mws-xdb-mf1: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    mws-xdb-testing1: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    total load average: 0.04, 0.01, 0.00
    total load: 0.22, 0.07, 0.00
    redis-masters
    mws-redis-m01a: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    mws-redis-m02a: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    mws-redis-m04c: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    mws-redis-mu1: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    mws-redis-mu2: ssh -A jesper@IP.compute.amazonaws.com => load average: 0.00, 0.00, 0.00
    total load average: 0.00, 0.00, 0.00
    total load: 0.00, 0.00, 0.00

### cpu

Syntax: `cpu <roles>|all`

Executes `iostat 3 2` on the specified servers and displays CPU usage information as a list. `<roles>` can be a single role or a list of roles.

    PROMPT$ iScale.rb staging cpu db-master redis-masters
    db-master cpu average: %user %nice %system %iowait %steal %idle
    mws-xdb-m01: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-xdb-m02: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.17, 0.00, 0.00, 99.83
    mws-xdb-m03: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-xdb-m04: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-xdb-mf1: ssh -A jesper@IP.compute.amazonaws.com => 0.17, 0.00, 0.00, 0.00, 0.00, 99.83
    mws-xdb-testing1: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.17, 0.00, 0.00, 99.83
    total cpu average: 0.03, 0.00, 0.06, 0.00, 0.00, 99.92
    total cpu: 0.17, 0.00, 0.34, 0.00, 0.00, 599.49
    redis-masters cpu average: %user %nice %system %iowait %steal %idle
    mws-redis-m01a: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-redis-m02a: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-redis-m04c: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-redis-mu1: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    mws-redis-mu2: ssh -A jesper@IP.compute.amazonaws.com => 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    total cpu average: 0.00, 0.00, 0.00, 0.00, 0.00, 100.00
    total cpu: 0.00, 0.00, 0.00, 0.00, 0.00, 500.00

### open

Syntax: `open <names>`

Opens a shell using `ssh -A` to all specified instances and immediately executes `sudo -sEH` afterwards. This allows you to use your local private key to connect to other instances within the cluster. `<names>` is a list that can contain role or instance names. Unless `<names>` is a single instance's name, the shells are opened in a new iTerm window.

    PROMPT$ iScale.rb staging open db-master zeus mws-redis-mu1
    opening new window...

### execute

Syntax: `execute <role> <command>`

Opens a shell using your configured user name on each instance of the specified `<role>` and executes the specified `<command>`.

    PROMPT$ iScale.rb staging execute db-master uptime
    ################################ mws-xdb-mf1 #################################
    14:11:40 up 65 days, 23:24, 0 users, load average: 0.00, 0.00, 0.00

    ############################## mws-xdb-testing1 ##############################
    14:11:39 up 113 days, 4:01, 0 users, load average: 0.02, 0.06, 0.02

    ################################ mws-xdb-m01 #################################
    14:11:39 up 139 days, 12:20, 0 users, load average: 0.00, 0.00, 0.00

    ################################ mws-xdb-m02 #################################
    14:11:39 up 211 days, 35 min, 0 users, load average: 0.00, 0.00, 0.00

    ################################ mws-xdb-m03 #################################
    14:11:39 up 211 days, 35 min, 0 users, load average: 0.00, 0.00, 0.00

    ################################ mws-xdb-m04 #################################
    14:11:39 up 211 days, 34 min, 0 users, load average: 0.00, 0.00, 0.00

### deploy

Syntax: `deploy <application>`

Starts the deployment of an application. Check the Scalarium web site for progress.
*Be careful if multiple applications have the same name!*

    PROMPT$ iScale.rb deploy "MW SSL"
    {"migration_instance_id":null,"recipes":null,"status":"running","command":"deploy","shift_between_restarts":0,"revision":null,"created_at":"2011/07/08 16:20:45 +0000","custom_json":null,"updated_at":"2011/07/08 16:20:45 +0000","comment":null,"successful":null,"completed_at":null,"migrate":null,...}
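
Under the hood this is a single Scalarium API call (see `bin/iScale` below). A rough standalone sketch using the same `rest-client` gem would look like the following, where the token and `APP_ID` are placeholders you look up yourself:

    require 'rest-client'
    require 'json'

    headers = {'X-Scalarium-Token' => 'your-scalarium-api-token',
               'Accept'            => 'application/vnd.scalarium-v1+json'}
    # Find the application id via GET /api/applications, then trigger the deploy:
    puts RestClient.post("https://manage.scalarium.com/api/applications/APP_ID/deploy",
                         JSON.dump(:command => 'deploy'), headers)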

### md5sum

Syntax: `md5sum <role> <file>`

Compares the `md5sum` output for the specified `<file>` on every instance of `<role>`. Results are grouped so you can quickly check whether one or more instances have a different (configuration) file deployed.

    PROMPT$ iScale.rb staging md5sum db-master /home/ubuntu/some.properties
    6x 9028c2a1f3d95c9396ba1f218570f58f: mws-xdb-m01 mws-xdb-m02 mws-xdb-m03 mws-xdb-m04 mws-xdb-mf1 mws-xdb-testing1
data/Rakefile
ADDED
@@ -0,0 +1 @@
require 'bundler/gem_tasks'
data/bin/iScale
ADDED
@@ -0,0 +1,331 @@
#!/usr/bin/env ruby

require 'rubygems'
require 'rest-client'
require 'json'
require 'yaml'

API_URL = 'https://manage.scalarium.com/api/clouds'

### BEGIN Scalarium API handling

def api(uri = '')
  JSON.parse(RestClient.get("#{API_URL}#{uri}", headers))
end

def headers
  {'X-Scalarium-Token' => @token, 'Accept' => 'application/vnd.scalarium-v1+json'}
end

def load_cloud(name)
  @cloud ||= api().detect{|cloud| cloud['name'] == name}
end

def cloud
  @cloud
end

def applications
  @applications ||= JSON.parse(RestClient.get("https://manage.scalarium.com/api/applications", headers))
end

def roles
  @roles ||= api("/#{cloud['id']}/roles")
end

def instances
  @instances ||= api("/#{cloud['id']}/instances").select{|instance| instance['status'] == 'online'}
end

def role_of_instance(instance)
  roles.detect { |r| instance['role_ids'].include? r['id']}
end

def instances_of_role(role)
  instances.select { |i| i['role_ids'].include? role['id'] }.sort{|i1, i2| i1['nickname'] <=> i2['nickname']}
end

def filtered_roles(filter)
  if filter == 'all'
    roles
  else
    roles.select { |role| role['shortname'] == filter }.sort{|r1, r2| r1['shortname'] <=> r2['shortname']}
  end
end

def filtered_instance(filter)
  instances.detect { |i| i['nickname'] == filter }
end
### END Scalarium API handling


### BEGIN base methods

def parse_command_line
  if ARGV.count == 2 && ['roles', 'refresh'].include?(ARGV[1])
    return cloud_name(ARGV[0]), ARGV[1], nil
  elsif ARGV.count == 2 && ARGV[0] == 'deploy'
    return nil, 'deploy', ARGV[1]
  elsif ARGV.count >= 3 && ['load', 'open', 'cpu', 'execute', 'md5sum'].include?(ARGV[1])
    return cloud_name(ARGV[0]), ARGV[1], ARGV[2..-1]
  else
    abort "Usage 1: #{file_name} <cloud> <command>\n" +
          "  cloud   := Scalarium cloud name or shortcut defined in .iScale\n" +
          "  command := roles | \n" +
          "             load { <roles> | all } |\n" +
          "             cpu { <roles> | all } |\n" +
          "             open <roles_or_instances>\n" +
          "             execute <role> <command>\n" +
          "             md5sum <role> <file>\n" +
          "Usage 2: #{file_name} deploy <application>"
  end
end

def cloud_name(shortcut)
  @shortcuts[shortcut] || shortcut
end

def load_config
  config = YAML.load_file "#{ENV['HOME']}/.iScale"
  @username = config['username']
  @token = config['token']
  @shortcuts = config['shortcuts']
  config
end

def abort(message)
  puts message
  exit
end

def home_dir
  File.join(File.expand_path(File.dirname(__FILE__)), "..")
end

def file_name
  File.basename(__FILE__)
end

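# collect runs the given block once per online instance of the role, each in
# its own worker thread, and waits at most 10 seconds per worker; instances
# that do not answer in time keep their 'TIMEOUT' placeholder result.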
def collect(role, verbose = false)
  workers = []
  results = {}
  instances = instances_of_role(role)
  instances.each do |instance|
    if verbose # TODO rather ugly, but currently we need two different kinds of defaults in case of timeout
      results[instance['nickname']] = {:instance => instance, :output => 'TIMEOUT'}
    else
      results[instance['nickname']] = 'TIMEOUT'
    end
    workers << Thread.new do
      results[instance['nickname']] = yield instance
    end
  end
  collectors = []
  workers.each { |w| collectors << Thread.new { w.join(10) } }
  collectors.each { |c| c.join }
  results
end

def cut(text, length = 60, cut_string = "...")
  if text
    l = length - cut_string.length
    (text.length > length ? text[0...l] + cut_string : text).to_s
  end
end

### END base methods


### BEGIN commands

def list_roles
  roles.sort{|r1, r2| r1['shortname'] <=> r2['shortname']}.each do |role|
    puts "#{role['shortname']}: #{instances_of_role(role).count} instances"
  end
end

def load_for_hosts_of_role(role)
  result = {}
  puts role['shortname']
  outputs = collect(role, verbose = true) do |instance|
    { :instance => instance,
      :output => `ssh #{@username}@#{instance['dns_name']} \"uptime | sed 's/.*load/load/'\"` }
  end
  outputs.each do |host, hash|
    result[host] = {}
    result[host][:address] = hash[:instance]['dns_name']
    result[host][:output] = hash[:output]
    from = 'load average: '.length
    til = -2
    result[host][:load_1m], result[host][:load_5m], result[host][:load_15m] = hash[:output][from..til].split(', ').map{|load| load.to_f} rescue puts "Error while getting load for #{host}"
  end
  load_total = {}
  load_count = 0
  result.keys.sort.each do |host|
    server = 'ssh -A ' + @username + '@' + result[host][:address] + ' =>'
    puts "#{(host + ':').ljust(14)} #{server.ljust(67)} #{result[host][:output]}" rescue puts("Error while printing #{host}")
    load_count += 1
    [:load_1m, :load_5m, :load_15m].each do |load|
      load_total[load] ||= 0
      if result[host][load]
        load_total[load] += result[host][load]
      end
    end
  end
  puts "#{'total load average:'.rjust(96)} %1.2f, %1.2f, %1.2f" % [load_total[:load_1m] / load_count, load_total[:load_5m] / load_count, load_total[:load_15m] / load_count]
  puts "#{'total load:'.rjust(96)} %1.2f, %1.2f, %1.2f" % [load_total[:load_1m], load_total[:load_5m], load_total[:load_15m]]
end

def cpu_for_hosts_of_role(role)
  result = {}
  puts "#{role['shortname'].ljust(69)} cpu average: %user %nice %system %iowait %steal %idle"
  outputs = collect(role, verbose = true) do |instance|
    { :instance => instance,
      :output => `ssh #{@username}@#{instance['dns_name']} \"iostat 3 2 | grep avg-cpu -C1 | tail -1\"`}
  end
  outputs.each do |host, hash|
    result[host] = {}
    result[host][:address] = hash[:instance]['dns_name']
    from = 'avg-cpu: '.length
    til = -1
    result[host][:result] = hash[:output][from..til].split(' ').delete_if{|t| t == ''}.map{|n| n.to_f} rescue puts("Error while getting load for #{host}")
  end
  load_total = []
  load_count = 0
  result.keys.sort.each do |host|
    server = 'ssh -A ' + @username + '@' + result[host][:address] + ' =>'
    puts "#{(host + ':').ljust(14)} #{server.ljust(67)} %6.2f, %6.2f, %6.2f, %6.2f, %6.2f, %6.2f" % \
      [result[host][:result][0], result[host][:result][1], result[host][:result][2], result[host][:result][3], result[host][:result][4], result[host][:result][5]] rescue(puts "Error while printing #{host}")
    load_count += 1
    (0..5).each do |i|
      load_total[i] ||= 0
      if result[host][:result] && result[host][:result][i]
        load_total[i] += result[host][:result][i]
      end
    end
  end
  puts "#{'total cpu average:'.rjust(82)} %6.2f, %6.2f, %6.2f, %6.2f, %6.2f, %6.2f" % \
    [load_total[0] / load_count, load_total[1] / load_count, load_total[2] / load_count, load_total[3] / load_count, load_total[4] / load_count, load_total[5] / load_count]
  puts "#{'total cpu:'.rjust(82)} %6.2f, %6.2f, %6.2f, %6.2f, %6.2f, %6.2f" % \
    [load_total[0], load_total[1], load_total[2], load_total[3], load_total[4], load_total[5]]
end


def run_commands_on_role(role, command)
  results = collect(role) { |instance| `ssh #{@username}@#{instance['dns_name']} \"#{command}\"` }
  results.keys.sort.each do |host|
    puts " #{host} ".center(78, '#')
    puts results[host]
    puts
  end
end


def deploy_application(name)
  app = applications.detect{|application| application['name'] == name}
  abort "Unknown application '#{name}'. Valid applications are #{applications.map{|a|a['name'].inspect}.join(' ')}" unless app
  puts RestClient.post("https://manage.scalarium.com/api/applications/#{app['id']}/deploy", JSON.dump(:command => 'deploy'), headers)
end


def check_md5_for_hosts_of_role(role, file)
  outputs = collect(role) { |instance| `ssh #{@username}@#{instance['dns_name']} \"md5sum #{file}\" | sed 's/ .*//'` }
  results = outputs.inject(Hash.new) do |hash, output|
    host, md5 = output
    hash[md5.chomp] ||= []
    hash[md5.chomp] << host
    hash
  end
  results.keys.sort{|m1, m2| results[m2].length <=> results[m1].length}.each do |md5|
    puts "#{(results[md5].length).to_s.rjust(2)}x #{md5}: #{cut(results[md5].sort.join(" "), 80)}"
  end
end
### END commands


### BEGIN window handling

def open_tab(name, cmd_1, cmd2)
  `#{home_dir}/open_iterm_tab.sh "#{name}" "#{cmd_1}" "#{cmd2}"`
end

def open_window()
  puts "opening new window..."
  `#{home_dir}/open_iterm_window.sh`
end

def open_tabs_with_hosts_for_role(role)
  instances_of_role(role).each do |instance|
    open_tab instance['nickname'].upcase, "ssh\ -A\ #{@username}@#{instance['dns_name']}", "sudo -sEH"
  end
end
### END window handling


### BEGIN "MAIN" program

load_config
cloud_shortcut, command, details = parse_command_line
unless command == 'deploy'
  load_cloud cloud_shortcut
  abort "Unknown cloud #{cloud_shortcut.inspect}, use full cloud name or specify a shortcut in .iScale config file." unless cloud
end

case command
when 'roles'
  list_roles
when 'load'
  details.each do |detail|
    if !(roles = filtered_roles(detail)).empty?
      roles.each do |role|
        load_for_hosts_of_role(role)
      end
    else
      abort "Unknown role #{detail.inspect}, use command 'roles' to list all available roles."
    end
  end
when 'open'
  open_window unless details.size == 1 && filtered_roles(details.first).size == 0 # don't open new window if we want connection to a single instance
  details.each do |detail|
    if !(roles = filtered_roles(detail)).empty?
      roles.each do |role|
        open_tabs_with_hosts_for_role(role)
      end
    else
      instance = filtered_instance(detail)
      if instance
        open_tab detail.upcase, "ssh\ -A\ #{@username}@#{instance['dns_name']}", "sudo -sEH"
      else
        abort "Unknown role or host #{detail.inspect}, use command 'roles' to list all available roles."
      end
    end
  end
when 'cpu'
  details.each do |detail|
    if !(roles = filtered_roles(detail)).empty?
      roles.each do |role|
        cpu_for_hosts_of_role(role)
      end
    else
      abort "Unknown role #{detail.inspect}, use command 'roles' to list all available roles."
    end
  end
when 'execute'
  if !(roles = filtered_roles(details.first)).empty?
    run_commands_on_role(roles.first, details[1..-1].join(' '))
  else
    abort "Unknown role #{details.first.inspect}, use command 'roles' to list all available roles."
  end
when 'deploy'
  deploy_application(details)
when 'md5sum'
  # still experimental, use with care
  if !(roles = filtered_roles(details.first)).empty?
    check_md5_for_hosts_of_role(roles.first, details[1..-1].join(' '))
  else
    abort "Unknown role #{details.first.inspect}, use command 'roles' to list all available roles."
  end
else
  abort "Unknown command '#{command}'"
end
### END "MAIN" program
data/config/iScale.yml
ADDED
data/config/ssh_config
ADDED
data/iscale.gemspec
ADDED
@@ -0,0 +1,20 @@
# -*- encoding: utf-8 -*-
$:.push File.expand_path("../lib", __FILE__)
require "iscale/version"

Gem::Specification.new do |s|
  s.name        = "iscale"
  s.version     = IScale::VERSION
  s.authors     = ["Jesper Richter-Reichhelm"]
  s.email       = ["jesper@wooga.com"]
  s.homepage    = "https://github.com/wooga/iScale"
  s.summary     = "Scalarium API CLI tool"
  s.description = "Manage your scalarium cluster from the command line"
  s.add_dependency "rest-client"
  s.add_dependency "json"
  s.add_development_dependency "rake"
  s.files         = `git ls-files`.split("\n")
  s.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
  s.executables   = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
  s.require_paths = ["lib"]
end
data/lib/iScale.rb
ADDED
data/open_iterm_tab.sh
ADDED
@@ -0,0 +1,16 @@
#!/bin/sh

osascript <<ENDSCRIPT
tell application "iTerm"
  activate
  set myterm to (last terminal)
  tell myterm
    launch session "$1"
    tell the last session
      write text "$2"
      write text "$3"
      set name to "$1"
    end tell
  end tell
end tell
ENDSCRIPT
metadata
ADDED
@@ -0,0 +1,100 @@
--- !ruby/object:Gem::Specification
name: iscale
version: !ruby/object:Gem::Version
  prerelease:
  version: 0.0.1
platform: ruby
authors:
- Jesper Richter-Reichhelm
autorequire:
bindir: bin
cert_chain: []

date: 2011-07-10 00:00:00 +02:00
default_executable:
dependencies:
- !ruby/object:Gem::Dependency
  name: rest-client
  prerelease: false
  requirement: &id001 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: "0"
  type: :runtime
  version_requirements: *id001
- !ruby/object:Gem::Dependency
  name: json
  prerelease: false
  requirement: &id002 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: "0"
  type: :runtime
  version_requirements: *id002
- !ruby/object:Gem::Dependency
  name: rake
  prerelease: false
  requirement: &id003 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: "0"
  type: :development
  version_requirements: *id003
description: Manage your scalarium cluster from the command line
email:
- jesper@wooga.com
executables:
- iScale
extensions: []

extra_rdoc_files: []

files:
- .gitignore
- Gemfile
- README.md
- Rakefile
- bin/iScale
- config/iScale.yml
- config/ssh_config
- iscale.gemspec
- lib/iScale.rb
- lib/iscale/version.rb
- open_iterm_tab.sh
- open_iterm_window.sh
has_rdoc: true
homepage: https://github.com/wooga/iScale
licenses: []

post_install_message:
rdoc_options: []

require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: "0"
required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: "0"
requirements: []

rubyforge_project:
rubygems_version: 1.6.2
signing_key:
specification_version: 3
summary: Scalarium API CLI tool
test_files: []