cluster_chef 3.0.5

Files changed (46)
  1. data/.gitignore +51 -0
  2. data/.rspec +3 -0
  3. data/CHANGELOG.md +63 -0
  4. data/Gemfile +18 -0
  5. data/LICENSE +201 -0
  6. data/README.md +332 -0
  7. data/Rakefile +92 -0
  8. data/TODO.md +8 -0
  9. data/VERSION +1 -0
  10. data/chefignore +41 -0
  11. data/cluster_chef.gemspec +115 -0
  12. data/clusters/website_demo.rb +65 -0
  13. data/config/client.rb +59 -0
  14. data/lib/cluster_chef/chef_layer.rb +297 -0
  15. data/lib/cluster_chef/cloud.rb +409 -0
  16. data/lib/cluster_chef/cluster.rb +118 -0
  17. data/lib/cluster_chef/compute.rb +144 -0
  18. data/lib/cluster_chef/cookbook_munger/README.md.erb +47 -0
  19. data/lib/cluster_chef/cookbook_munger/licenses.yaml +16 -0
  20. data/lib/cluster_chef/cookbook_munger/metadata.rb.erb +23 -0
  21. data/lib/cluster_chef/cookbook_munger.rb +588 -0
  22. data/lib/cluster_chef/deprecated.rb +33 -0
  23. data/lib/cluster_chef/discovery.rb +158 -0
  24. data/lib/cluster_chef/dsl_object.rb +123 -0
  25. data/lib/cluster_chef/facet.rb +144 -0
  26. data/lib/cluster_chef/fog_layer.rb +134 -0
  27. data/lib/cluster_chef/private_key.rb +110 -0
  28. data/lib/cluster_chef/role_implications.rb +49 -0
  29. data/lib/cluster_chef/security_group.rb +103 -0
  30. data/lib/cluster_chef/server.rb +265 -0
  31. data/lib/cluster_chef/server_slice.rb +259 -0
  32. data/lib/cluster_chef/volume.rb +93 -0
  33. data/lib/cluster_chef.rb +137 -0
  34. data/notes/aws_console_screenshot.jpg +0 -0
  35. data/rspec.watchr +29 -0
  36. data/spec/cluster_chef/cluster_spec.rb +13 -0
  37. data/spec/cluster_chef/facet_spec.rb +70 -0
  38. data/spec/cluster_chef/server_slice_spec.rb +19 -0
  39. data/spec/cluster_chef/server_spec.rb +112 -0
  40. data/spec/cluster_chef_spec.rb +193 -0
  41. data/spec/spec_helper/dummy_chef.rb +25 -0
  42. data/spec/spec_helper.rb +50 -0
  43. data/spec/test_config.rb +20 -0
  44. data/tasks/chef_config.rb +38 -0
  45. data/tasks/jeweler_use_alt_branch.rb +47 -0
  46. metadata +227 -0
@@ -0,0 +1,49 @@
+ module ClusterChef
+   ComputeBuilder.class_eval do
+
+     role_implication "nfs_server" do
+       self.cloud.security_group "nfs_server" do
+         authorize_group "nfs_client"
+       end
+     end
+
+     role_implication "nfs_client" do
+       self.cloud.security_group "nfs_client"
+     end
+
+     role_implication "ssh" do
+       self.cloud.security_group 'ssh' do
+         authorize_port_range 22..22
+       end
+     end
+
+     role_implication "chef_server" do
+       self.cloud.security_group "chef_server" do
+         authorize_port_range 4000..4000 # chef-server-api
+         authorize_port_range 4040..4040 # chef-server-webui
+       end
+     end
+
+     # web server? add the group "web_server" to open the web holes
+     role_implication "web_server" do
+       self.cloud.security_group("#{cluster_name}-web_server") do
+         authorize_port_range 80..80
+         authorize_port_range 443..443
+       end
+     end
+
+     # if you're a redis server, open the port and authorize redis clients in your group to talk to you
+     role_implication("redis_server") do
+       cluster_name = self.cluster_name # hack: bring cluster_name into scope
+       self.cloud.security_group("#{cluster_name}-redis_server") do
+         authorize_group("#{cluster_name}-redis_client")
+       end
+     end
+
+     # redis_clients gain rights to the redis_server
+     role_implication("redis_client") do
+       self.cloud.security_group("#{cluster_name}-redis_client")
+     end
+
+   end
+ end
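
For orientation, the snippet below is a hedged sketch (not part of this changeset) of how these implications get triggered: naming a role in a cluster or facet definition pulls in the matching security groups. The 'demo_site' cluster and 'webnode' facet are invented for illustration.

    # Illustrative only -- assumes the cluster DSL used elsewhere in this gem
    # (e.g. clusters/website_demo.rb); names here are made up.
    ClusterChef.cluster 'demo_site' do
      facet :webnode do
        instances 2
        role 'ssh'          # implies security group 'ssh', opening port 22
        role 'web_server'   # implies group 'demo_site-web_server', opening ports 80 and 443
      end
    end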
@@ -0,0 +1,103 @@
+ module ClusterChef
+   module Cloud
+
+     class SecurityGroup < DslObject
+       has_keys :name, :description, :owner_id
+       attr_reader :group_authorizations
+       attr_reader :range_authorizations
+
+       def initialize cloud, group_name, group_description=nil, group_owner_id=nil
+         super()
+         set :name, group_name.to_s
+         description group_description || "cluster_chef generated group #{group_name}"
+         @cloud                = cloud
+         @group_authorizations = []
+         @range_authorizations = []
+         owner_id group_owner_id || Chef::Config[:knife][:aws_account_id]
+       end
+
+       @@all = nil
+       def all
+         self.class.all
+       end
+       def self.all
+         return @@all if @@all
+         get_all
+       end
+       def self.get_all
+         groups_list = ClusterChef.fog_connection.security_groups.all
+         @@all = groups_list.inject(Mash.new) do |hsh, group|
+           hsh[group.name] = group ; hsh
+         end
+       end
+
+       def get
+         all[name] || ClusterChef.fog_connection.security_groups.get(name)
+       end
+
+       def self.get_or_create group_name, description
+         group = all[group_name] || ClusterChef.fog_connection.security_groups.get(group_name)
+         if ! group
+           self.step(group_name, "creating (#{description})", :blue)
+           group = all[group_name] = ClusterChef.fog_connection.security_groups.new(:name => group_name, :description => description, :connection => ClusterChef.fog_connection)
+           group.save
+         end
+         group
+       end
+
+       def authorize_group_and_owner group, owner_id=nil
+         @group_authorizations << [group.to_s, owner_id]
+       end
+
+       # Alias for authorize_group_and_owner
+       def authorize_group *args
+         authorize_group_and_owner(*args)
+       end
+
+       def authorize_port_range range, cidr_ip = '0.0.0.0/0', ip_protocol = 'tcp'
+         range = (range .. range) if range.is_a?(Integer)
+         @range_authorizations << [range, cidr_ip, ip_protocol]
+       end
+
+       def group_permission_already_set? group, authed_group, authed_owner
+         return false if group.ip_permissions.nil?
+         group.ip_permissions.any? do |existing_permission|
+           existing_permission["groups"].include?({"userId"=>authed_owner, "groupName"=>authed_group}) &&
+             existing_permission["fromPort"] == 1 &&
+             existing_permission["toPort"]   == 65535
+         end
+       end
+
+       def range_permission_already_set? group, range, cidr_ip, ip_protocol
+         return false if group.ip_permissions.nil?
+         group.ip_permissions.include?({"groups"=>[], "ipRanges"=>[{"cidrIp"=>cidr_ip}], "ipProtocol"=>ip_protocol, "fromPort"=>range.first, "toPort"=>range.last})
+       end
+
+       def run
+         group = self.class.get_or_create name, description
+         @group_authorizations.uniq.each do |authed_group, authed_owner|
+           authed_owner ||= self.owner_id
+           next if group_permission_already_set?(group, authed_group, authed_owner)
+           step("authorizing access from all machines in #{authed_group}", :blue)
+           self.class.get_or_create(authed_group, "Authorized to access nfs server")
+           begin  group.authorize_group_and_owner(authed_group, authed_owner)
+           rescue StandardError => e ; ui.warn e ; end
+         end
+         @range_authorizations.uniq.each do |range, cidr_ip, ip_protocol|
+           next if range_permission_already_set?(group, range, cidr_ip, ip_protocol)
+           step("opening #{ip_protocol} ports #{range} to #{cidr_ip}", :blue)
+           begin  group.authorize_port_range(range, { :cidr_ip => cidr_ip, :ip_protocol => ip_protocol })
+           rescue StandardError => e ; ui.warn e ; end
+         end
+       end
+
+       def self.step(group_name, desc, *style)
+         ui.info(" group #{"%-15s" % (group_name+":")}\t#{ui.color(desc.to_s, *style)}")
+       end
+       def step(desc, *style)
+         self.class.step(self.name, desc, *style)
+       end
+
+     end
+   end
+ end
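
As a usage sketch (not from this changeset): the authorize_* calls only queue rules in memory; nothing reaches the cloud until #run creates the group through Fog and applies whatever is not already set. The group name and port below are invented, and 'cloud' stands for the enclosing cloud description object passed to the constructor.

    # Sketch only -- names and port are illustrative.
    sg = ClusterChef::Cloud::SecurityGroup.new(cloud, 'demo_site-redis_server')
    sg.authorize_group      'demo_site-redis_client'  # group-to-group access (ports 1..65535)
    sg.authorize_port_range 6379..6379                # a specific TCP range, '0.0.0.0/0' by default
    sg.run                                            # create if missing, then authorize anything new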
@@ -0,0 +1,265 @@
+ module ClusterChef
+
+   #
+   # A server is a specific (logical) member of a facet within a cluster.
+   #
+   # It may have extra attributes if it also exists in the Chef server,
+   # or if it exists in the real world (as revealed by Fog)
+   #
+   class Server < ClusterChef::ComputeBuilder
+     attr_reader   :cluster, :facet, :facet_index, :tags
+     attr_accessor :chef_node, :fog_server
+
+     @@all ||= Mash.new
+
+     def initialize facet, idx
+       @cluster     = facet.cluster
+       @facet       = facet
+       @facet_index = idx
+       @fullname    = [cluster_name, facet_name, facet_index].join('-')
+       super(@fullname)
+       @tags = { "name" => name, "cluster" => cluster_name, "facet" => facet_name, "index" => facet_index, }
+       ui.warn("Duplicate server #{[self, facet.name, idx]} vs #{@@all[fullname]}") if @@all[fullname]
+       @@all[fullname] = self
+     end
+
+     def fullname fn=nil
+       @fullname = fn if fn
+       @fullname
+     end
+
+     def cluster_name
+       cluster.name
+     end
+
+     def facet_name
+       facet.name
+     end
+
+     def servers
+       ClusterChef::ServerGroup.new(cluster, [self])
+     end
+
+     def bogosity val=nil
+       @settings[:bogosity] = val  if not val.nil?
+       return @settings[:bogosity] if not @settings[:bogosity].nil?
+       return :bogus_facet         if facet.bogus?
+       # return :out_of_range      if (self.facet_index.to_i >= facet.instances)
+       false
+     end
+
+     def in_cloud?
+       !! fog_server
+     end
+
+     def in_chef?
+       chef_node || chef_client
+     end
+
+     def has_cloud_state?(*states)
+       in_cloud? && states.flatten.include?(fog_server.state)
+     end
+
+     def exists?
+       created? || in_chef?
+     end
+
+     def created?
+       in_cloud? && (not ['terminated', 'shutting-down'].include?(fog_server.state))
+     end
+
+     def running?
+       has_cloud_state?('running')
+     end
+
+     def startable?
+       has_cloud_state?('stopped')
+     end
+
+     def launchable?
+       not created?
+     end
+
+     def sshable?
+       in_chef?
+     end
+
+     def killable?
+       in_chef? || created?
+     end
+
+     def to_s
+       super[0..-3] + " chef: #{in_chef? && chef_node.name} fog: #{in_cloud? && fog_server.id}}>"
+     end
+
+     #
+     # Attributes
+     #
+
+     def tag key, value=nil
+       if value then @tags[key] = value ; end
+       @tags[key]
+     end
+
+     #
+     # Resolve:
+     #
+     def resolve!
+       reverse_merge!(facet)
+       reverse_merge!(cluster)
+       @settings[:run_list] = combined_run_list
+       #
+       cloud.reverse_merge!(facet.cloud)
+       cloud.reverse_merge!(cluster.cloud)
+       #
+       cloud.user_data({
+           :chef_server            => Chef::Config.chef_server_url,
+           :validation_client_name => Chef::Config.validation_client_name,
+           #
+           :node_name              => fullname,
+           :cluster_name           => cluster_name,
+           :facet_name             => facet_name,
+           :facet_index            => facet_index,
+           #
+           :run_list               => run_list,
+         })
+       #
+       if client_key.body then cloud.user_data({ :client_key => client_key.body, })
+       else                    cloud.user_data({ :validation_key => cloud.validation_key }) ; end
+       cloud.keypair(cluster_name) if cloud.keypair.nil?
+       #
+       self
+     end
+
+     #
+     # Assembles the combined run list.
+     #
+     # * run_list :first  items -- cluster, then facet, then server
+     # * run_list :normal items -- cluster, then facet, then server
+     # * run_list :last   items -- cluster, then facet, then server
+     #
+     #     ClusterChef.cluster(:my_cluster) do
+     #       role('f', :last)
+     #       role('c')
+     #       facet(:my_facet) do
+     #         role('d')
+     #         role('e')
+     #         role('b', :first)
+     #         role('h', :last)
+     #       end
+     #       role('a', :first)
+     #       role('g', :last)
+     #     end
+     #
+     # produces
+     #     cluster list  [a]  [c]   [fg]
+     #     facet list    [b]  [de]  [h]
+     #
+     # yielding run_list
+     #     ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
+     #
+     # Avoid duplicate conflicting declarations. If you define something more
+     # than once, the *earliest encountered* declaration wins, even if it is
+     # elsewhere marked :last.
+     #
+     def combined_run_list
+       cg = @cluster.run_list_groups
+       fg = @facet.run_list_groups
+       sg = self.run_list_groups
+       [ cg[:first],  fg[:first],  sg[:first],
+         cg[:normal], fg[:normal], sg[:normal],
+         cg[:last],   fg[:last],   sg[:last], ].flatten.compact.uniq
+     end
+
+     #
+     # This prepares a composited view of the volumes -- it shows the cluster
+     # definition overlaid by the facet definition overlaid by the server
+     # definition.
+     #
+     # This method *does* auto-vivify an empty volume declaration on the server,
+     # but doesn't modify it.
+     #
+     # This code is pretty smelly, but so is the resolve! behavior. Advice welcome.
+     #
+     def composite_volumes
+       vols = {}
+       facet.volumes.each do |vol_name, vol|
+         self.volumes[vol_name] ||= ClusterChef::Volume.new(:parent => self, :name => vol_name)
+         vols[vol_name]         ||= self.volumes[vol_name].dup
+         vols[vol_name].reverse_merge!(vol)
+       end
+       cluster.volumes.each do |vol_name, vol|
+         self.volumes[vol_name] ||= ClusterChef::Volume.new(:parent => self, :name => vol_name)
+         vols[vol_name]         ||= self.volumes[vol_name].dup
+         vols[vol_name].reverse_merge!(vol)
+       end
+       vols.each{|vol_name, vol| vol.availability_zone self.default_availability_zone }
+       vols
+     end
+
+     # FIXME -- this will break on some edge case where a bogus node is
+     # discovered after everything is resolve!d
+     def default_availability_zone
+       cloud.default_availability_zone
+     end
+
+     #
+     # retrieval
+     #
+     def self.get(cluster_name, facet_name, facet_index)
+       cluster = ClusterChef.cluster(cluster_name)
+       had_facet = cluster.has_facet?(facet_name)
+       facet = cluster.facet(facet_name)
+       facet.bogosity true unless had_facet
+       had_server = facet.has_server?( facet_index )
+       server = facet.server(facet_index)
+       server.bogosity :not_defined_in_facet unless had_server
+       return server
+     end
+
+     def self.all
+       @@all
+     end
+
+     #
+     # Actions!
+     #
+
+     def sync_to_cloud
+       step "Syncing to cloud"
+       attach_volumes
+       create_tags
+       associate_public_ip
+     end
+
+     def sync_to_chef
+       step "Syncing to chef server"
+       sync_chef_node
+       true
+     end
+
+     # FIXME: a lot of AWS logic in here. This probably lives in the facet.cloud,
+     # except for the one or two things that come from the facet
+     def create_server
+       return nil if created? # only create a server if it does not already exist
+       fog_create_server
+     end
+
+     def create_tags
+       return unless created?
+       step(" labeling servers and volumes")
+       fog_create_tags(fog_server, self.fullname, tags)
+       composite_volumes.each do |vol_name, vol|
+         if vol.fog_volume
+           fog_create_tags(vol.fog_volume, vol.desc,
+             { "server" => self.fullname, "name" => "#{name}-#{vol.name}",
+               "device" => vol.device, "mount_point" => vol.mount_point,
+               "cluster" => cluster_name, "facet" => facet_name, "index" => facet_index, })
+         end
+       end
+     end
+
+     def block_device_mapping
+       composite_volumes.values.map(&:block_device_mapping).compact
+     end
+
+   end
+ end
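
A hedged sketch of the lookup-and-launch flow above (the cluster and facet names are invented): Server.get does not raise on unknown names, it marks the result as bogus instead, and create_server is a no-op for anything already running.

    # Sketch only -- 'demo_site'/'webnode' are illustrative names.
    svr = ClusterChef::Server.get('demo_site', 'webnode', 0)
    svr.bogosity        # => false, or a symbol such as :not_defined_in_facet
    svr.created?        # true when a live (non-terminated) Fog instance is attached
    svr.create_server   # returns nil if created?, otherwise calls fog_create_server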
@@ -0,0 +1,259 @@
+ module ClusterChef
+   #
+   # A server slice is a set of actual or implied servers.
+   #
+   # The idea is that we want to be able to smoothly roll up settings
+   #
+   #
+   class ServerSlice < ClusterChef::DslObject
+     attr_reader :name, :servers, :cluster
+
+     def initialize cluster, servers
+       super()
+       @name    = "#{cluster.name} slice"
+       @cluster = cluster
+       @servers = servers
+     end
+
+     #
+     # Enumerable
+     #
+     include Enumerable
+     def each(&block)
+       @servers.each(&block)
+     end
+     def length
+       @servers.length
+     end
+     def empty?
+       length == 0
+     end
+     [:select, :find_all, :reject, :detect, :find, :drop_while].each do |method|
+       define_method(method) do |*args, &block|
+         ServerSlice.new cluster, @servers.send(method, *args, &block)
+       end
+     end
+
+     # Return the collection of servers that are not yet 'created'
+     def uncreated_servers
+       select{|svr| not svr.created? }
+     end
+
+     def bogus_servers
+       select(&:bogus?)
+     end
+
+     #
+     # Info!
+     #
+
+     def chef_nodes
+       servers.map(&:chef_node).compact
+     end
+
+     def fog_servers
+       servers.map(&:fog_server).compact
+     end
+
+     def security_groups
+       sg = {}
+       servers.each{|svr| sg.merge!(svr.cloud.security_groups) }
+       sg
+     end
+
+     def facets
+       servers.map(&:facet)
+     end
+
+     def chef_roles
+       [ cluster.chef_roles, facets.map(&:chef_roles) ].flatten.compact.uniq
+     end
+
+     # hack -- take the ssh_identity_file from the first server.
+     def ssh_identity_file
+       return if servers.empty?
+       servers.first.cloud.ssh_identity_file
+     end
+
+     #
+     # Actions!
+     #
+
+     def start
+       delegate_to_fog_servers( :start  )
+       delegate_to_fog_servers( :reload )
+     end
+
+     def stop
+       delegate_to_fog_servers( :stop   )
+       delegate_to_fog_servers( :reload )
+     end
+
+     def destroy
+       delegate_to_fog_servers( :destroy )
+       delegate_to_fog_servers( :reload  )
+     end
+
+     def reload
+       delegate_to_fog_servers( :reload )
+     end
+
+     def create_servers
+       delegate_to_servers( :create_server )
+     end
+
+     def delete_chef
+       delegate_to_servers( :delete_chef, true )
+     end
+
+     def sync_to_cloud
+       sync_keypairs
+       delegate_to_servers( :sync_to_cloud )
+     end
+
+     def sync_to_chef
+       sync_roles
+       delegate_to_servers( :sync_to_chef )
+     end
+
+     #
+     # Display!
+     #
+
+     # FIXME: this is a jumble. we need to pass it in some other way.
+
+     MINIMAL_HEADINGS  = ["Name", "Chef?", "State", "InstanceID", "Public IP", "Private IP", "Created At"].to_set.freeze
+     DEFAULT_HEADINGS  = (MINIMAL_HEADINGS + ['Flavor', 'AZ', 'Env']).freeze
+     EXPANDED_HEADINGS = DEFAULT_HEADINGS + ['Image', 'Volumes', 'Elastic IP', 'SSH Key']
+
+     MACHINE_STATE_COLORS = {
+       'running'       => :green,
+       'pending'       => :yellow,
+       'stopping'      => :magenta,
+       'shutting-down' => :magenta,
+       'stopped'       => :cyan,
+       'terminated'    => :blue,
+       'not running'   => :blue,
+     }
+
+     #
+     # This is a generic display routine for cluster-like sets of nodes. If you
+     # call it with no args, you get the basic table that knife cluster show
+     # draws. If you give it an array of strings, you can override the order and
+     # headings displayed. If you also give it a block you can add your own logic
+     # for generating content. The block is given a ClusterChef::Server instance
+     # for each item in the collection and should return a hash of Name, Value
+     # pairs to merge into the minimal fields.
+     #
+     def display hh = :default
+       headings =
+         case hh
+         when :minimal  then MINIMAL_HEADINGS
+         when :default  then DEFAULT_HEADINGS
+         when :expanded then EXPANDED_HEADINGS
+         else hh.to_set end
+       headings += ["Bogus"] if servers.any?(&:bogus?)
+       # probably not necessary any more
+       # servers = servers.sort{ |a,b| (a.facet_name <=> b.facet_name)*9 + (a.facet_index.to_i <=> b.facet_index.to_i)*3 + (a.facet_index <=> b.facet_index) }
+       defined_data = servers.map do |svr|
+         hsh = {
+           "Name"  => svr.fullname,
+           "Facet" => svr.facet_name,
+           "Index" => svr.facet_index,
+           "Chef?" => (svr.chef_node? ? "yes" : "[red]no[reset]"),
+           "Bogus" => (svr.bogus? ? "[red]#{svr.bogosity}[reset]" : ''),
+           "Env"   => svr.environment,
+         }
+         # if (cs = svr.chef_server)
+         #   hsh.merge!(
+         #     "Env" => cs.environment,
+         #   )
+         # end
+         if (fs = svr.fog_server)
+           hsh.merge!(
+             "InstanceID" => (fs.id && fs.id.length > 0) ? fs.id : "???",
+             "Flavor"     => fs.flavor_id,
+             "Image"      => fs.image_id,
+             "AZ"         => fs.availability_zone,
+             "SSH Key"    => fs.key_name,
+             "State"      => "[#{MACHINE_STATE_COLORS[fs.state] || 'white'}]#{fs.state}[reset]",
+             "Public IP"  => fs.public_ip_address,
+             "Private IP" => fs.private_ip_address,
+             "Created At" => fs.created_at.strftime("%Y%m%d-%H%M%S")
+           )
+         else
+           hsh["State"] = "not running"
+         end
+         hsh['Volumes'] = []
+         svr.composite_volumes.each do |name, vol|
+           if    vol.ephemeral_device? then next
+           elsif vol.volume_id         then hsh['Volumes'] << vol.volume_id
+           elsif vol.create_at_launch? then hsh['Volumes'] << vol.snapshot_id
+           end
+         end
+         hsh['Volumes'] = hsh['Volumes'].join(',')
+         hsh['Elastic IP'] = svr.cloud.public_ip if svr.cloud.public_ip
+         if block_given?
+           extra_info = yield(svr)
+           hsh.merge!(extra_info)
+           headings += extra_info.keys
+         end
+         hsh
+       end
+       if defined_data.empty?
+         ui.info "Nothing to report"
+       else
+         Formatador.display_compact_table(defined_data, headings.to_a)
+       end
+     end
+
+     def to_s
+       str = super
+       str[0..-2] + " #{@servers.map(&:fullname)}>"
+     end
+
+     def joined_names
+       map(&:name).join(", ").gsub(/, ([^,]*)$/, ' and \1')
+     end
+
+     # Calls block on each server in parallel, each in its own thread
+     #
+     # @example
+     #   target = ClusterChef::Cluster.slice('web_server')
+     #   target.parallelize{|svr| svr.launch }
+     #
+     # @yield each server, in turn
+     #
+     # @return array (in same order as servers) of each block's result
+     def parallelize
+       servers.map do |svr|
+         sleep(0.1) # avoid hammering with simultaneous requests
+         Thread.new(svr){|svr| yield(svr) }
+       end
+     end
+
+     protected
+
+     # Helper method for iterating through the servers to do things
+     #
+     # @param [Symbol]  method   -- method to call on each server
+     # @param [Boolean] threaded -- execute each call in its own thread
+     #
+     # @return array (in same order as servers) of results for that method
+     def delegate_to_servers method, threaded = true
+       if threaded # Call in threads
+         threads = parallelize{|svr| svr.send(method) }
+         threads.map{|t| t.join.value } # Wait, returning array of results
+       else # Call the method for each server sequentially
+         servers.map{|svr| svr.send(method) }
+       end
+     end
+
+     def delegate_to_fog_servers method
+       fog_servers.compact.map do |fs|
+         fs.send(method)
+       end
+     end
+
+   end
+ end
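
Tying display and the delegation helpers together, a hedged usage sketch (the slice lookup follows the @example in parallelize above; the extra 'Launched' column is invented for illustration):

    # Sketch only -- extra columns come back from the block and are merged per server.
    slice = ClusterChef::Cluster.slice('web_server')
    slice.display(['Name', 'State', 'Public IP', 'Launched']) do |svr|
      { 'Launched' => (svr.fog_server ? svr.fog_server.created_at.strftime('%Y-%m-%d') : '-') }
    end
    slice.stop    # sends :stop, then :reload, to every attached Fog server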