cluster_chef 3.0.5
- data/.gitignore +51 -0
- data/.rspec +3 -0
- data/CHANGELOG.md +63 -0
- data/Gemfile +18 -0
- data/LICENSE +201 -0
- data/README.md +332 -0
- data/Rakefile +92 -0
- data/TODO.md +8 -0
- data/VERSION +1 -0
- data/chefignore +41 -0
- data/cluster_chef.gemspec +115 -0
- data/clusters/website_demo.rb +65 -0
- data/config/client.rb +59 -0
- data/lib/cluster_chef/chef_layer.rb +297 -0
- data/lib/cluster_chef/cloud.rb +409 -0
- data/lib/cluster_chef/cluster.rb +118 -0
- data/lib/cluster_chef/compute.rb +144 -0
- data/lib/cluster_chef/cookbook_munger/README.md.erb +47 -0
- data/lib/cluster_chef/cookbook_munger/licenses.yaml +16 -0
- data/lib/cluster_chef/cookbook_munger/metadata.rb.erb +23 -0
- data/lib/cluster_chef/cookbook_munger.rb +588 -0
- data/lib/cluster_chef/deprecated.rb +33 -0
- data/lib/cluster_chef/discovery.rb +158 -0
- data/lib/cluster_chef/dsl_object.rb +123 -0
- data/lib/cluster_chef/facet.rb +144 -0
- data/lib/cluster_chef/fog_layer.rb +134 -0
- data/lib/cluster_chef/private_key.rb +110 -0
- data/lib/cluster_chef/role_implications.rb +49 -0
- data/lib/cluster_chef/security_group.rb +103 -0
- data/lib/cluster_chef/server.rb +265 -0
- data/lib/cluster_chef/server_slice.rb +259 -0
- data/lib/cluster_chef/volume.rb +93 -0
- data/lib/cluster_chef.rb +137 -0
- data/notes/aws_console_screenshot.jpg +0 -0
- data/rspec.watchr +29 -0
- data/spec/cluster_chef/cluster_spec.rb +13 -0
- data/spec/cluster_chef/facet_spec.rb +70 -0
- data/spec/cluster_chef/server_slice_spec.rb +19 -0
- data/spec/cluster_chef/server_spec.rb +112 -0
- data/spec/cluster_chef_spec.rb +193 -0
- data/spec/spec_helper/dummy_chef.rb +25 -0
- data/spec/spec_helper.rb +50 -0
- data/spec/test_config.rb +20 -0
- data/tasks/chef_config.rb +38 -0
- data/tasks/jeweler_use_alt_branch.rb +47 -0
- metadata +227 -0
data/lib/cluster_chef/discovery.rb
@@ -0,0 +1,158 @@
module ClusterChef
  class Cluster

    def discover!
      @aws_instance_hash = {}
      discover_cluster_chef!
      discover_chef_nodes!
      discover_fog_servers!
      discover_chef_clients!
      discover_volumes!
    end

    def chef_clients
      return @chef_clients if @chef_clients
      @chef_clients = []
      Chef::Search::Query.new.search(:client, "clientname:#{cluster_name}-*") do |client_hsh|
        # Return values from Chef::Search seem to be inconsistent across chef
        # versions (sometimes a hash, sometimes an object). Fix if necessary.
        client_hsh = Chef::ApiClient.json_create(client_hsh) unless client_hsh.is_a?(Chef::ApiClient)
        @chef_clients.push( client_hsh )
      end
      @chef_clients
    end

    # returns client with the given name if in catalog, nil otherwise
    def find_client(cl_name)
      chef_clients.find{|ccl| ccl.name == cl_name }
    end

    def chef_nodes
      return @chef_nodes if @chef_nodes
      @chef_nodes = []
      Chef::Search::Query.new.search(:node, "cluster_name:#{cluster_name}") do |n|
        @chef_nodes.push(n) unless n.blank? || (n.cluster_name != cluster_name.to_s)
      end
      @chef_nodes
    end

    # returns node with the given name if in catalog, nil otherwise
    def find_node(nd_name)
      chef_nodes.find{|nd| nd.name == nd_name }
    end

    protected

    def fog_servers
      @fog_servers ||= ClusterChef.fog_servers.select{|fs| fs.key_name == cluster_name.to_s && (fs.state != "terminated") }
    end

    # Walk the list of chef nodes and
    # * vivify the server,
    # * associate the chef node
    # * if the chef node knows about its instance id, memorize that for lookup
    #   when we discover cloud instances.
    def discover_chef_nodes!
      chef_nodes.each do |chef_node|
        if chef_node["cluster_name"] && chef_node["facet_name"] && chef_node["facet_index"]
          cluster_name = chef_node["cluster_name"]
          facet_name   = chef_node["facet_name"]
          facet_index  = chef_node["facet_index"]
        elsif chef_node.name
          ( cluster_name, facet_name, facet_index ) = chef_node.name.split(/-/)
        else
          next
        end
        svr = ClusterChef::Server.get(cluster_name, facet_name, facet_index)
        svr.chef_node = chef_node
        @aws_instance_hash[ chef_node.ec2.instance_id ] = svr if chef_node[:ec2] && chef_node.ec2.instance_id
      end
    end

    # Walk the list of servers, asking each to discover its chef client.
    def discover_chef_clients!
      servers.each(&:chef_client)
    end

    # calling #servers vivifies each facet's ClusterChef::Server instances
    def discover_cluster_chef!
      self.servers
    end

    def discover_fog_servers!
      # If the fog server is tagged with cluster/facet/index, then try to
      # locate the corresponding machine in the cluster def
      # Otherwise, try to get to it through mapping the aws instance id
      # to the chef node name found in the chef node
      fog_servers.each do |fs|
        if fs.tags["cluster"] && fs.tags["facet"] && fs.tags["index"] && fs.tags["cluster"] == cluster_name.to_s
          svr = ClusterChef::Server.get(fs.tags["cluster"], fs.tags["facet"], fs.tags["index"])
        elsif @aws_instance_hash[fs.id]
          svr = @aws_instance_hash[fs.id]
        else
          next
        end

        # If there already is a fog server there, then issue a warning and slap
        # the just-discovered one onto a server with an arbitrary index, and
        # mark both bogus
        if existing_fs = svr.fog_server
          if existing_fs.id != fs.id
            ui.warn "Duplicate fog instance found for #{svr.fullname}: #{fs.id} and #{existing_fs.id}!!"
            old_svr = svr
            svr     = old_svr.facet.server(1_000 + svr.facet_index.to_i)
            old_svr.bogosity :duplicate
            svr.bogosity     :duplicate
          end
        end
        svr.fog_server = fs
      end
    end

    def discover_volumes!
      servers.each(&:discover_volumes!)
    end

    def discover_addresses!
      servers.each(&:discover_addresses!)
    end
  end

  def self.fog_connection
    @fog_connection ||= Fog::Compute.new({
        :provider              => 'AWS',
        :aws_access_key_id     => Chef::Config[:knife][:aws_access_key_id],
        :aws_secret_access_key => Chef::Config[:knife][:aws_secret_access_key],
        :region                => Chef::Config[:knife][:region]
      })
  end

  def self.fog_servers
    return @fog_servers if @fog_servers
    Chef::Log.debug("Using fog to catalog all servers")
    @fog_servers = ClusterChef.fog_connection.servers.all
  end

  def self.fog_volumes
    return @fog_volumes if @fog_volumes
    Chef::Log.debug("Using fog to catalog all volumes")
    @fog_volumes ||= ClusterChef.fog_connection.volumes
  end

  def self.fog_addresses
    return @fog_addresses if @fog_addresses
    Chef::Log.debug("Using fog to catalog all addresses")
    @fog_addresses = {}.tap{|hsh| ClusterChef.fog_connection.addresses.each{|fa| hsh[fa.public_ip] = fa } }
  end

  def self.fog_keypairs
    return @fog_keypairs if @fog_keypairs
    Chef::Log.debug("Using fog to catalog all keypairs")
    @fog_keypairs = {}.tap{|hsh| ClusterChef.fog_connection.key_pairs.each{|kp| hsh[kp.name] = kp } }
  end

  def safely *args, &block
    ClusterChef.safely(*args, &block)
  end

end
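The discovery pass above stitches together three views of the cluster: the in-memory cluster definition, the Chef server's nodes and clients, and the EC2 instances fog can see. The following is a minimal sketch (not part of the gem) of driving it; it assumes `cluster` is a ClusterChef::Cluster already built from a definition such as clusters/website_demo.rb, running where knife is configured with AWS credentials. The facet name 'webnode' is hypothetical.

cluster.discover!    # correlate cluster def, chef nodes, chef clients, fog servers and volumes

# node and client names follow the cluster-facet-index convention
node   = cluster.find_node("website_demo-webnode-0")     # nil if chef has no such node
client = cluster.find_client("website_demo-webnode-0")   # nil if chef has no such client

# the module-level fog catalogs are memoized, so repeated discovery passes are cheap
ClusterChef.fog_servers    # every server fog can see for this AWS account
ClusterChef.fog_keypairs   # hash of keypair name => fog keypair object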
data/lib/cluster_chef/dsl_object.rb
@@ -0,0 +1,123 @@
Mash.class_eval do
  def reverse_merge!(other_hash)
    # stupid mash doesn't take a block arg, which breaks the implementation of
    # reverse_merge!
    other_hash.each_pair do |key, value|
      key = convert_key(key)
      regular_writer(key, convert_value(value)) unless has_key?(key)
    end
    self
  end
  def to_mash
    self.dup
  end unless method_defined?(:to_mash)
end

Hash.class_eval do
  def to_mash
    Mash.new(self)
  end unless method_defined?(:to_mash)
end

module ClusterChef
  #
  # Provides magic methods, defined with has_keys
  #
  # @example
  #   class Mom < ClusterChef::DslObject
  #     has_keys(:college, :combat_boots, :fat, :so_fat)
  #   end
  #
  #   class Person
  #     def momma &block
  #       @momma ||= Mom.new
  #       @momma.configure(&block) if block
  #     end
  #   end
  #
  #   yo = Person.new
  #   yo.momma.combat_boots :wears
  #   yo.momma do
  #     fat true
  #     so_fat 'When she sits around the house, she sits *AROUND* the house'
  #   end
  #
  class DslObject
    class_attribute :keys
    self.keys = []

    def initialize(attrs={})
      @settings = attrs.to_mash || Mash.new
    end

    #
    # Defines DSL attributes
    #
    # @param [Array(String)] key_names DSL attribute names
    #
    # @example
    #   class Mom < ClusterChef::DslObject
    #     has_keys(:fat, :so_fat)
    #   end
    #   yer_mom = Mom.new
    #   yer_mom.fat :quite
    #
    def self.has_keys(*key_names)
      key_names.map!(&:to_sym)
      self.keys += key_names
      self.keys.uniq!
      key_names.each do |key|
        next if method_defined?(key)
        define_method(key){|*args| set(key, *args) }
      end
    end

    #
    # Sets the DSL attribute, unless the given value is nil.
    #
    def set(key, val=nil)
      @settings[key.to_s] = val unless val.nil?
      @settings[key.to_s]
    end

    def to_hash
      @settings.to_hash
    end

    def to_mash
      @settings.dup
    end

    def to_s
      "<#{self.class} #{to_hash.inspect}>"
    end

    def reverse_merge!(hsh)
      @settings.reverse_merge!(hsh.to_hash)
    end

    def configure(hsh={}, &block)
      @settings.merge!(hsh.to_hash)
      instance_eval(&block) if block
      self
    end

    # delegate to the knife ui presenter
    def ui()      ClusterChef.ui ; end
    # delegate to the knife ui presenter
    def self.ui() ClusterChef.ui ; end

    def step(desc, *style)
      ui.info(" #{"%-15s" % (name.to_s+":")}\t#{ui.color(desc.to_s, *style)}")
    end

    # helper method for bombing out of a script
    def die(*args) ClusterChef.die(*args) ; end

    # helper method for turning exceptions into warnings
    def safely(*args, &block) ClusterChef.safely(*args, &block) ; end

    # helper method for debugging only
    def dump(*args) args.each{|arg| Chef::Log.debug( arg.inspect ) } end
  end
end
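DslObject is the base for most of the gem's DSL classes: has_keys turns each named attribute into a combined getter/setter, and configure merges a hash and/or instance-evals a block against the object. Here is a small sketch (not from the gem) of those conventions in use; Widget is a purely hypothetical subclass for illustration.

class Widget < ClusterChef::DslObject
  has_keys :size, :device, :keep
end

widget = Widget.new
widget.configure(:device => '/dev/sdk') do
  size 100            # each has_keys attribute is both getter and setter
end
widget.size            # => 100
widget.size(nil)       # nil never overwrites; still 100
widget.reverse_merge!(:keep => true, :size => 1)   # fills only the keys not already set
widget.to_hash         # plain Hash of everything configured so far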
data/lib/cluster_chef/facet.rb
@@ -0,0 +1,144 @@
module ClusterChef
  class Facet < ClusterChef::ComputeBuilder
    attr_reader :cluster
    has_keys :instances

    def initialize cluster, facet_name, attrs={}
      super(facet_name.to_sym, attrs)
      @cluster = cluster
      @servers = Mash.new
      @chef_roles = []
      @settings[:instances] ||= 0
      create_facet_role
      create_facet_security_group unless attrs[:no_security_group]
    end

    def cluster_name
      cluster.name
    end

    def facet_name
      name
    end

    # The auto-generated role for this facet.
    # Instance-evals the given block in the context of that role.
    #
    # @example
    #   facet_role do
    #     override_attributes({
    #       :time_machine => { :transition_speed => 88 },
    #     })
    #   end
    #
    # @return [Chef::Role] The auto-generated role for this facet.
    def facet_role(&block)
      @facet_role.instance_eval( &block ) if block_given?
      @facet_role
    end

    def assign_volume_ids(volume_name, *volume_ids)
      volume_ids.flatten.zip(servers).each do |volume_id, server|
        server.volume(volume_name){ volume_id(volume_id) } if server
      end
    end

    #
    # Retrieve or define the given server
    #
    # @param [Integer] idx -- the index of the desired server
    # @param [Hash] attrs -- attributes to configure on the object
    # @yield a block to execute in the context of the object
    #
    # @return [ClusterChef::Server]
    #
    def server(idx, attrs={}, &block)
      idx = idx.to_i
      @servers[idx] ||= ClusterChef::Server.new(self, idx)
      @servers[idx].configure(attrs, &block)
      @servers[idx]
    end

    # true if the server has been added to this facet or is in range
    def has_server? idx
      (idx.to_i < instances) || @servers.include?(idx.to_i)
    end

    #
    # Slicing
    #

    # All servers in this facet
    #
    # @return [ClusterChef::ServerSlice] slice containing all servers
    def servers
      slice(indexes)
    end

    #
    # A slice of servers from this facet, in index order
    #
    # If +slice_indexes+ is nil, returns all servers.
    # Otherwise, takes the slice given by +slice_indexes+ from this facet.
    #
    # @param [Array, String] slice_indexes -- servers in that facet (or nil for all in facet).
    #
    # @return [ClusterChef::ServerSlice] the requested slice
    def slice(slice_indexes=nil)
      slice_indexes = self.indexes if slice_indexes.blank?
      slice_indexes = indexes_from_intervals(slice_indexes) if slice_indexes.is_a?(String)
      svrs = Array(slice_indexes).map(&:to_i).sort!.select{|idx| has_server?(idx) }.map{|idx| server(idx) }
      ClusterChef::ServerSlice.new(self.cluster, svrs)
    end

    # all valid server indexes
    def valid_indexes
      (0 ... instances).to_a # note the '...'
    end

    # indexes in the 0...instances range plus bogus ones that showed up
    # (probably from chef or fog)
    def indexes
      [@servers.keys, valid_indexes].flatten.compact.uniq.sort
    end

    #
    # Resolve:
    #
    def resolve!
      servers.each(&:resolve!)
    end

    protected

    def create_facet_security_group
      cloud.security_group("#{cluster_name}-#{facet_name}")
    end

    # Creates a chef role named for the facet
    def create_facet_role
      @facet_role_name = "#{cluster_name}_#{facet_name}"
      @facet_role      = new_chef_role(@facet_role_name, cluster, self)
      role(@facet_role_name, :last)
    end

    #
    # Given a string enumerating indexes to select, returns a flat array of
    # indexes. The indexes will be unique but in an arbitrary order.
    #
    # @example
    #   facet = ClusterChef::Facet.new('foo', 'bar')
    #   facet.indexes_from_intervals('1,2-3,8-9,7') # [1, 2, 3, 8, 9, 7]
    #   facet.indexes_from_intervals('1,3-5,4,7')   # [1, 3, 4, 5, 7]
    #
    def indexes_from_intervals intervals
      intervals.split(",").map do |term|
        if    term =~ /^(\d+)-(\d+)$/ then ($1.to_i .. $2.to_i).to_a
        elsif term =~ /^(\d+)$/       then  $1.to_i
        else  ui.warn("Bad interval: #{term}") ; nil
        end
      end.flatten.compact.uniq
    end

  end
end
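The slicing helpers accept either explicit index arrays or interval strings like '0-2,5'. A short sketch (not part of the gem), assuming `facet` is a ClusterChef::Facet whose definition declared `instances 6`:

facet.valid_indexes        # => [0, 1, 2, 3, 4, 5]
facet.server(3)            # vivify (or fetch) the single server at index 3
facet.has_server?(9)       # => false -- out of range and never defined
facet.slice('0-2,5')       # ServerSlice holding servers 0, 1, 2 and 5 (interval string)
facet.slice([1, 3])        # ServerSlice from an explicit index list
facet.servers              # ServerSlice over every known index, bogus ones included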
data/lib/cluster_chef/fog_layer.rb
@@ -0,0 +1,134 @@
module ClusterChef
  #
  # ClusterChef::Server methods that handle Fog action
  #
  Server.class_eval do

    def fog_create_server
      step(" creating cloud server", :green)
      fog_description = fog_description_for_launch
      Chef::Log.debug(JSON.pretty_generate(fog_description))
      safely do
        @fog_server = ClusterChef.fog_connection.servers.create(fog_description)
      end
    end

    def fog_description_for_launch
      {
        :image_id             => cloud.image_id,
        :flavor_id            => cloud.flavor,
        #
        :groups               => cloud.security_groups.keys,
        :key_name             => cloud.keypair.to_s,
        # Fog does not actually create tags when it creates a server.
        :tags                 => {
          :cluster => cluster_name,
          :facet   => facet_name,
          :index   => facet_index, },
        :user_data            => JSON.pretty_generate(cloud.user_data),
        :block_device_mapping => block_device_mapping,
        # :disable_api_termination => cloud.permanent,
        # :instance_initiated_shutdown_behavior => instance_initiated_shutdown_behavior,
        :availability_zone    => self.default_availability_zone,
        :monitoring           => cloud.monitoring,
      }
    end

    #
    # Takes key-value pairs and idempotently sets those tags on the cloud machine
    #
    def fog_create_tags(fog_obj, desc, tags)
      tags.each do |key, value|
        next if fog_obj.tags[key] == value.to_s
        Chef::Log.debug( "tagging #{key} = #{value} on #{desc}" )
        safely do
          ClusterChef.fog_connection.tags.create({
            :key => key, :value => value.to_s, :resource_id => fog_obj.id })
        end
      end
    end

    def fog_address
      address_str = self.cloud.public_ip or return
      ClusterChef.fog_addresses[address_str]
    end

    def discover_volumes!
      composite_volumes.each do |vol_name, vol|
        my_vol = volumes[vol_name]
        next if my_vol.fog_volume
        my_vol.fog_volume = ClusterChef.fog_volumes.find do |fv|
          ( # matches the explicit volume id
            (vol.volume_id && (fv.id == vol.volume_id)) ||
            # OR this server's machine exists, and this volume is attached to
            # it, and in the right place
            ( fog_server && fv.server_id && vol.device &&
              (fv.server_id == fog_server.id) &&
              (fv.device.to_s == vol.device.to_s) ) ||
            # OR this volume is tagged as belonging to this machine
            ( fv.tags.present? &&
              (fv.tags['server'] == self.fullname) &&
              (fv.tags['device'] == vol.device.to_s) )
          )
        end
        next unless my_vol.fog_volume
        my_vol.volume_id(my_vol.fog_volume.id)                        unless my_vol.volume_id.present?
        my_vol.availability_zone(my_vol.fog_volume.availability_zone) unless my_vol.availability_zone.present?
        check_server_id_pairing(my_vol.fog_volume, my_vol.desc)
      end
    end

    def attach_volumes
      return unless in_cloud?
      discover_volumes!
      return if composite_volumes.empty?
      step(" attaching volumes")
      composite_volumes.each do |vol_name, vol|
        next if vol.volume_id.blank? || (vol.attachable != :ebs)
        if (not vol.in_cloud?) then Chef::Log.debug("Volume not found: #{vol.desc}") ; next ; end
        if (vol.has_server?)   then check_server_id_pairing(vol.fog_volume, vol.desc) ; next ; end
        step(" - attaching #{vol.desc} -- #{vol.inspect}", :blue)
        safely do
          vol.fog_volume.device = vol.device
          vol.fog_volume.server = fog_server
        end
      end
    end

    def associate_public_ip
      address = self.cloud.public_ip
      return unless self.in_cloud? && address
      desc = "elastic ip #{address} for #{self.fullname}"
      if (fog_address && fog_address.server_id) then check_server_id_pairing(fog_address, desc) ; return ; end
      safely do
        step(" assigning #{desc}", :blue)
        ClusterChef.fog_connection.associate_address(self.fog_server.id, address)
      end
    end

    def check_server_id_pairing thing, desc
      return unless thing && thing.server_id && self.in_cloud?
      type_of_thing = thing.class.to_s.gsub(/.*::/,"")
      if thing.server_id != self.fog_server.id
        ui.warn "#{type_of_thing} mismatch: #{desc} is on #{thing.server_id} not #{self.fog_server.id}: #{thing.inspect.gsub(/\s+/m,' ')}"
        false
      else
        Chef::Log.debug("#{type_of_thing} paired: #{desc}")
        true
      end
    end

  end

  class ServerSlice
    def sync_keypairs
      step("ensuring keypairs exist")
      keypairs = servers.map{|svr| [svr.cluster.cloud.keypair, svr.cloud.keypair] }.flatten.map(&:to_s).reject(&:blank?).uniq
      keypairs = keypairs - ClusterChef.fog_keypairs.keys
      keypairs.each do |keypair_name|
        keypair_obj = ClusterChef::Ec2Keypair.create!(keypair_name)
        ClusterChef.fog_keypairs[keypair_name] = keypair_obj
      end
    end
  end
end
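As the comment in fog_description_for_launch notes, Fog ignores the :tags entry at launch time, so tagging happens as a separate, idempotent step through fog_create_tags. The following is a rough sketch (not the gem's actual launch orchestration) of how these pieces compose, assuming `svr` is a resolved ClusterChef::Server:

svr.fog_create_server                    # boots the instance described by fog_description_for_launch
svr.fog_create_tags(svr.fog_server, svr.fullname,
  'cluster' => svr.cluster_name, 'facet' => svr.facet_name, 'index' => svr.facet_index)
svr.attach_volumes                       # no-op unless the machine exists and EBS volumes are declared
svr.associate_public_ip                  # only acts if the cloud block requested an elastic IP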
data/lib/cluster_chef/private_key.rb
@@ -0,0 +1,110 @@
require 'fileutils'

module ClusterChef
  #
  # A private key -- chef client key, ssh key, etc.
  #
  # The key is a pro
  class PrivateKey < ClusterChef::DslObject
    attr_reader :name
    attr_reader :proxy
    attr_reader :on_update

    #
    # PrivateKey.new('bob')
    #
    # @yield a block, executed in caller's context, when the body is updated
    # @yieldparam the updated body
    def initialize(name, proxy=nil, &on_update)
      super()
      @name      = name
      @proxy     = proxy
      @on_update = on_update
    end

    def filename
      File.join(key_dir, "#{name}.pem")
    end

    def save
      return unless @body
      if ClusterChef.chef_config[:dry_run]
        Chef::Log.debug(" key #{name} - dry run, not writing out key")
        return
      end
      ui.info( " key #{name} - writing to #{filename}" )
      FileUtils.mkdir_p(File.dirname(filename))
      File.open(filename, "w", 0600){|f| f.print( @body ) }
    end

    def load
      return unless File.exists?(filename)
      self.body = File.read(filename).chomp
    end

    def body=(content)
      @body = content
      on_update.call(content) if on_update
      content
    end

    def self.create!(name, *args, &block)
      obj = self.new(name, *args, &block)
      obj.create_proxy!
      obj
    end

    def to_s
      [super[0..-2], @name, @proxy, @body.to_s[32..64], '...', @body.to_s[-60..-30]].join(" ").gsub(/[\r\n\t]+/,'') + '>'
    end
  end

  class ChefClientKey < PrivateKey
    def body
      return @body if @body
      if proxy && proxy.private_key && (not proxy.private_key.empty?)
        @body = proxy.private_key
      else
        load
      end
      @body
    end

    def key_dir
      Chef::Config.client_key_dir || '/tmp/client_keys'
    end
  end

  class Ec2Keypair < PrivateKey
    def body
      return @body if @body
      if proxy && proxy.private_key && (not proxy.private_key.empty?)
        @body = proxy.private_key
      else
        load
      end
      @body
    end

    def create_proxy!
      safely do
        step(" key #{name} - creating", :green)
        @proxy = ClusterChef.fog_connection.key_pairs.create(:name => name.to_s)
      end
      ClusterChef.fog_keypairs[name] = proxy
      self.body = proxy.private_key
      save
    end

    def key_dir
      if Chef::Config.ec2_key_dir
        return Chef::Config.ec2_key_dir
      else
        dir = "#{ENV['HOME']}/.chef/ec2_keys"
        warn "Please set 'ec2_key_dir' in your knife.rb -- using #{dir} as a default"
        dir
      end
    end
  end

end
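A PrivateKey caches its body, notifies the optional on_update block whenever the body changes, and persists itself as a mode-0600 .pem under key_dir. Here is a sketch (not from the gem) of the Ec2Keypair lifecycle, assuming it runs inside a knife session where ClusterChef.ui and the fog connection are configured; the keypair name is hypothetical.

key = ClusterChef::Ec2Keypair.create!('website_demo') do |body|
  Chef::Log.debug("received #{body.length} bytes of private key material")
end
key.filename    # "#{Chef::Config.ec2_key_dir}/website_demo.pem", or the ~/.chef/ec2_keys fallback
key.load        # on a later run, re-reads the .pem from disk if it exists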