ironfan 3.1.0.rc1
Sign up to get free protection for your applications and to get access to all the features.
- data/.gitignore +51 -0
- data/.rspec +3 -0
- data/CHANGELOG.md +130 -0
- data/Gemfile +26 -0
- data/LICENSE.md +201 -0
- data/README.md +328 -0
- data/Rakefile +104 -0
- data/TODO.md +16 -0
- data/VERSION +1 -0
- data/chefignore +41 -0
- data/cluster_chef-knife.gemspec +123 -0
- data/cluster_chef.gemspec +111 -0
- data/config/client.rb +59 -0
- data/config/proxy.pac +12 -0
- data/config/ubuntu10.04-ironfan.erb +157 -0
- data/config/ubuntu11.10-ironfan.erb +145 -0
- data/ironfan.gemspec +121 -0
- data/lib/chef/knife/bootstrap/ubuntu10.04-ironfan.erb +157 -0
- data/lib/chef/knife/bootstrap/ubuntu11.10-ironfan.erb +145 -0
- data/lib/chef/knife/cluster_bootstrap.rb +74 -0
- data/lib/chef/knife/cluster_kick.rb +94 -0
- data/lib/chef/knife/cluster_kill.rb +73 -0
- data/lib/chef/knife/cluster_launch.rb +164 -0
- data/lib/chef/knife/cluster_list.rb +50 -0
- data/lib/chef/knife/cluster_proxy.rb +126 -0
- data/lib/chef/knife/cluster_show.rb +61 -0
- data/lib/chef/knife/cluster_ssh.rb +141 -0
- data/lib/chef/knife/cluster_start.rb +40 -0
- data/lib/chef/knife/cluster_stop.rb +43 -0
- data/lib/chef/knife/cluster_sync.rb +77 -0
- data/lib/chef/knife/generic_command.rb +66 -0
- data/lib/chef/knife/knife_common.rb +195 -0
- data/lib/ironfan.rb +143 -0
- data/lib/ironfan/chef_layer.rb +299 -0
- data/lib/ironfan/cloud.rb +412 -0
- data/lib/ironfan/cluster.rb +118 -0
- data/lib/ironfan/compute.rb +153 -0
- data/lib/ironfan/deprecated.rb +33 -0
- data/lib/ironfan/discovery.rb +177 -0
- data/lib/ironfan/dsl_object.rb +124 -0
- data/lib/ironfan/facet.rb +144 -0
- data/lib/ironfan/fog_layer.rb +150 -0
- data/lib/ironfan/private_key.rb +130 -0
- data/lib/ironfan/role_implications.rb +58 -0
- data/lib/ironfan/security_group.rb +119 -0
- data/lib/ironfan/server.rb +281 -0
- data/lib/ironfan/server_slice.rb +260 -0
- data/lib/ironfan/volume.rb +157 -0
- data/spec/ironfan/cluster_spec.rb +13 -0
- data/spec/ironfan/facet_spec.rb +69 -0
- data/spec/ironfan/server_slice_spec.rb +19 -0
- data/spec/ironfan/server_spec.rb +112 -0
- data/spec/ironfan_spec.rb +193 -0
- data/spec/spec_helper.rb +50 -0
- data/spec/spec_helper/dummy_chef.rb +25 -0
- data/spec/test_config.rb +20 -0
- data/tasks/chef_config.rake +38 -0
- data/tasks/jeweler_use_alt_branch.rake +53 -0
- metadata +217 -0
@@ -0,0 +1,118 @@
|
|
1
|
+
module Ironfan
  #
  # A cluster has many facets. Any setting applied here is merged with the
  # facet at resolve time; if the facet explicitly sets any attributes they
  # will win out.
  #
  class Cluster < Ironfan::ComputeBuilder
    attr_reader :facets, :undefined_servers

    # @param name  [String, Symbol] cluster name (stored as a symbol)
    # @param attrs [Hash] initial attributes; pass :no_security_group to skip
    #   creation of the cluster-wide security group
    def initialize(name, attrs={})
      super(name.to_sym, attrs)
      @facets     = Mash.new
      @chef_roles = []
      environment :_default if environment.blank?
      create_cluster_role
      create_cluster_security_group unless attrs[:no_security_group]
    end

    # A cluster is its own cluster.
    def cluster
      self
    end

    # Alias for +name+, so clusters and facets respond uniformly.
    def cluster_name
      name
    end

    # The auto-generated role for this cluster.
    # Instance-evals the given block (if any) in the context of that role.
    #
    # @example
    #   cluster_role do
    #     override_attributes({
    #       :time_machine => { :transition_speed => 88 },
    #     })
    #   end
    #
    # @return [Chef::Role] the auto-generated role for this cluster
    def cluster_role(&block)
      @cluster_role.instance_eval(&block) if block_given?
      @cluster_role
    end

    #
    # Retrieve or define the given facet.
    #
    # @param facet_name [String, Symbol] name of the desired facet
    # @param attrs      [Hash] attributes to configure on the facet
    # @yield a block executed in the context of the facet
    # @return [Ironfan::Facet]
    #
    def facet(facet_name, attrs={}, &block)
      key = facet_name.to_sym
      fct = (@facets[key] ||= Ironfan::Facet.new(self, key))
      fct.configure(attrs, &block)
      fct
    end

    # @return [true, false] whether the named facet has been defined
    def has_facet? facet_name
      @facets.include?(facet_name)
    end

    # @return [Ironfan::Facet] the named facet
    # @raise [RuntimeError] if the facet is not defined
    def find_facet(facet_name)
      @facets[facet_name] or raise("Facet '#{facet_name}' is not defined in cluster '#{cluster_name}'")
    end

    # All servers in this cluster, sorted by facet name and index.
    #
    # @return [Ironfan::ServerSlice] slice containing all servers
    def servers
      all_servers = @facets.sort.map{|_name, fct| fct.servers.to_a }.flatten
      Ironfan::ServerSlice.new(self, all_servers)
    end

    #
    # A slice of the cluster:
    #
    # If +facet_name+ is nil, returns all servers.
    # Otherwise, takes the slice (given by +slice_indexes+) from the
    # requested facet.
    #
    # @param facet_name    [String] facet to slice (or nil for all in cluster)
    # @param slice_indexes [Array, String] servers in that facet (or nil for
    #   all in facet). You must specify a facet if you use slice_indexes.
    # @return [Ironfan::ServerSlice] the requested slice
    def slice facet_name=nil, slice_indexes=nil
      # NOTE(review): #servers already returns a ServerSlice, so this wraps a
      # slice in a slice -- behavior preserved as-is; confirm ServerSlice
      # tolerates a slice (rather than an array) as its second argument.
      return Ironfan::ServerSlice.new(self, self.servers) if facet_name.nil?
      find_facet(facet_name).slice(slice_indexes)
    end

    def to_s
      "#{super[0..-3]} @facets=>#{@facets.keys.inspect}}>"
    end

    # Resolve each facet's settings against this cluster's.
    def resolve!
      facets.values.each(&:resolve!)
    end

    protected

    # Create a security group named for the cluster
    # that is friends with everything in the cluster.
    def create_cluster_security_group
      clname = self.name # capture in a local so the block below can see it
      cloud.security_group(clname){ authorize_group(clname) }
    end

    # Creates a chef role named for the cluster and places it on the run list.
    def create_cluster_role
      @cluster_role_name = "#{name}_cluster"
      @cluster_role      = new_chef_role(@cluster_role_name, cluster)
      role(@cluster_role_name, :own)
    end

  end
end
|
@@ -0,0 +1,153 @@
|
|
1
|
+
module Ironfan
  #
  # Base class allowing us to layer settings for facet over cluster
  #
  class ComputeBuilder < Ironfan::DslObject
    attr_reader :cloud, :volumes, :chef_roles
    has_keys :name, :bogosity, :environment
    # Class variables are deliberate here: role implications and the run-list
    # rank counter are shared across all ComputeBuilder subclasses
    # (cluster / facet / server), so a role implication registered anywhere
    # applies everywhere.
    @@role_implications ||= Mash.new
    @@run_list_rank ||= 0

    # @param builder_name [String, Symbol] name for this compute element
    # @param attrs [Hash] initial attributes; :run_list seeds the run-list info
    def initialize(builder_name, attrs={})
      super(attrs)
      set :name, builder_name
      @run_list_info = attrs[:run_list] || Mash.new
      @volumes = Mash.new
    end

    # set the bogosity to a descriptive reason. Anything truthy implies bogusness
    def bogus?
      !! self.bogosity
    end

    # Magic method to produce cloud instance:
    # * returns the cloud instance, creating it if necessary.
    # * executes the block in the cloud's object context
    #
    # @example
    #   cloud do
    #     image_name 'maverick'
    #     security_group :nagios
    #   end
    #
    #   # defines ec2-specific behavior
    #   cloud(:ec2) do
    #     public_ip '1.2.3.4'
    #     region    'us-east-1d'
    #   end
    #
    # @param cloud_provider [Symbol] only :ec2 is supported so far
    # @raise [RuntimeError] for any provider other than :ec2
    def cloud(cloud_provider=nil, attrs={}, &block)
      raise "Only have ec2 so far" if cloud_provider && (cloud_provider != :ec2)
      @cloud ||= Ironfan::Cloud::Ec2.new(self)
      @cloud.configure(attrs, &block)
      @cloud
    end

    # sugar for cloud(:ec2)
    def ec2(attrs={}, &block)
      cloud(:ec2, attrs, &block)
    end

    # Magic method to describe a volume
    # * returns the named volume, creating it if necessary.
    # * executes the block (if any) in the volume's context
    #
    # @example
    #   # a 1 GB volume at '/data' from the given snapshot
    #   volume(:data) do
    #     size        1
    #     mount_point '/data'
    #     snapshot_id 'snap-12345'
    #   end
    #
    # @param volume_name [String] an arbitrary handle -- you can use the device
    #   name, or a descriptive symbol.
    # @param attrs [Hash] a hash of attributes to pass down.
    # @return [Ironfan::Volume]
    def volume(volume_name, attrs={}, &block)
      volumes[volume_name] ||= Ironfan::Volume.new(:parent => self, :name => volume_name)
      volumes[volume_name].configure(attrs, &block)
      volumes[volume_name]
    end

    # Describe a RAID group: creates the group plus one non-mountable
    # sub-volume entry per device in the group.
    #
    # @return [Ironfan::RaidGroup]
    def raid_group(rg_name, attrs={}, &block)
      volumes[rg_name] ||= Ironfan::RaidGroup.new(:parent => self, :name => rg_name)
      volumes[rg_name].configure(attrs, &block)
      volumes[rg_name].sub_volumes.each do |sv_name|
        # member volumes are managed by the raid group, not mounted directly
        volume(sv_name){ in_raid(rg_name) ; mountable(false) ; tags({}) }
      end
      volumes[rg_name]
    end

    # sugar for volume(:root)
    def root_volume(attrs={}, &block)
      volume(:root, attrs, &block)
    end

    #
    # Adds the given role to the run list, and invokes any role_implications it
    # implies (for instance, defining and applying the 'ssh' security group if
    # the 'ssh' role is applied.)
    #
    # You can specify placement of `:first`, `:normal` (or nil) or `:last`; the
    # final runlist is assembled as
    #
    # * run_list :first  items -- cluster, then facet, then server
    # * run_list :normal items -- cluster, then facet, then server
    # * run_list :last   items -- cluster, then facet, then server
    #
    # (see Ironfan::Server#combined_run_list for full details though)
    #
    def role(role_name, placement=nil)
      add_to_run_list("role[#{role_name}]", placement)
      self.instance_eval(&@@role_implications[role_name]) if @@role_implications[role_name]
    end

    # Add the given recipe to the run list. You can specify placement of
    # `:first`, `:normal` (or nil) or `:last`; the final runlist is assembled as
    #
    # * run_list :first  items -- cluster, then facet, then server
    # * run_list :normal items -- cluster, then facet, then server
    # * run_list :last   items -- cluster, then facet, then server
    #
    # (see Ironfan::Server#combined_run_list for full details though)
    #
    def recipe(name, placement=nil)
      add_to_run_list(name, placement)
    end

    # Roles and recipes for this element only.
    #
    # See Ironfan::Server#combined_run_list for run_list order resolution
    def run_list
      groups = run_list_groups
      [ groups[:first], groups[:normal], groups[:last] ].flatten.compact.uniq
    end

    # run list elements grouped into :first, :normal and :last
    def run_list_groups
      @run_list_info.keys.sort_by{|item| @run_list_info[item][:rank] }.group_by{|item| @run_list_info[item][:placement] }
    end

    #
    # Some roles imply aspects of the machine that have to exist at creation.
    # For instance, on an ec2 machine you may wish the 'ssh' role to imply a
    # security group explicity opening port 22.
    #
    # @param [String] role_name -- the role that triggers the block
    # @yield block will be instance_eval'd in the object that calls 'role'
    #
    def self.role_implication(name, &block)
      @@role_implications[name] = block
    end

    protected

    # Record an item on the run list with a monotonically-increasing rank
    # (so later additions sort after earlier ones within a placement group).
    #
    # BUG FIX: the validation set previously omitted :normal even though the
    # error message explicitly allowed it (and nil defaults to :normal below),
    # so an explicit `role(..., :normal)` raised. :normal is now accepted and
    # the message lists the actual allowed values, including :own.
    def add_to_run_list(item, placement)
      raise "run_list placement must be one of :first, :normal, :last, :own or nil (also means :normal)" unless [:first, :normal, :last, :own, nil].include?(placement)
      @@run_list_rank += 1
      placement ||= :normal
      # first placement wins; re-adding an item does not change its rank
      @run_list_info[item] ||= { :rank => @@run_list_rank, :placement => placement }
    end

  end
end
|
@@ -0,0 +1,33 @@
|
|
1
|
+
module Ironfan

  class Cluster
    #
    # **DEPRECATED**: This doesn't really work -- use +reverse_merge!+ instead
    #
    # Loads each named cluster and merges its settings underneath this
    # cluster's own, returning self.
    def use(*clusters)
      ui.warn "The 'use' statement is deprecated #{caller.inspect}"
      clusters.each do |cluster_handle|
        reverse_merge! Ironfan.load_cluster(cluster_handle)
      end
      self
    end
  end

  class Server
    # **DEPRECATED**: Please use +fullname+ instead.
    def chef_node_name name
      ui.warn "[DEPRECATION] `chef_node_name` is deprecated. Please use `fullname` instead."
      fullname name
    end
  end

  class Cloud::Ec2
    # **DEPRECATED**: Please use +public_ip+ instead.
    def elastic_ip(*args, &block)
      public_ip(*args, &block)
    end
  end

end
|
@@ -0,0 +1,177 @@
|
|
1
|
+
module Ironfan
  class Cluster

    # Populate this cluster's view of the world: instantiate servers, then
    # attach chef nodes, cloud (fog) instances, chef clients and volumes.
    # Fog discovery is skipped when chef_config[:cloud] is explicitly false.
    def discover!
      @aws_instance_hash = {}
      discover_ironfan!
      discover_chef_nodes!
      discover_fog_servers! unless Ironfan.chef_config[:cloud] == false
      discover_chef_clients!
      discover_volumes!
    end

    # All chef clients whose name matches "#{cluster_name}-*", memoized.
    #
    # @return [Array<Chef::ApiClient>]
    def chef_clients
      return @chef_clients if @chef_clients
      @chef_clients = []

      # Oh for fuck's sake -- the key used to index clients changed from
      # 'clientname' in 0.10.4-and-prev to 'name' in 0.10.8. Rather than index
      # both 'clientname' and 'name', they switched it -- so we have to fall
      # back. FIXME: While the Opscode platform is 0.10.4 I have clientname
      # first (sorry, people of the future). When it switches to 0.10.8 we'll
      # reverse them (suck it people of the past).
      # Also sometimes the server returns results that are nil on
      # recently-expired clients, so that's annoying too.
      clients, wtf, num = Chef::Search::Query.new.search(:client, "clientname:#{cluster_name}-*") ; clients.compact!
      clients, wtf, num = Chef::Search::Query.new.search(:client, "name:#{cluster_name}-*") if clients.blank?
      clients.each do |client_hsh|
        next if client_hsh.nil?
        # Return values from Chef::Search seem to be inconsistent across chef
        # versions (sometimes a hash, sometimes an object). Fix if necessary.
        client_hsh = Chef::ApiClient.json_create(client_hsh) unless client_hsh.is_a?(Chef::ApiClient)
        @chef_clients.push( client_hsh )
      end
      @chef_clients
    end

    # returns client with the given name if in catalog, nil otherwise
    def find_client(cl_name)
      chef_clients.find{|ccl| ccl.name == cl_name }
    end

    # All chef nodes whose cluster_name attribute matches this cluster,
    # memoized. (The search is prefix-based, so results are re-checked for an
    # exact cluster_name match before being kept.)
    #
    # @return [Array<Chef::Node>]
    def chef_nodes
      return @chef_nodes if @chef_nodes
      @chef_nodes = []
      Chef::Search::Query.new.search(:node,"cluster_name:#{cluster_name}") do |n|
        @chef_nodes.push(n) unless n.blank? || (n.cluster_name != cluster_name.to_s)
      end
      @chef_nodes
    end

    # returns node with the given name if in catalog, nil otherwise
    def find_node(nd_name)
      chef_nodes.find{|nd| nd.name == nd_name }
    end

    protected

    # Non-terminated fog servers belonging to this cluster, matched by
    # key_name (the cluster keypair shares the cluster's name). Memoized.
    def fog_servers
      @fog_servers ||= Ironfan.fog_servers.select{|fs| fs.key_name == cluster_name.to_s && (fs.state != "terminated") }
    end

    # Walk the list of chef nodes and
    # * vivify the server,
    # * associate the chef node
    # * if the chef node knows about its instance id, memorize that for lookup
    #   when we discover cloud instances.
    def discover_chef_nodes!
      chef_nodes.each do |chef_node|
        # prefer explicit cluster/facet/index attributes; fall back to parsing
        # the node name ("cluster-facet-index"); skip nodes with neither
        if chef_node["cluster_name"] && chef_node["facet_name"] && chef_node["facet_index"]
          cluster_name = chef_node["cluster_name"]
          facet_name = chef_node["facet_name"]
          facet_index = chef_node["facet_index"]
        elsif chef_node.name
          ( cluster_name, facet_name, facet_index ) = chef_node.name.split(/-/)
        else
          next
        end
        svr = Ironfan::Server.get(cluster_name, facet_name, facet_index)
        svr.chef_node = chef_node
        # map instance id -> server so discover_fog_servers! can match
        # untagged cloud instances back to their server
        @aws_instance_hash[ chef_node.ec2.instance_id ] = svr if chef_node[:ec2] && chef_node.ec2.instance_id
      end
    end

    # Walk the list of servers, asking each to discover its chef client.
    def discover_chef_clients!
      servers.each(&:chef_client)
    end

    # calling #servers vivifies each facet's Ironfan::Server instances
    def discover_ironfan!
      self.servers
    end

    # Attach each fog server to its Ironfan::Server counterpart.
    def discover_fog_servers!
      # If the fog server is tagged with cluster/facet/index, then try to
      # locate the corresponding machine in the cluster def
      # Otherwise, try to get to it through mapping the aws instance id
      # to the chef node name found in the chef node
      fog_servers.each do |fs|
        if fs.tags["cluster"] && fs.tags["facet"] && fs.tags["index"] && fs.tags["cluster"] == cluster_name.to_s
          svr = Ironfan::Server.get(fs.tags["cluster"], fs.tags["facet"], fs.tags["index"])
        elsif @aws_instance_hash[fs.id]
          svr = @aws_instance_hash[fs.id]
        else
          next
        end

        # If there already is a fog server there, then issue a warning and slap
        # the just-discovered one onto a server with an arbitrary index, and
        # mark both bogus
        if existing_fs = svr.fog_server
          if existing_fs.id != fs.id
            ui.warn "Duplicate fog instance found for #{svr.fullname}: #{fs.id} and #{existing_fs.id}!!"
            old_svr = svr
            svr = old_svr.facet.server(1_000 + svr.facet_index.to_i)
            old_svr.bogosity :duplicate
            svr.bogosity :duplicate
          end
        end
        svr.fog_server = fs
      end
    end

    # Walk the list of servers, asking each to discover its volumes.
    def discover_volumes!
      servers.each(&:discover_volumes!)
    end

    # Walk the list of servers, asking each to discover its addresses.
    def discover_addresses!
      servers.each(&:discover_addresses!)
    end

  end # Ironfan::Cluster
end
|
134
|
+
|
135
|
+
module Ironfan

  # Shared Fog connection to AWS compute, built lazily from knife config.
  #
  # @return [Fog::Compute] memoized AWS compute connection
  def self.fog_connection
    @fog_connection ||= Fog::Compute.new({
        :provider              => 'AWS',
        :aws_access_key_id     => Chef::Config[:knife][:aws_access_key_id],
        :aws_secret_access_key => Chef::Config[:knife][:aws_secret_access_key],
        :region                => Chef::Config[:knife][:region]
      })
  end

  # Every server known to the cloud provider, fetched once and memoized.
  def self.fog_servers
    @fog_servers ||= begin
      Chef::Log.debug("Using fog to catalog all servers")
      Ironfan.fog_connection.servers.all
    end
  end

  # Every elastic IP address, memoized as a hash keyed by public IP.
  def self.fog_addresses
    @fog_addresses ||= begin
      Chef::Log.debug("Using fog to catalog all addresses")
      Ironfan.fog_connection.addresses.each_with_object({}){|fa, hsh| hsh[fa.public_ip] = fa }
    end
  end

  # Every volume known to the cloud provider (fetched on first use).
  def self.fog_volumes
    @fog_volumes || fetch_fog_volumes
  end

  # Force-fetch the volume catalog and cache it.
  def self.fetch_fog_volumes
    Chef::Log.debug("Using fog to catalog all volumes")
    @fog_volumes = Ironfan.fog_connection.volumes
  end

  # Every keypair, memoized as a hash keyed by keypair name.
  def self.fog_keypairs
    @fog_keypairs ||= begin
      Chef::Log.debug("Using fog to catalog all keypairs")
      Ironfan.fog_connection.key_pairs.each_with_object({}){|kp, hsh| hsh[kp.name] = kp }
    end
  end

  # Instance-context sugar forwarding to Ironfan.safely.
  def safely *args, &block
    Ironfan.safely(*args, &block)
  end

end
|