ironfan 3.2.2 → 4.0.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (66) hide show
  1. data/CHANGELOG.md +5 -0
  2. data/VERSION +1 -1
  3. data/ironfan.gemspec +33 -20
  4. data/lib/chef/knife/cluster_kick.rb +17 -17
  5. data/lib/chef/knife/cluster_kill.rb +13 -7
  6. data/lib/chef/knife/cluster_launch.rb +60 -66
  7. data/lib/chef/knife/cluster_pry.rb +2 -2
  8. data/lib/chef/knife/cluster_show.rb +3 -6
  9. data/lib/chef/knife/cluster_ssh.rb +5 -11
  10. data/lib/chef/knife/cluster_start.rb +2 -4
  11. data/lib/chef/knife/cluster_stop.rb +1 -3
  12. data/lib/chef/knife/cluster_sync.rb +13 -21
  13. data/lib/chef/knife/ironfan_knife_common.rb +11 -9
  14. data/lib/chef/knife/ironfan_script.rb +2 -1
  15. data/lib/gorillib/resolution.rb +119 -0
  16. data/lib/ironfan/broker/computer.rb +316 -0
  17. data/lib/ironfan/broker/drive.rb +21 -0
  18. data/lib/ironfan/broker.rb +37 -0
  19. data/lib/ironfan/builder.rb +14 -0
  20. data/lib/ironfan/deprecated.rb +16 -58
  21. data/lib/ironfan/dsl/cloud.rb +21 -0
  22. data/lib/ironfan/dsl/cluster.rb +27 -0
  23. data/lib/ironfan/dsl/compute.rb +84 -0
  24. data/lib/ironfan/dsl/ec2.rb +260 -0
  25. data/lib/ironfan/dsl/facet.rb +25 -0
  26. data/lib/ironfan/dsl/role.rb +19 -0
  27. data/lib/ironfan/dsl/server.rb +31 -0
  28. data/lib/ironfan/dsl/virtualbox.rb +8 -0
  29. data/lib/ironfan/dsl/volume.rb +45 -0
  30. data/lib/ironfan/dsl.rb +7 -0
  31. data/lib/ironfan/headers.rb +58 -0
  32. data/lib/ironfan/provider/chef/client.rb +77 -0
  33. data/lib/ironfan/provider/chef/node.rb +133 -0
  34. data/lib/ironfan/provider/chef/role.rb +69 -0
  35. data/lib/ironfan/provider/chef.rb +28 -0
  36. data/lib/ironfan/provider/ec2/ebs_volume.rb +137 -0
  37. data/lib/ironfan/provider/ec2/elastic_ip.rb +10 -0
  38. data/lib/ironfan/provider/ec2/key_pair.rb +65 -0
  39. data/lib/ironfan/provider/ec2/machine.rb +258 -0
  40. data/lib/ironfan/provider/ec2/placement_group.rb +24 -0
  41. data/lib/ironfan/provider/ec2/security_group.rb +118 -0
  42. data/lib/ironfan/provider/ec2.rb +47 -0
  43. data/lib/ironfan/provider/virtualbox/machine.rb +10 -0
  44. data/lib/ironfan/provider/virtualbox.rb +8 -0
  45. data/lib/ironfan/provider.rb +139 -0
  46. data/lib/ironfan/requirements.rb +52 -0
  47. data/lib/ironfan.rb +44 -33
  48. metadata +34 -21
  49. data/lib/chef/knife/cluster_vagrant.rb +0 -144
  50. data/lib/chef/knife/vagrant/ironfan_environment.rb +0 -18
  51. data/lib/chef/knife/vagrant/ironfan_provisioners.rb +0 -27
  52. data/lib/chef/knife/vagrant/skeleton_vagrantfile.rb +0 -119
  53. data/lib/ironfan/chef_layer.rb +0 -300
  54. data/lib/ironfan/cloud.rb +0 -323
  55. data/lib/ironfan/cluster.rb +0 -118
  56. data/lib/ironfan/compute.rb +0 -139
  57. data/lib/ironfan/discovery.rb +0 -190
  58. data/lib/ironfan/dsl_builder.rb +0 -99
  59. data/lib/ironfan/facet.rb +0 -143
  60. data/lib/ironfan/fog_layer.rb +0 -196
  61. data/lib/ironfan/private_key.rb +0 -130
  62. data/lib/ironfan/role_implications.rb +0 -58
  63. data/lib/ironfan/security_group.rb +0 -133
  64. data/lib/ironfan/server.rb +0 -291
  65. data/lib/ironfan/server_slice.rb +0 -265
  66. data/lib/ironfan/volume.rb +0 -146
@@ -1,291 +0,0 @@
1
module Ironfan

  #
  # A server is a specific (logical) member of a facet within a cluster.
  #
  # It may have extra attributes if it also exists in the Chef server,
  # or if it exists in the real world (as revealed by Fog)
  #
  class Server < Ironfan::ComputeBuilder
    magic :cluster,     Cluster
    magic :facet,       Facet
    magic :facet_index, Integer
    attr_reader :tags

    # Lazily fetched from the cluster; memoizes `false` (not nil) when the
    # node is absent so the lookup is not repeated.
    magic :chef_node, Whatever, :default => -> owner,name { owner.cluster.find_node(name) || false }
    attr_accessor :fog_server

    # Registry of every server instantiated in this process, keyed by fullname.
    # NOTE(review): class variable is shared across the inheritance tree.
    @@all ||= Mash.new

    # @param facet [Ironfan::Facet] facet this server belongs to
    # @param idx   [Integer]        index of this server within the facet
    def initialize facet, idx
      cluster facet.cluster
      facet facet
      facet_index idx
      @fullname = [cluster_name, facet_name, facet_index].join('-')
      super(@fullname)
      @tags = { "name" => name, "cluster" => cluster_name, "facet" => facet_name, "index" => facet_index, }
      ui.warn("Duplicate server #{[self, facet.name, idx]} vs #{@@all[fullname]}") if @@all[fullname]
      @@all[fullname] = self
    end

    # Reader/writer: returns the fullname, setting it first if an arg is given.
    def fullname fn=nil
      @fullname = fn if fn
      @fullname
    end

    def cluster_name
      cluster.name
    end

    def facet_name
      facet.name
    end

    # A one-element slice containing just this server.
    def servers
      Ironfan::ServerSlice.new(cluster, [self])
    end

    # def bogosity val=nil
    #   @settings[:bogosity] = val  if not val.nil?
    #   return @settings[:bogosity] if not @settings[:bogosity].nil?
    #   return :bogus_facet         if facet.bogus?
    #   # return :out_of_range      if (self.facet_index.to_i >= facet.instances)
    #   false
    # end

    # true if a Fog server object is attached
    def in_cloud?
      !! fog_server
    end

    # true if either a chef node or a chef client exists for this server
    def in_chef?
      chef_node || chef_client
    end

    # @param states [Array<String>] one or more Fog state names
    # @return [true, false] true if in the cloud and in any of the given states
    def has_cloud_state?(*states)
      in_cloud? && states.flatten.include?(fog_server.state)
    end

    def exists?
      created? || in_chef?
    end

    # Present in the cloud and not on its way out.
    def created?
      in_cloud? && (not ['terminated', 'shutting-down'].include?(fog_server.state))
    end

    def running?
      has_cloud_state?('running')
    end

    def startable?
      has_cloud_state?('stopped')
    end

    def launchable?
      not created?
    end

    def sshable?
      in_chef?
    end

    # Permanent servers must never be killed; accepts true, :true or 'true'.
    def permanent?
      [true, :true, 'true'].include?(self.cloud.permanent)
    end

    def killable?
      return false if permanent?
      in_chef? || created?
    end

    def to_s
      super[0..-3] + " chef: #{in_chef? && chef_node.name} fog: #{in_cloud? && fog_server.id}}>"
    end

    #
    # Attributes
    #

    # Reader/writer for a single tag; sets the tag first when a value is given.
    def tag key, value=nil
      if value then @tags[key] = value ; end
      @tags[key]
    end

    # Best-effort public hostname / address for this server.
    def public_hostname
      resolve_public_hostname
    end

    def chef_server_url()        Chef::Config.chef_server_url        ; end
    def validation_client_name() Chef::Config.validation_client_name ; end
    def validation_key()         Chef::Config.validation_key         ; end
    def organization()           Chef::Config.organization           ; end

    #
    # Resolve: wire up the underlay chain (cluster -> facet -> server) and
    # bake the cloud user_data this node will boot with.
    #
    def resolve!
      facet.underlay = cluster
      self.underlay  = facet

      facet.cloud.underlay = cluster.cloud
      cloud.underlay       = facet.cloud

      cloud.user_data({
          :chef_server            => chef_server_url,
          :validation_client_name => validation_client_name,
          #
          :node_name              => fullname,
          :organization           => organization,
          :cluster_name           => cluster_name,
          :facet_name             => facet_name,
          :facet_index            => facet_index,
          #
          :run_list               => combined_run_list,
        })
      # default the keypair to the cluster's name
      cloud.keypair(cluster_name) if cloud.keypair.nil?
      #
      self
    end

    #
    # Assembles the combined runlist.
    #
    # * run_list :first  items -- cluster then facet then server
    # * run_list :normal items -- cluster then facet then server
    # * own roles: cluster_role then facet_role
    # * run_list :last   items -- cluster then facet then server
    #
    #     Ironfan.cluster(:my_cluster) do
    #       role('f', :last)
    #       role('c')
    #       facet(:my_facet) do
    #         role('d')
    #         role('e')
    #         role('b', :first)
    #         role('h', :last)
    #       end
    #       role('a', :first)
    #       role('g', :last)
    #     end
    #
    # produces
    #     cluster list  [a] [c]  [cluster_role] [fg]
    #     facet list    [b] [de] [facet_role]   [h]
    #
    # yielding run_list
    #     ['a', 'b', 'c', 'd', 'e', 'cr', 'fr', 'f', 'g', 'h']
    #
    # Avoid duplicate conflicting declarations. If you define things more
    # than once, the *earliest encountered* one wins, even if it is elsewhere
    # marked :last.
    #
    def combined_run_list
      # use the magic-field accessors, consistent with the rest of the class
      # (the values are set via `cluster`/`facet` setters, so the bare
      # @cluster/@facet ivars the old code read were presumably never set)
      cg = cluster.run_list_groups
      fg = facet.run_list_groups
      sg = self.run_list_groups
      [ cg[:first],  fg[:first],  sg[:first],
        cg[:normal], fg[:normal], sg[:normal],
        cg[:own],    fg[:own],
        cg[:last],   fg[:last],   sg[:last], ].flatten.compact.uniq
    end

    # FIXME -- this will break on some edge case where a bogus node is
    # discovered after everything is resolve!d
    def default_availability_zone
      cloud.default_availability_zone
    end

    #
    # retrieval
    #

    # Look up (or create) the server at cluster/facet/index, marking the facet
    # or server bogus when it was not declared in the cluster definition.
    def self.get(cluster_name, facet_name, facet_index)
      cluster = Ironfan.cluster(cluster_name)
      had_facet = cluster.has_facet?(facet_name)
      facet = cluster.facet(facet_name)
      facet.bogosity true unless had_facet
      had_server = facet.has_server?( facet_index )
      server = facet.server(facet_index)
      server.bogosity :not_defined_in_facet unless had_server
      return server
    end

    def self.all
      @@all
    end

    #
    # Actions!
    #

    def sync_to_cloud
      step "Syncing to cloud"
      attach_volumes
      create_tags
      associate_public_ip
      ensure_placement_group
      set_instance_attributes
    end

    def sync_to_chef
      step "Syncing to chef server"
      sync_chef_node
      true
    end

    # FIXME: a lot of AWS logic in here. This probably lives in the facet.cloud
    # but for the one or two things that come from the facet
    def create_server
      return nil if created? # only create a server if it does not already exist
      fog_create_server
    end

    # Tag the Fog server and each attached volume with identifying metadata.
    def create_tags
      return unless created?
      step("  labeling servers and volumes")
      fog_create_tags(fog_server, self.fullname, tags)
      volumes.each_pair do |vol_name, vol|
        if vol.fog_volume
          fog_create_tags(vol.fog_volume, vol.desc,
            { "server" => self.fullname, "name" => "#{name}-#{vol.name}", "device" => vol.device, "mount_point" => vol.mount_point, "cluster" => cluster_name, "facet" => facet_name, "index" => facet_index, })
        end
      end
    end

    # Fog-style block device mappings for all mappable volumes.
    def block_device_mapping
      volumes.values.map(&:block_device_mapping).compact
    end

    def announce_as_started
      announce_and_save('start')
    end

    def announce_as_stopped
      announce_and_save('stop')
    end

    protected

    # Shared body for announce_as_started / announce_as_stopped: no-op when
    # there is no chef node, otherwise announce the state and save the node.
    def announce_and_save(state)
      return unless chef_node
      announce_state(state)
      chef_node.save
    end

    # Try each known source of a public address, in order of preference:
    # explicit elastic IP, then whatever the Fog server exposes.
    def resolve_public_hostname
      case
      when cloud.public_ip
        cloud.public_ip
      when fog_server && fog_server.respond_to?(:public_ip_address) && fog_server.public_ip_address.present?
        fog_server.public_ip_address
      when fog_server && fog_server.respond_to?(:ipaddress)
        fog_server.ipaddress
      when fog_server && fog_server.respond_to?(:dns_name)
        fog_server.dns_name
      else
        nil
      end
    end
    # keep the historical (regrettably named) entry point for any subclass
    # that may still call it
    alias_method :give_me_a_hostname_from_one_of_these_seven_ways_you_assholes, :resolve_public_hostname

  end
end
@@ -1,265 +0,0 @@
1
module Ironfan
  #
  # A server group is a set of actual or implied servers.
  #
  # The idea is we want to be able to smoothly roll up settings
  #
  #
  class ServerSlice < Ironfan::DslBuilderCollection
    attr_accessor :name
    attr_accessor :cluster

    # @param cluster [Ironfan::Cluster] cluster the slice belongs to
    # @param servers [Array<Ironfan::Server>] members of the slice
    def initialize cluster, servers
      @item_type  = Ironfan::Server
      @key_method = :name
      super()
      self.name = "#{cluster.name} slice"
      self.cluster = cluster
      receive!(servers)
    end

    # Members of the slice, in collection order.
    def servers
      @clxn.values
    end

    # Filtering methods return a new ServerSlice (not a bare Array) so chained
    # calls keep slice behavior.
    [:select, :find_all, :reject, :detect, :find, :drop_while].each do |method|
      define_method(method) do |*args, &block|
        ServerSlice.new cluster, servers.send(method, *args, &block)
      end
    end

    # true if slice contains a server with the given fullname (if arg is a
    # string) or same fullname as the given server (if a Server)
    #
    # @overload include?(server_fullname)
    #   @param [String] server_fullname checks for a server with that fullname
    # @overload include?(server)
    #   @param [Ironfan::Server] server checks for server with same fullname
    def include?(server)
      fullname = server.is_a?(String) ? server : server.fullname
      # bugfix: the collection lives in @clxn (see #servers); @servers was
      # never assigned, so the old code raised NoMethodError on nil
      servers.any?{|svr| svr.fullname == fullname }
    end

    # Return the collection of servers that are not yet 'created'
    def uncreated_servers
      select{|svr| not svr.created? }
    end

    def bogus_servers
      select(&:bogus?)
    end

    #
    # Info!
    #

    def chef_nodes
      servers.map(&:chef_node).compact
    end

    def fog_servers
      servers.map(&:fog_server).compact
    end

    # Union of every member's security groups, keyed by group name.
    def security_groups
      sg = {}
      servers.each{|svr| sg.merge!(svr.cloud.security_groups) }
      sg
    end

    def facets
      servers.map(&:facet)
    end

    def chef_roles
      [ cluster.chef_roles, facets.map(&:chef_roles) ].flatten.compact.uniq
    end

    # hack -- take the ssh_identity_file from the first server.
    def ssh_identity_file
      return if servers.empty?
      servers.first.cloud.ssh_identity_file
    end

    #
    # Actions!
    #

    def start
      delegate_to_fog_servers( :start  )
      delegate_to_fog_servers( :reload  )
    end

    def stop
      delegate_to_fog_servers( :stop  )
      delegate_to_fog_servers( :reload  )
    end

    def destroy
      delegate_to_fog_servers( :destroy  )
      delegate_to_fog_servers( :reload  )
    end

    def reload
      delegate_to_fog_servers( :reload  )
    end

    def create_servers
      delegate_to_servers( :create_server )
    end

    def delete_chef
      delegate_to_servers( :delete_chef, true )
    end

    def sync_to_cloud
      sync_keypairs
      sync_security_groups
      delegate_to_servers( :sync_to_cloud )
    end

    def sync_to_chef
      sync_roles
      delegate_to_servers( :sync_to_chef )
    end

    #
    # Display!
    #

    # FIXME: this is a jumble. we need to pass it in some other way.

    MINIMAL_HEADINGS  = ["Name", "Chef?", "State", "InstanceID", "Public IP", "Private IP", "Created At"].to_set.freeze
    DEFAULT_HEADINGS  = (MINIMAL_HEADINGS + ['Flavor', 'AZ', 'Env']).freeze
    EXPANDED_HEADINGS = DEFAULT_HEADINGS + ['Image', 'Volumes', 'Elastic IP', 'SSH Key']

    MACHINE_STATE_COLORS = {
      'running'       => :green,
      'pending'       => :yellow,
      'stopping'      => :magenta,
      'shutting-down' => :magenta,
      'stopped'       => :cyan,
      'terminated'    => :blue,
      'not running'   => :blue,
    }

    #
    # This is a generic display routine for cluster-like sets of nodes. If you
    # call it with no args, you get the basic table that knife cluster show
    # draws. If you give it an array of strings, you can override the order and
    # headings displayed. If you also give it a block you can add your own logic
    # for generating content. The block is given a Ironfan::Server instance
    # for each item in the collection and should return a hash of Name,Value
    # pairs to merge into the minimal fields.
    #
    def display hh = :default
      headings =
        case hh
        when :minimal  then MINIMAL_HEADINGS
        when :default  then DEFAULT_HEADINGS
        when :expanded then EXPANDED_HEADINGS
        else hh.to_set end
      headings += ["Bogus"] if servers.any?(&:bogus?)
      # probably not necessary any more
      # servers = servers.sort{ |a,b| (a.facet_name <=> b.facet_name) *9 + (a.facet_index.to_i <=> b.facet_index.to_i)*3 + (a.facet_index <=> b.facet_index) }
      defined_data = servers.map do |svr|
        hsh = {
          "Name"   => svr.fullname,
          "Facet"  => svr.facet_name,
          "Index"  => svr.facet_index,
          "Chef?"  => (svr.chef_node? ? "yes" : "[red]no[reset]"),
          "Bogus"  => (svr.bogus? ? "[red]#{svr.bogosity}[reset]" : ''),
          "Env"    => svr.environment,
        }
        # if (cs = svr.chef_server)
        #   hsh.merge!(
        #     "Env"    => cs.environment,
        #   )
        # end
        if (fs = svr.fog_server)
          hsh.merge!(
            "InstanceID" => (fs.id && fs.id.length > 0) ? fs.id : "???",
            "Flavor"     => fs.flavor_id,
            "Image"      => fs.image_id,
            "AZ"         => fs.availability_zone,
            "SSH Key"    => fs.key_name,
            "State"      => "[#{MACHINE_STATE_COLORS[fs.state] || 'white'}]#{fs.state}[reset]",
            "Public IP"  => fs.public_ip_address,
            "Private IP" => fs.private_ip_address,
            "Created At" => fs.created_at.strftime("%Y%m%d-%H%M%S")
          )
        else
          hsh["State"] = "not running"
        end
        hsh['Volumes'] = []
        svr.volumes.each_pair do |name, vol|
          if    vol.ephemeral_device?   then next
          elsif vol.volume_id           then hsh['Volumes'] << vol.volume_id
          elsif vol.create_at_launch?   then hsh['Volumes'] << vol.snapshot_id
          end
        end
        hsh['Volumes']    = hsh['Volumes'].join(',')
        hsh['Elastic IP'] = svr.cloud.public_ip if svr.cloud.public_ip
        if block_given?
          extra_info = yield(svr)
          hsh.merge!(extra_info)
          headings += extra_info.keys
        end
        hsh
      end
      if defined_data.empty?
        ui.info "Nothing to report"
      else
        Formatador.display_compact_table(defined_data, headings.to_a)
      end
    end

    def to_s
      str = super
      # bugfix: use the #servers accessor; @servers is never assigned
      str[0..-2] + " #{servers.map(&:fullname)}>"
    end

    def joined_names
      map(&:name).join(", ").gsub(/, ([^,]*)$/, ' and \1')
    end

    # Calls block on each server in parallel, each in its own thread
    #
    # @example
    #   target = Ironfan::Cluster.slice('web_server')
    #   target.parallelize{|svr| svr.launch }
    #
    # @yield each server, in turn
    #
    # @return [Array] array (in same order as servers) of each block's result
    def parallelize
      servers.map do |svr|
        sleep(0.1) # avoid hammering with simultaneous requests
        # pass the server into the thread so each thread closes over its own
        # value (block param renamed to avoid shadowing the outer variable)
        Thread.new(svr){|server| yield(server) }
      end
    end

    protected

    # Helper methods for iterating through the servers to do things
    #
    # @param [Symbol] method -- method to call on each server
    # @param [Boolean] threaded -- execute each call in own thread
    #
    # @return [Array] array (in same order as servers) of results for that method
    def delegate_to_servers method, threaded = true
      if threaded # Call in threads
        threads = parallelize{|svr| svr.send(method) }
        threads.map(&:value) # Thread#value joins, then returns the block's result
      else # Call the method for each server sequentially
        servers.map{|svr| svr.send(method) }
      end
    end

    def delegate_to_fog_servers method
      fog_servers.compact.map do |fs|
        fs.send(method)
      end
    end

  end
end
@@ -1,146 +0,0 @@
1
module Ironfan
  #
  # Internal or external storage
  #
  class Volume < Ironfan::DslBuilder
    field :parent, String
    attr_accessor :fog_volume

    # mountable volume attributes
    magic :device, String
    magic :mount_point, String
    magic :mount_options, String, :default => 'defaults,nouuid,noatime'
    magic :fstype, String, :default => 'xfs'
    magic :mount_dump, String
    magic :mount_pass, String
    magic :mountable, Whatever, :default => true
    magic :formattable, Whatever, :default => false
    magic :resizable, Whatever, :default => false
    magic :in_raid, Whatever, :default => false
    # cloud volume attributes
    magic :attachable, Whatever, :default => :ebs
    magic :create_at_launch, Whatever, :default => false
    magic :volume_id, String
    magic :snapshot_id, String
    magic :size, String
    magic :keep, Whatever, :default => true
    magic :availability_zone, String
    # arbitrary tags
    magic :tags, Hash, :default => {}

    # Snapshot for snapshot_name method.
    # Set your own by adding
    #
    #     VOLUME_IDS = Mash.new unless defined?(VOLUME_IDS)
    #     VOLUME_IDS.merge!({ :your_id => 'snap-whatever' })
    #
    # to your organization's knife.rb
    #
    VOLUME_IDS = Mash.new unless defined?(VOLUME_IDS)
    VOLUME_IDS.merge!({
        :blank_xfs => 'snap-d9c1edb1',
      })

    # Describes a volume
    #
    # @example
    #   Ironfan::Volume.new( :name => 'redis',
    #     :device => '/dev/sdp', :mount_point => '/data/redis', :fstype => 'xfs', :mount_options => 'defaults,nouuid,noatime'
    #     :size => 1024, :snapshot_id => 'snap-66494a08', :volume_id => 'vol-12312',
    #     :tags => {}, :keep => true )
    #
    def initialize attrs={}
      # bugfix: `parent = ...` created a throwaway local, silently dropping
      # the owner; call the field setter instead. NOTE(review): assumes the
      # gorillib field setter accepts a call-style argument like the magic
      # setters used elsewhere in this file — confirm against DslBuilder.
      self.parent(attrs.delete(:owner))
      super(attrs)
    end

    # human-readable description for logging messages and such
    def desc
      container = parent.name rescue nil
      "#{name} on #{container} (#{volume_id} @ #{device})"
    end

    # true for local instance-store ('ephemeral0' etc.) pseudo-volumes
    def ephemeral_device?
      volume_id =~ /^ephemeral/
    end

    # Named snapshots, as defined in Ironfan::Volume::VOLUME_IDS
    def snapshot_name(name)
      snap_id = VOLUME_IDS[name.to_sym]
      raise "Unknown snapshot name #{name} - is it defined in Ironfan::Volume::VOLUME_IDS?" unless snap_id
      self.snapshot_id(snap_id)
    end

    # With snapshot specified but volume missing, have it auto-created at launch
    #
    # Be careful with this -- you can end up with multiple volumes claiming to
    # be the same thing.
    #
    def create_at_launch?
      volume_id.blank? && self.create_at_launch
    end

    def in_cloud?
      !! fog_volume
    end

    def has_server?
      in_cloud? && fog_volume.server_id.present?
    end

    # def reverse_merge!(other_hsh)
    #   super(other_hsh)
    #   self.tags.reverse_merge!(other_hsh.tags) if other_hsh.respond_to?(:tags) && other_hsh.tags.present?
    #   self
    # end

    # An array of hashes with dorky-looking keys, just like Fog wants it.
    # Returns nil for volumes that are neither ephemeral nor create-at-launch
    # (those are attached after boot, not mapped at launch).
    def block_device_mapping
      hsh = { 'DeviceName' => device }
      if ephemeral_device?
        hsh['VirtualName'] = volume_id
      elsif create_at_launch?
        raise "Must specify a size or a snapshot ID for #{self}" if snapshot_id.blank? && size.blank?
        hsh['Ebs.SnapshotId'] = snapshot_id if snapshot_id.present?
        hsh['Ebs.VolumeSize'] = size.to_s   if size.present?
        hsh['Ebs.DeleteOnTermination'] = (! keep).to_s
      else
        return
      end
      hsh
    end

  end


  #
  # Consider raising the chunk size to 256 and setting read_ahead 65536 if you are raid'ing EBS volumes
  #
  # * http://victortrac.com/EC2_Ephemeral_Disks_vs_EBS_Volumes
  # * http://orion.heroku.com/past/2009/7/29/io_performance_on_ebs/
  # * http://tech.blog.greplin.com/aws-best-practices-and-benchmarks
  # * http://stu.mp/2009/12/disk-io-and-throughput-benchmarks-on-amazons-ec2.html
  #
  class RaidGroup < Volume
    # volumes that comprise this raid group
    magic :sub_volumes, Array, :default => []
    # RAID level (http://en.wikipedia.org/wiki/RAID#Standard_levels)
    magic :level, String
    # Raid chunk size (https://raid.wiki.kernel.org/articles/r/a/i/RAID_setup_cbb2.html)
    magic :chunk, String
    # read-ahead buffer
    magic :read_ahead, String

    # Overrides of Volume field defaults
    magic :attachable,    Whatever, :default => false
    magic :formattable,   Whatever, :default => true
    magic :mount_options, String,   :default => 'defaults,nobootwait,noatime,nouuid,comment=ironfan'

    def desc
      "#{name} on #{parent.fullname} (#{volume_id} @ #{device} from #{sub_volumes.join(',')})"
    end

    # attr_accessor :fog_volume is inherited from Volume; the duplicate
    # declaration was removed as redundant.
  end
end