poolparty 1.6.8 → 1.6.9

Sign up to get free protection for your applications and to get access to all the features.
@@ -32,6 +32,56 @@ module PoolParty
32
32
  def override_attributes(hsh={}, &block)
33
33
  @override_attributes ||= ChefAttribute.new(hsh, &block)
34
34
  end
35
+
36
+
37
+ # === Description
38
+ #
39
+ # Provides the ability to specify steps that can be
40
+ # run via chef
41
+ #
42
+ # pool "mycluster" do
43
+ # cloud "mycloud" do
44
+ #
45
+ # on_step :download_install do
46
+ # recipe "myrecipes::download"
47
+ # recipe "myrecipes::install"
48
+ # end
49
+ #
50
+ # on_step :run => :download_install do
51
+ # recipe "myrecipes::run"
52
+ # end
53
+ # end
54
+ # end
55
+ #
56
+ # Then from the command line you can do
57
+ #
58
+ # cloud-configure --step=download_install
59
+ #
60
+ # to only do the partial job or
61
+ #
62
+ # cloud-configure --step=run
63
+ #
64
+ # to do everything
65
+ #
66
+ def on_step action, &block
67
+ if action.is_a? Hash
68
+ t = action
69
+ action = t.keys[0]
70
+ depends = t.values[0]
71
+ else
72
+ depends = nil
73
+ end
74
+ change_attr :@_current_action, action do
75
+ yield
76
+ if depends
77
+ # Merge the recipes of the dependency into
78
+ # the current recipes
79
+ _recipes(depends).each do |r|
80
+ recipe r
81
+ end
82
+ end
83
+ end
84
+ end
35
85
 
36
86
  # Adds a chef recipe to the cloud
37
87
  #
@@ -101,7 +151,7 @@ module PoolParty
101
151
  " && echo OK || echo MISSING"
102
152
 
103
153
  r = remote_instance.ssh(cmd, :do_sudo => false )
104
- r.lines.to_a.last.chomp == "OK"
154
+ r.split("\n").to_a.last.chomp == "OK"
105
155
  end
106
156
 
107
157
  def node_bootstrap!(remote_instance, force=false)
@@ -125,20 +175,35 @@ module PoolParty
125
175
  ]
126
176
 
127
177
  remote_instance.ssh(bootstrap_cmds)
178
+ end
179
+
180
+
181
+ def _recipes action = nil
182
+ action = action.to_sym unless action.nil?
183
+ @_recipes ||= {:default => [] }
184
+ key = action || _current_action
185
+ @_recipes[key] ||= []
128
186
  end
129
187
 
130
188
  private
131
189
 
190
+ def _current_action
191
+ @_current_action ||= :default
192
+ end
193
+
132
194
  def chef_cmd
195
+
196
+ if ENV["CHEF_DEBUG"]
197
+ debug = "-l debug"
198
+ else
199
+ debug = ""
200
+ end
201
+
133
202
  return <<-CMD
134
- PATH="$PATH:$GEM_BIN" #{chef_bin} -j /etc/chef/dna.json -c /etc/chef/client.rb -d -i 1800 -s 20
203
+ PATH="$PATH:$GEM_BIN" #{chef_bin} -j /etc/chef/dna.json -c /etc/chef/client.rb -d -i 1800 -s 20 #{debug}
135
204
  CMD
136
205
  end
137
206
 
138
- def _recipes
139
- @_recipes ||= []
140
- end
141
-
142
207
  def method_missing(m,*args,&block)
143
208
  if cloud.respond_to?(m)
144
209
  cloud.send(m,*args,&block)
@@ -3,7 +3,7 @@ require 'uri' # for URI.parse in write_bootstrap_files
3
3
  module PoolParty
4
4
  # Chef class bootstrapping chef-client.
5
5
  class ChefClient < Chef
6
- dsl_methods :server_url,:validation_token, :validation_key
6
+ dsl_methods :server_url,:validation_token, :validation_key, :validation_client_name
7
7
 
8
8
  # When init_style.nil?, old behavior is used (just run the client).
9
9
  # If init_style is specified, bootstrap::client cookbook is executed
@@ -84,6 +84,7 @@ openid_url "#{openid_url}"
84
84
  }
85
85
  content+="validation_token \"#{validation_token}\"\n" if validation_token
86
86
  content+="validation_key \"/etc/chef/#{File.basename validation_key}\"\n" if validation_key
87
+ content+="validation_client_name \"#{validation_client_name}\"\n" if validation_client_name
87
88
  File.open(to, "w") do |f|
88
89
  f << content
89
90
  end
@@ -109,12 +110,15 @@ recipe_url "http://s3.amazonaws.com/chef-solo/bootstrap-latest.tar.gz"
109
110
  :init_style => init_style,
110
111
  :path => "/srv/chef",
111
112
  :serve_path => "/srv/chef",
112
- :server_fqdn => uri.host,
113
+ :server_fqdn => uri.host + uri.path,
113
114
  :server_port => uri.port,
114
115
  },
115
116
  },
116
117
  :run_list => [ 'recipe[bootstrap::client]' ],
117
118
  }
119
+ if validation_client_name
120
+ bootstrap_json[:bootstrap][:chef][:validation_client_name] = validation_client_name
121
+ end
118
122
  ChefAttribute.new(bootstrap_json).to_dna([], chef_json)
119
123
  end
120
124
  end
@@ -9,6 +9,20 @@ module PoolParty
9
9
  "chef-solo"
10
10
  end
11
11
 
12
+ def chef_cmd
13
+
14
+ if ENV["CHEF_DEBUG"]
15
+ debug = "-l debug"
16
+ else
17
+ debug = ""
18
+ end
19
+
20
+ return <<-CMD
21
+ PATH="$PATH:$GEM_BIN" #{chef_bin} -j /etc/chef/dna.json -c /etc/chef/solo.rb #{debug}
22
+ CMD
23
+ end
24
+
25
+
12
26
  # The NEW actual chef resolver.
13
27
  def build_tmp_dir
14
28
  base_directory = tmp_path/"etc"/"chef"
@@ -17,6 +31,7 @@ module PoolParty
17
31
  puts "Copying the chef-repo into the base directory from #{repo}"
18
32
 
19
33
  FileUtils.mkdir_p base_directory
34
+ FileUtils.mkdir_p roles_dir # Why do we need this??!?
20
35
  if File.directory?(repo)
21
36
  if File.exist?(base_directory)
22
37
  # First remove the directory
@@ -38,7 +53,7 @@ module PoolParty
38
53
 
39
54
  def write_solo_dot_rb(to=tmp_path/"etc"/"chef"/"solo.rb")
40
55
  content = <<-EOE
41
- cookbook_path ["/etc/chef/cookbooks", "/etc/chef/site-cookbooks"]
56
+ cookbook_path ["/etc/chef/cookbooks/cookbooks", "/etc/chef/cookbooks/site-cookbooks"]
42
57
  role_path "/etc/chef/roles"
43
58
  log_level :info
44
59
  EOE
@@ -56,6 +71,7 @@ log_level :info
56
71
  :poolparty => {
57
72
  :parent_name => cloud.parent.name,
58
73
  :name => cloud.name,
74
+ :pool_info => pool.to_hash
59
75
  }
60
76
  }
61
77
 
@@ -68,7 +84,7 @@ log_level :info
68
84
  :override_attributes => override_attributes.init_opts,
69
85
  :description => description
70
86
  })
71
- ca.to_dna _recipes.map {|a| File.basename(a) }, to
87
+ ca.to_dna _recipes(pool.chef_step).map {|a| File.basename(a) }, to
72
88
  end
73
89
  end
74
90
  end
@@ -55,6 +55,13 @@ You did not specify a cloud provider in your clouds.rb. Make sure you have a blo
55
55
  end
56
56
  end
57
57
 
58
+
59
+ # Upload the source to dest ( using rsync )
60
+ def upload source, dest
61
+ @uploads ||= []
62
+ @uploads << { :source => source, :dest => dest }
63
+ end
64
+
58
65
 
59
66
  # The pool can either be the parent (the context where the object is declared)
60
67
  # or the global pool object
@@ -178,6 +185,12 @@ No autoscalers defined
178
185
  end
179
186
 
180
187
  def compile!
188
+ unless @uploads.nil?
189
+ puts "Uploading files via rsync"
190
+ @uploads.each do |upload|
191
+ rsync upload[:source], upload[:dest]
192
+ end
193
+ end
181
194
  @chef.compile! unless @chef.nil?
182
195
  end
183
196
 
@@ -0,0 +1,432 @@
1
+ module PoolParty
2
+ class Cloud < Base
3
+ default_options(
4
+ :description => "PoolParty cloud",
5
+ :minimum_instances => 1,
6
+ :maximum_instances => 3
7
+ )
8
+
9
+ # returns an instance of Keypair
10
+ # You can pass either a filename which will be searched for in ~/.ec2/ and ~/.ssh/
11
+ # Or you can pass a full filepath
12
+ def keypair(n=nil, extra_paths=[])
13
+ return @keypair if @keypair
14
+ @keypair = case n
15
+ when String
16
+ Keypair.new(n, extra_paths)
17
+ when nil
18
+ fpath = CloudProviders::CloudProvider.default_keypair_path/"#{proper_name}"
19
+ File.exists?(fpath) ? Keypair.new(fpath, extra_paths) : generate_keypair(extra_paths)
20
+ else
21
+ raise ArgumentError, "There was an error when defining the keypair"
22
+ end
23
+ end
24
+
25
+ private
26
+ def generate_keypair(extra_paths=[])
27
+ puts "Generate the keypair for this cloud because its not found: #{proper_name}"
28
+ cloud_provider.send :generate_keypair, proper_name
29
+ Keypair.new(proper_name, extra_paths)
30
+ end
31
+
32
+ def after_initialized
33
+ raise PoolParty::PoolPartyError.create("NoCloudProvider", <<-EOE
34
+ You did not specify a cloud provider in your clouds.rb. Make sure you have a block that looks like:
35
+
36
+ using :ec2
37
+ EOE
38
+ ) unless cloud_provider
39
+ security_group(proper_name, :authorize => {:from_port => 22, :to_port => 22}) if security_groups.empty?
40
+ end
41
+
42
+ public
43
+ def instances(arg)
44
+ case arg
45
+ when Range
46
+ minimum_instances arg.first
47
+ maximum_instances arg.last
48
+ when Fixnum
49
+ minimum_instances arg
50
+ maximum_instances arg
51
+ when Hash
52
+ nodes(arg)
53
+ else
54
+ raise PoolParty::PoolPartyError.create("DslMethodCall", "You must call instances with either a number, a range or a hash (for a list of nodes)")
55
+ end
56
+ end
57
+
58
+ <<<<<<< HEAD:lib/poolparty/cloud.rb
59
+ # Chef
60
+ def chef_repo(filepath=nil)
61
+ return @chef_repo if @chef_repo
62
+ @chef_repo = filepath.nil? ? nil : File.expand_path(filepath)
63
+ end
64
+
65
+ def chef_attributes(hsh={}, &block)
66
+ @chef_attributes ||= ChefAttribute.new(hsh, &block)
67
+ end
68
+
69
+ def chef_override_attributes(hsh={}, &block)
70
+ @chef_override_attributes ||= ChefAttribute.new(hsh, &block)
71
+ end
72
+
73
+ # Upload the source to dest ( using rsync )
74
+ def upload source, dest
75
+ @uploads ||= []
76
+ @uploads << { :source => source, :dest => dest }
77
+ end
78
+
79
+ # === Description
80
+ #
81
+ # Set the recipe set and yield to a block. At the end
82
+ # of the block the current recipe set is returned to
83
+ # :default
84
+ #
85
+ # chef_set :install_mysql do
86
+ # recipe "mysql::download"
87
+ # end
88
+ #
89
+ # chef_set :write_conffiles do
90
+ # recipe "mysql::download", :hosts => hosts
91
+ # end
92
+ #
93
+ # chef_set :boot_master do
94
+ # recipe "mysql::master", :hosts => hosts
95
+ # end
96
+ #
97
+ # This enables you to select different recipe
98
+ # sets from the command line.
99
+ #
100
+ # cloud-start --recipe-set=write_conffiles.
101
+ #
102
+ # Why is this useful? Bootstrapping a cluster may
103
+ # require several stages of initialization. For example
104
+ # setting up a MySQL cluster first all the conf files
105
+ # must be written with the prior knowledge of all IP
106
+ # addresses in the cluster.
107
+ #
108
+ # After the conf files are written then the machines
109
+ # must be started in a specific order.
110
+ def chef_set name, &block
111
+ prev = current_chef_set
112
+ current_chef_set name
113
+ yield
114
+ current_chef_set prev
115
+ end
116
+
117
+
118
+ # Adds a chef recipe to the cloud
119
+ #
120
+ # The hsh parameter is inserted into the chef_override_attributes.
121
+ # The insertion is performed as follows. If
122
+ # the recipe name = "foo::bar" then effectively the call is
123
+ #
124
+ # chef_override_attributes.merge! { :foo => { :bar => hsh } }
125
+ def recipe(recipe_name, hsh={})
126
+ _recipes << recipe_name unless _recipes.include?(recipe_name)
127
+
128
+ head = {}
129
+ tail = head
130
+ recipe_name.split("::").each do |key|
131
+ unless key == "default"
132
+ n = {}
133
+ tail[key] = n
134
+ tail = n
135
+ end
136
+ end
137
+ tail.replace hsh
138
+
139
+ chef_override_attributes.merge!(head) unless hsh.empty?
140
+
141
+ end
142
+
143
+ def recipes(*recipes)
144
+ recipes.each do |r|
145
+ recipe(r)
146
+ end
147
+ end
148
+
149
+ private
150
+
151
+ def current_chef_set set = nil
152
+ if set
153
+ @current_chef_set = set
154
+ end
155
+ @current_chef_set ||= :default
156
+ end
157
+
158
+ # === Description
159
+ #
160
+ # Return a list of recipes from one of
161
+ # the chef recipe sets.
162
+ #
163
+ # === Parameters
164
+ #
165
+ # * set : Return the recipes from set chef_set
166
+ # If set is nil then return the current chef_set
167
+ #
168
+ # === See
169
+ #
170
+ # The doc for method chef_set
171
+ def _recipes set = nil
172
+ @_recipes ||= {}
173
+ @_recipes[:default] ||= []
174
+ @_recipes[current_chef_set] ||= []
175
+ @_recipes[set || current_chef_set]
176
+ end
177
+
178
+ # The NEW actual chef resolver.
179
+ def build_tmp_dir
180
+ base_directory = tmp_path/"etc"/"chef"
181
+ FileUtils.rm_rf base_directory
182
+ puts "Copying the chef-repo into the base directory from #{chef_repo}"
183
+ FileUtils.mkdir_p base_directory/"roles"
184
+ if File.directory?(chef_repo)
185
+ if File.exist?(base_directory)
186
+ # First remove the directory
187
+ FileUtils.remove_entry base_directory, :force => true
188
+ end
189
+ FileUtils.cp_r "#{chef_repo}/.", base_directory
190
+ else
191
+ raise "#{chef_repo} chef repo directory does not exist"
192
+ end
193
+ puts "Creating the dna.json"
194
+ chef_attributes.to_dna [], base_directory/"dna.json", {:run_list => ["role[#{name}]"]}
195
+ write_solo_dot_rb
196
+ write_chef_role_json tmp_path/"etc"/"chef"/"roles/#{name}.json"
197
+ end
198
+
199
+ def write_solo_dot_rb(to=tmp_path/"etc"/"chef"/"solo.rb")
200
+ content = <<-EOE
201
+ cookbook_path ["/etc/chef/site-cookbooks", "/etc/chef/cookbooks"]
202
+ role_path "/etc/chef/roles"
203
+ log_level :info
204
+ EOE
205
+
206
+ File.open(to, "w") do |f|
207
+ f << content
208
+ end
209
+ end
210
+
211
+ def write_chef_role_json(to=tmp_path/"etc"/"chef"/"dna.json")
212
+
213
+ # Add the parent name and the name of the cloud to
214
+ # the role for easy access in recipes.
215
+ pp = {
216
+ :poolparty => {
217
+ :parent_name => parent.name,
218
+ :name => name,
219
+ :pool_info => pool.to_hash
220
+ }
221
+ }
222
+
223
+ chef_override_attributes.merge! pp
224
+
225
+ ca = ChefAttribute.new({
226
+ :name => name,
227
+ :json_class => "Chef::Role",
228
+ :chef_type => "role",
229
+ :default_attributes => chef_attributes.init_opts,
230
+ :override_attributes => chef_override_attributes.init_opts,
231
+ :description => description
232
+ })
233
+ puts "================="
234
+ puts "Recipe Set #{pool.chef_set}"
235
+ puts _recipes(pool.chef_set)
236
+ if _recipes(pool.chef_set)
237
+ ca.to_dna _recipes(pool.chef_set).map {|a| File.basename(a) }, to
238
+ else
239
+ puts "No recipe set #{pool.chef_set} for this node"
240
+ end
241
+ puts "================="
242
+ end
243
+ =======
244
+ >>>>>>> 81a7e0df2d02545ff9b22572194a4f115f73906a:lib/poolparty/cloud.rb
245
+
246
+ # The pool can either be the parent (the context where the object is declared)
247
+ # or the global pool object
248
+ def pool
249
+ parent || pool
250
+ end
251
+
252
+ def tmp_path
253
+ "/tmp/poolparty" / pool.name / name
254
+ end
255
+
256
+ public
257
+
258
+ attr_reader :cloud_provider
259
+ def using(provider_name, &block)
260
+ return @cloud_provider if @cloud_provider
261
+ @cloud_provider = "#{provider_name}".constantize(CloudProviders).send(:new, provider_name, :cloud => self, &block)
262
+ # Decorate the cloud with the cloud_provider methods
263
+ (class << self; self; end).instance_variable_set('@cloud_provider', @cloud_provider)
264
+ (class << self; self; end).class_eval do
265
+ @cloud_provider.public_methods(false).each do |meth|
266
+ next if respond_to?(meth) || method_defined?(meth) || private_method_defined?(meth)
267
+ eval <<-EOE
268
+ def #{meth}(*args, &block)
269
+ @cloud_provider.send(:#{meth}, *args, &block)
270
+ end
271
+ EOE
272
+ end
273
+ end
274
+ end
275
+
276
+ def chef(chef_type=:solo, &block)
277
+ raise ArgumentError, "Chef type must be one of #{Chef.types.map{|v| ":" + v.to_s}.join(",")}." unless Chef.types.include?(chef_type)
278
+ @chef||=Chef.get_chef(chef_type,self,&block)
279
+ end
280
+ # compile the cloud spec and execute the compiled system and remote calls
281
+ def run
282
+ puts " running on #{cloud_provider.class}"
283
+ cloud_provider.run
284
+ <<<<<<< HEAD:lib/poolparty/cloud.rb
285
+ unless @uploads.nil?
286
+ puts "Uploading files via rsync"
287
+ @uploads.each do |upload|
288
+ rsync upload[:source], upload[:dest]
289
+ end
290
+ end
291
+ unless chef_repo.nil?
292
+ =======
293
+ unless @chef.nil?
294
+ >>>>>>> 81a7e0df2d02545ff9b22572194a4f115f73906a:lib/poolparty/cloud.rb
295
+ compile!
296
+ bootstrap!
297
+ end
298
+ end
299
+
300
+
301
+ # TODO: Incomplete and needs testing
302
+ # Shutdown and delete the load_balancers, auto_scaling_groups, launch_configurations,
303
+ # security_groups, triggers and instances defined by this cloud
304
+ def teardown
305
+ raise "Only Ec2 teardown supported" unless cloud_provider.name.to_s == 'ec2'
306
+ puts "! Tearing down cloud #{name}"
307
+ # load_balancers.each do |name, lb|
308
+ # puts "! Deleting load_balancer #{lb_name}"
309
+ # lb.teardown
310
+ # end
311
+ load_balancers.each do |lb|
312
+ puts "-----> Tearing down load balancer: #{lb.name}"
313
+ lb.teardown
314
+ end
315
+
316
+ rds_instances.each do |rds|
317
+ puts "-----> Tearing down RDS Instance: #{rds.name}"
318
+ rds.teardown
319
+ end
320
+ # instances belonging to an auto_scaling group must be deleted before the auto_scaling group
321
+ #THIS SCARES ME! nodes.each{|n| n.terminate_instance!}
322
+ # loop {nodes.size>0 ? sleep(4) : break }
323
+ if autoscalers.empty?
324
+ nodes.each do |node|
325
+ node.terminate!
326
+ end
327
+ else
328
+ autoscalers.each do |a|
329
+ puts "-----> Tearing down autoscaler #{a.name}"
330
+ a.teardown
331
+ end
332
+ end
333
+ # autoscalers.keys.each do |as_name|
334
+ # puts "! Deleting auto_scaling_group #{as_name}"
335
+ # cloud_provider.as.delete_autoscaling_group('AutoScalingGroupName' => as_name)
336
+ # end
337
+ #TODO: keypair.delete # Do we want to delete the keypair? probably, but not certain
338
+ end
339
+
340
+ def reboot!
341
+ orig_nodes = nodes
342
+ if autoscalers.empty?
343
+ puts <<-EOE
344
+ No autoscalers defined
345
+ Launching new nodes and then shutting down original nodes
346
+ EOE
347
+ # Terminate the nodes
348
+ orig_nodes.each_with_index do |node, i|
349
+ # Start new nodes
350
+ print "Starting node: #{i}...\n"
351
+ expand_by(1)
352
+ print "Terminating node: #{i}...\n"
353
+ node.terminate!
354
+ puts ""
355
+ end
356
+ else
357
+ # Terminate the nodes
358
+ @num_nodes = orig_nodes.size
359
+ orig_nodes.each do |node|
360
+ node.terminate!
361
+ puts "----> Terminated node: #{node.instance_id}"
362
+ # Wait for the autoscaler to boot the next node
363
+ puts "----> Waiting for new node to boot via the autoscaler"
364
+ loop do
365
+ reset!
366
+ break if nodes.size == @num_nodes
367
+ $stdout.print "."
368
+ $stdout.flush
369
+ sleep 1
370
+ end
371
+ end
372
+ end
373
+ run
374
+ puts ""
375
+ end
376
+
377
+ def compile!
378
+ @chef.compile! unless @chef.nil?
379
+ end
380
+
381
+ def bootstrap!
382
+ cloud_provider.bootstrap_nodes!(tmp_path)
383
+ end
384
+
385
+ def configure!
386
+ compile!
387
+ cloud_provider.configure_nodes!(tmp_path)
388
+ end
389
+
390
+ def reset!
391
+ cloud_provider.reset!
392
+ end
393
+
394
+ def ssh(num=0)
395
+ nodes[num].ssh
396
+ end
397
+
398
+ def rsync(source, dest)
399
+ nodes.each do |node|
400
+ node.rsync(:source => source, :destination => dest)
401
+ end
402
+ end
403
+
404
+ # TODO: list of nodes needs to be consistently sorted
405
+ def nodes
406
+ cloud_provider.nodes.select {|a| a.in_service? }
407
+ end
408
+
409
+ # Run command/s on all nodes in the cloud.
410
+ # Returns a hash of instance_id=>result pairs
411
+ def cmd(commands, opts={})
412
+ key_by = opts.delete(:key_by) || :instance_id
413
+ results = {}
414
+ threads = nodes.collect do |n|
415
+ puts "result for #{n.instance_id} ==> n.ssh(#{commands.inspect}, #{opts.inspect})"
416
+ Thread.new{ results[ n.send(key_by) ] = n.ssh(commands, opts) }
417
+ end
418
+ threads.each{ |aThread| aThread.join }
419
+ results
420
+ end
421
+
422
+ # Explicit proxies to cloud_provider methods
423
+ def run_instance(o={}); cloud_provider.run_instance(o);end
424
+ def terminate_instance!(o={}); cloud_provider.terminate_instance!(o);end
425
+ def describe_instances(o={}); cloud_provider.describe_instances(o);end
426
+ def describe_instance(o={}); cloud_provider.describe_instance(o);end
427
+
428
+ def proper_name
429
+ "#{parent.name}-#{name}"
430
+ end
431
+ end
432
+ end