poolparty 1.6.1 → 1.6.2

data/README.rdoc CHANGED
@@ -51,6 +51,7 @@ There are a number of commands PoolParty offers to interact with your cloud. The
  * <tt>cloud list</tt>
  * <tt>cloud show</tt>
  * <tt>cloud ssh</tt>
+ * <tt>cloud run</tt>
 
  Clouds are distinguished by security groups. If a security group is not specified in your cloud block, one will be created based on the naming convention poolname-cloudname.
 
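For reference, a minimal clouds.rb sketch of the naming convention described above; with no explicit security_group block, the cloud below gets a group named poolparty-chef (the names mirror the example that ships with the gem).

    pool "poolparty" do
      cloud "chef" do
        instances 1
        using :ec2
      end
    end
    # => security group "poolparty-chef" is created automatically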
data/VERSION.yml CHANGED
@@ -1,5 +1,5 @@
  ---
  :minor: 6
- :patch: 1
+ :patch: 2
  :major: 1
  :build:
data/bin/cloud CHANGED
@@ -40,6 +40,10 @@ EOS
 
  @loaded_pool = pool
  @loaded_clouds = command[:name] ? [pool.clouds[command[:name]]] : pool.clouds.map {|name,cld|cld}
+ if @loaded_clouds.count == 0
+ puts "No clouds loaded. Check your clouds.rb or -n option"
+ exit
+ end
  end
 
  run do |command|
@@ -4,11 +4,13 @@ pool "poolparty" do
  cloud "chef" do
  instances 1
  using :ec2
- chef_repo File.dirname(__FILE__)+"/chef_cloud/chef_repo"
- recipe "apache2"
- recipe "rsyslog::server"
- recipe "collectd"
- chef_attributes :apache2 => {:listen_ports => ["80", "8080"]}
+ chef :solo do
+ repo File.dirname(__FILE__)+"/chef_cloud/chef_repo"
+ recipe "apache2"
+ recipe "rsyslog::server"
+ recipe "collectd"
+ attributes :apache2 => {:listen_ports => ["80", "8080"]}
+ end
  user_data open(File.dirname(__FILE__)+"/chef_cloud/user_data").read
  security_group do
  authorize :from_port => "22", :to_port => "22"
@@ -21,7 +21,7 @@ module CloudProviders
  sleep(2)
  end
  end
- false
+ connected
  end
 
  def run(commands, o={})
@@ -38,6 +38,8 @@ module CloudProviders
  # Get the environment hash out of
  # the extra_ssh_ops and then delete
  # the element
+ ssh_error_msg="SSH is not available for this node. perhaps you need to authorize it?"
+ raise PoolParty::PoolPartyError.create("SSHError", ssh_error_msg) unless ssh_available?
  env = extra_ssh_ops[:env] || {}
  extra_ssh_ops.delete :env
 
@@ -47,6 +49,7 @@ module CloudProviders
  do_sudo = extra_ssh_ops[:do_sudo]
  extra_ssh_ops.delete :do_sudo
  end
+ do_sudo=user!="root"
 
  envstring = env.collect {|k,v| "#{k}=#{v}"}.join ' && '
  envstring += " && " unless envstring.size == 0
@@ -86,9 +89,12 @@ module CloudProviders
 
  def rsync( opts={} )
  raise StandardError.new("You must pass a :source=>uri option to rsync") unless opts[:source]
+ ssh_error_msg="SSH is not available for this node. perhaps you need to authorize it?"
+ raise PoolParty::PoolPartyError.create("SSHError", ssh_error_msg) unless ssh_available?
  destination_path = opts[:destination] || opts[:source]
  rsync_opts = opts[:rsync_opts] || '-va'
- rsync_opts += %q% --rsync-path="sudo rsync" --exclude=.svn --exclude=.git --exclude=.cvs %
+ rsync_opts += %q% --rsync-path="sudo rsync"% unless user=="root"
+ rsync_opts += %q% --exclude=.svn --exclude=.git --exclude=.cvs %
  cmd_string = "rsync -L -e 'ssh #{ssh_options}' #{rsync_opts} #{opts[:source]} #{user}@#{host}:#{destination_path}"
  out = system_run(cmd_string)
  out
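A sketch of how the guarded rsync above is called; the paths are hypothetical and `node` stands for a remote-instance object that exposes this connection method.

    node.rsync(:source => "/tmp/poolparty/etc/chef/", :destination => "/etc/chef/")
    # :destination defaults to :source, :rsync_opts defaults to '-va', and
    # --rsync-path="sudo rsync" is appended only when connecting as a non-root user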
@@ -59,7 +59,7 @@ module CloudProviders
 
  # Load credentials from file
  def self.load_keys_from_credential_file(filename=default_credential_file, caching=true)
- return {:access_key => @access_key, :secret_access_key => @secret_access_key} if @access_key and @secret_access_key
+ return {:access_key => @access_key, :secret_access_key => @secret_access_key} if @access_key and @secret_access_key and caching
  return {} if filename.nil? or not File.exists?(filename)
  puts("Reading keys from file: #{filename}")
  File.open(filename).each_line { |line|
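The extra caching guard means the memoized keys are only reused when asked for; a sketch, with a hypothetical credential file path:

    CloudProviders::Ec2.load_keys_from_credential_file("/home/deploy/.aws_creds")        # reads the file and memoizes the keys
    CloudProviders::Ec2.load_keys_from_credential_file("/home/deploy/.aws_creds")        # returns the memoized {:access_key, :secret_access_key} hash
    CloudProviders::Ec2.load_keys_from_credential_file("/home/deploy/.aws_creds", false) # caching=false forces the file to be re-read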
@@ -230,6 +230,10 @@ module CloudProviders
  end
 
  def bootstrap_nodes!(tmp_path=nil)
+ unless security_groups.map {|a| a.authorizes.map {|t| t.from_port.to_i }.flatten }.flatten.include?(22)
+ warn "Cloud security_groups are not authorized for ssh. Cannot bootstrap."
+ return
+ end
  tmp_path ||= cloud.tmp_path
  nodes.each do |node|
  next unless node.in_service?
@@ -241,6 +245,10 @@ module CloudProviders
  end
 
  def configure_nodes!(tmp_path=nil)
+ unless security_groups.map {|a| a.authorizes.map {|t| t.from_port.to_i }.flatten }.flatten.include?(22)
+ warn "Cloud security_groups are not authorized for ssh. Cannot configure."
+ return
+ end
  tmp_path ||= cloud.tmp_path
  nodes.each do |node|
  next unless node.in_service?
@@ -248,6 +256,9 @@ module CloudProviders
  node.rsync_dir(tmp_path) if tmp_path
  node.run_chef!
  end
+ ebs_volume_groups.each do |vol_grp|
+ vol_grp.verify_attachments nodes
+ end
  end
 
  def assign_elastic_ips
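Both guards above require at least one security group rule covering port 22; the authorization from the example clouds.rb in this release satisfies the check:

    cloud "chef" do
      security_group do
        authorize :from_port => "22", :to_port => "22"
      end
    end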
@@ -384,7 +395,7 @@ module CloudProviders
  # size 200
  # end
  def ebs_volumes(name=nil, &block)
- ebs_volume_groups << ElasticBlockStoreGroup.new(sub_opts,&block)
+ ebs_volume_groups << ElasticBlockStoreGroup.new(sub_opts,&block) if block
  end
 
  def assign_ebs_volumes
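With the `if block` guard, a bare `ebs_volumes` call is now a no-op; only the block form registers a group. A sketch based on the commented example around this hunk; treat the exact option names as assumptions:

    ebs_volumes do
      device "/dev/sdf"   # assumed option; attach_volumes uses a per-group device
      size 200            # from the commented example above
    end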
@@ -404,25 +415,29 @@ module CloudProviders
  # The function will return volumes matching *all* filters. A volume is a filter match if *any* one of the filter values equals the volume parameter value.
  def list_ec2_volumes(filters=nil)
  @volumes_on_ec2=ec2.describe_volumes.volumeSet.item unless @volumes_on_ec2
- return @volumes_on_ec2 if filters.nil? # no filter to check, so return at once
- @volumes_on_ec2.select{|vol| # select volumes for which no filter failed
- not filters.map {|filter_key, filter_val|
- filter_key=filter_key.to_s if filter_key.is_a?(Symbol) # filter_key may be given as a symbol
- raise ArgumentError, "Filter key #{filter_key} is invalid" unless vol.has_key?(filter_key)
- if filter_val.is_a?(Array) # Deal with multiple filter values
- filter_val.map{|val| val.is_a?(String) ? val : val.to_s}.member?(vol[filter_key]) # make sure fiter_val array values are Strings before checking for match
- else
- filter_val.is_a?(String) ? filter_val : filter_val.to_s==vol[filter_key] # make sure fiter_val is a String before comparing
- end
- }.member?(false) # Check if a filter failed, the 'not' statement at the beginning of the map block negates this so 'select' will choose only when no filter failed
- }.compact # remove nil results from volume set.
+ (if filters.nil? # no filter to check, so return at once
+ @volumes_on_ec2
+ else
+ @volumes_on_ec2.select{|vol| # select volumes for which no filter failed
+ not filters.map {|filter_key, filter_val|
+ filter_key=filter_key.to_s if filter_key.is_a?(Symbol) # filter_key may be given as a symbol
+ raise ArgumentError, "Filter key #{filter_key} is invalid" unless vol.has_key?(filter_key)
+ if filter_val.is_a?(Array) # Deal with multiple filter values
+ filter_val.map{|val| val.is_a?(String) ? val : val.to_s}.member?(vol[filter_key]) # make sure fiter_val array values are Strings before checking for match
+ else
+ (filter_val.is_a?(String) ? filter_val : filter_val.to_s)==vol[filter_key] # make sure fiter_val is a String before comparing
+ end
+ }.member?(false) # Check if a filter failed, the 'not' statement at the beginning of the map block negates this so 'select' will choose only when no filter failed
+ }.compact # remove nil results from volume set.
+ end
+ ).map{|vol| ElasticBlockStore.new(vol,:cloud => cloud)}
  end
 
  # Read credentials from credential_file if one exists
  def credential_file(file=nil)
  unless file.nil?
- dsl_options[:credential_file]=file
- dsl_options.merge((Ec2.load_keys_from_credential_file(file)))
+ dsl_options[:credential_file]=file
+ dsl_options.merge!(Ec2.load_keys_from_credential_file(file))
  else
  fetch(:credential_file)
  end
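A sketch of the filter semantics implemented above: keys may be symbols or strings, values may be scalars or arrays, and every result now comes back wrapped in an ElasticBlockStore.

    cloud.list_ec2_volumes                                   # all volumes, unfiltered
    cloud.list_ec2_volumes(:status => "available")           # single filter value
    cloud.list_ec2_volumes("availabilityZone" => ["us-east-1a", "us-east-1b"],
                           :status => "available")           # an array matches any listed value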
@@ -41,6 +41,10 @@ module CloudProviders
  def reachable?
  ping_port self.public_ip, 22
  end
+
+ def ssh_available?
+ cloud.security_groups.map {|a| a.authorizes.map {|t| t.from_port.to_i }.flatten }.flatten.include?(22) and reachable? and in_service?
+ end
 
  def in_service?
  running?
@@ -2,13 +2,14 @@ module CloudProviders
  class ElasticBlockStore < Ec2Helper
 
  # instance methods
- attr_accessor :volumeId, :size, :snapshotId, :status, :attachments, :device, :availabilityZone, :instance_id
- attr_reader :createTime
+ attr_reader :volumeId, :size, :snapshotId, :status, :attachments, :device, :availabilityZone, :instanceId
+ attr_reader :createTime
 
  alias :volume_id :volumeId
  alias :snapshot_id :snapshotId
  alias :availability_zone :availabilityZone
  alias :create_time :createTime
+ alias :instance_id :instanceId
 
  def createTime(create_time)
  unless create_time.class==DateTime
@@ -24,10 +25,14 @@ module CloudProviders
 
  def parse_raw_response(raw_response)
  @raw_respons = raw_response
- raw_response.each{|k,v| send k+"=", v if respond_to?(k+"=") }
- if raw_response.attachmentSet.respond_to?(:item)
+ raw_response.each{|k,v| instance_variable_set("@"+k,v) if respond_to?(k) }
+ unless raw_response.attachmentSet.nil?
  @attachments=raw_response.attachmentSet.item
- @attachments.each{|attch| instance_id=attch.instanceId if attch.status=="attached"}
+ @attachments.each{|attch| if attch.status=="attached" or attch.status=="attaching"
+ @instanceId=attch.instanceId
+ @device=attch.device
+ end
+ }
  end
  end
 
@@ -43,11 +48,19 @@ module CloudProviders
  end
 
  def attach(ec2_instance,device)
- ec2.attach_volume(:volume_id => volume_id, :instance_id => ec2_instance.instance_id, :device => device).return=="true"
+ if ec2.attach_volume(:volume_id => volume_id, :instance_id => ec2_instance.instance_id, :device => device).return=="true"
+ update!
+ return true
+ end
+ false
  end
 
  def detach
- ec2.detach_volume(:volume_id => volume_id).return=="true"
+ if ec2.detach_volume(:volume_id => volume_id).return=="true"
+ update!
+ return true
+ end
+ false
  end
 
  def detach!
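Both attach and detach now refresh the volume via update! before returning true; a sketch, assuming `node` is a running instance in the same availability zone and /dev/sdh is free on it:

    vol = cloud.list_ec2_volumes(:status => "available").first
    vol.attach(node, "/dev/sdh")   # true on success, with the volume state refreshed
    vol.detach                     # likewise refreshes state before returning true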
@@ -57,17 +57,23 @@ module CloudProviders
  # Check no volumes are attached to node on device
  skip_node=false
  cloud.list_ec2_volumes.each{|vol|
- if vol.attached?(node.instance_id)and vol.device == device
+ if vol.attached?(node.instance_id) and vol.device == device
  warn "A volume is allready attached to device #{device} of instance #{node.instance_id}"
  skip_node = true
  end
  }
  unless skip_node
  vol=get_free_volume(node.zone)
- vol.device=device
  vol.attach(node,device)
  end
  }
  end
+
+ def verify_attachments(nodes)
+ nodes_without_volume=nodes.select do |node|
+ volumes_attached_to(node.id).size=0
+ end
+ attach nodes_without_volume if nodes_without_volume.any?
+ end
  end
  end
@@ -38,29 +38,15 @@ module CloudProviders
 
  def chef_bootstrapped?
  # do_sudo is false cause we want to capture the return code of the call
- @chef_bootstrapped ||= !ssh(["gem list | grep chef"], :do_sudo => false).empty?
+ @chef_bootstrapped ||= cloud.chef.node_bootsrapped?(self)
  end
 
  def bootstrap_chef!
- unless chef_bootstrapped?
- ssh([
- 'apt-get update',
- 'apt-get autoremove -y',
- 'apt-get install -y ruby ruby-dev rubygems git-core libopenssl-ruby',
- 'gem sources -a http://gems.opscode.com',
- 'gem install chef ohai --no-rdoc --no-ri' ])
- end
- ssh(bootstrap_gems.collect { |gem| "gem install #{gem} --no-rdoc --no-ri" } )
+ cloud.chef.node_bootstrap(self) unless chef_bootstrapped?
  end
 
  def run_chef!
- chef_solo_cmd = <<-CMD
- $GEM_BIN/chef-solo -j /etc/chef/dna.json -c /etc/chef/solo.rb
- CMD
- envhash = {
- :GEM_BIN => %q%$(gem env | grep "EXECUTABLE DIRECTORY" | awk "{print \\$4}")%
- }
- ssh([chef_solo_cmd.strip.squeeze(' ')], :env => envhash )
+ cloud.chef.node_run!(self)
  end
 
  def run
@@ -127,6 +113,9 @@ module CloudProviders
  Time.now - Time.parse(launch_time)
  end
 
+ def ssh_available?
+ warn "Implemented in cloudprovider instance class. something is wrong"
+ end
  # def to_s
  # (cloud ? to_hash.merge(:cloud=>cloud.name) : to_hash)
  # end
@@ -0,0 +1,80 @@
+ module PoolParty
+ class Chef < Base
+ def self.types
+ return [:solo,:client]
+ end
+
+ def self.get_chef(type,cloud,&block)
+ ("Chef" + type.to_s.capitalize).constantize(PoolParty).send(:new,type,:cloud => cloud,&block)
+ end
+ # Chef
+
+ def attributes(hsh={}, &block)
+ @attributes ||= ChefAttribute.new(hsh, &block)
+ end
+
+ def override_attributes(hsh={}, &block)
+ @override_attributes ||= ChefAttribute.new(hsh, &block)
+ end
+
+ # Adds a chef recipe to the cloud
+ #
+ # The hsh parameter is inserted into the override_attributes.
+ # The insertion is performed as follows. If
+ # the recipe name = "foo::bar" then effectively the call is
+ #
+ # override_attributes.merge! { :foo => { :bar => hsh } }
+ def recipe(recipe_name, hsh={})
+ _recipes << recipe_name unless _recipes.include?(recipe_name)
+
+ head = {}
+ tail = head
+ recipe_name.split("::").each do |key|
+ unless key == "default"
+ n = {}
+ tail[key] = n
+ tail = n
+ end
+ end
+ tail.replace hsh
+
+ override_attributes.merge!(head) unless hsh.empty?
+ end
+
+ def recipes(*recipes)
+ recipes.each do |r|
+ recipe(r)
+ end
+ end
+
+ def node_run!(remote_instance)
+ envhash = {
+ :GEM_BIN => %q%$(gem env | grep "EXECUTABLE DIRECTORY" | awk "{print \\$4}")%
+ }
+ remote_instance.ssh([chef_cmd.strip.squeeze(' ')], :env => envhash )
+ end
+
+ def node_bootsrapped?(remote_instance)
+ remote_instance.ssh(["(gem list; dpkg -l chef) | grep -q chef && echo 'chef installed'"], :do_sudo => false).empty?
+ end
+ def node_bootstrap!(remote_instance)
+ remote_instance.ssh([
+ 'apt-get update',
+ 'apt-get autoremove -y',
+ 'apt-get install -y ruby ruby-dev rubygems git-core libopenssl-ruby',
+ 'gem sources -a http://gems.opscode.com',
+ 'gem install chef ohai --no-rdoc --no-ri' ])
+ remote_instance.ssh(remote_instance.bootstrap_gems.collect { |gem| "gem install #{gem} --no-rdoc --no-ri" } )
+ end
+ private
+
+ def _recipes
+ @_recipes ||= []
+ end
+
+ def method_missing(m,*args,&block)
+ cloud.send(m,*args,&block) if cloud.respond_to?(m)
+ end
+
+ end
+ end
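The recipe/override_attributes merge documented above works like this sketch (the recipe name and attributes are illustrative, and the repo path is hypothetical):

    chef :solo do
      repo "/path/to/chef_repo"
      recipe "apache2::mod_ssl", :listen_ports => ["443"]
      # effectively:
      #   override_attributes.merge!(:apache2 => { :mod_ssl => { :listen_ports => ["443"] } })
    end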
@@ -33,4 +33,4 @@ module PoolParty
  end
 
  end
- end
+ end
@@ -0,0 +1,63 @@
+ module PoolParty
+ # Chef class bootstrapping chef-client.
+ class ChefClient < Chef
+ dsl_methods :server_url,:validation_token
+
+ def openid_url(url=nil)
+ if url.nil?
+ return @openid_url||= (u=URI.parse(server_url)
+ u.port=4001
+ openid_url u.to_s)
+ else
+ @openid_url=url
+ end
+ end
+
+ def roles(*roles)
+ return @_roles||=[cloud.name] if roles.empty?
+ @_roles=roles
+ end
+
+ def compile!
+ build_tmp_dir
+ end
+
+ private
+ def after_initialized
+ raise PoolPartyError.create("ChefArgumentMissing", "server_url must be specified!") unless server_url
+ end
+ def chef_cmd
+ return <<-CMD
+ PATH="$PATH:$GEM_BIN" chef-client -j /etc/chef/dna.json -c /etc/chef/client.rb -d -i 1800 -s 20
+ CMD
+ end
+ # The NEW actual chef resolver.
+ def build_tmp_dir
+ base_directory = tmp_path/"etc"/"chef"
+ FileUtils.rm_rf base_directory
+ FileUtils.mkdir_p base_directory
+ puts "Creating the dna.json"
+ attributes.to_dna [], base_directory/"dna.json", {:run_list => roles.map{|r| "role[#{r}]"} + _recipes.map{|r| "recipe[#{r}]"}}.merge(attributes.init_opts)
+ write_client_dot_rb
+ end
+
+ def write_client_dot_rb(to=tmp_path/"etc"/"chef"/"client.rb")
+ content = <<-EOE
+ log_level :info
+ log_location "/var/log/chef/client.log"
+ ssl_verify_mode :verify_none
+ file_cache_path "/var/cache/chef"
+ pid_file "/var/run/chef/client.pid"
+ Chef::Log::Formatter.show_time = true
+ openid_url "#{openid_url}"
+ EOE
+ %w(search_url role_url remotefile_url template_url registration_url).each{|url|
+ content+="#{url} \"#{server_url}\"\n"
+ }
+ content+="validation_token \"#{validation_token}\"\n" if validation_token
+ File.open(to, "w") do |f|
+ f << content
+ end
+ end
+ end
+ end
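A sketch of driving the new client mode from clouds.rb; the server URL is hypothetical, and roles defaults to the cloud name when omitted:

    cloud "chef" do
      using :ec2
      chef :client do
        server_url "http://chef.example.com:4000"
        roles "webserver", "monitoring"
        recipe "apache2"
      end
    end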
@@ -0,0 +1,70 @@
+ module PoolParty
+ class ChefSolo < Chef
+ dsl_methods :repo
+ def compile!
+ build_tmp_dir
+ end
+
+ private
+ def chef_cmd
+ return <<-CMD
+ PATH="$PATH:$GEM_BIN" chef-solo -j /etc/chef/dna.json -c /etc/chef/solo.rb
+ CMD
+ end
+ # The NEW actual chef resolver.
+ def build_tmp_dir
+ base_directory = tmp_path/"etc"/"chef"
+ FileUtils.rm_rf base_directory
+ puts "Copying the chef-repo into the base directory from #{repo}"
+ FileUtils.mkdir_p base_directory/"roles"
+ if File.directory?(repo)
+ if File.exist?(base_directory)
+ # First remove the directory
+ FileUtils.remove_entry base_directory, :force => true
+ end
+ FileUtils.cp_r "#{repo}/.", base_directory
+ else
+ raise "#{repo} chef repo directory does not exist"
+ end
+ puts "Creating the dna.json"
+ attributes.to_dna [], base_directory/"dna.json", {:run_list => ["role[#{cloud.name}]"]}
+ write_solo_dot_rb
+ write_chef_role_json tmp_path/"etc"/"chef"/"roles/#{cloud.name}.json"
+ end
+
+ def write_solo_dot_rb(to=tmp_path/"etc"/"chef"/"solo.rb")
+ content = <<-EOE
+ cookbook_path ["/etc/chef/site-cookbooks", "/etc/chef/cookbooks"]
+ role_path "/etc/chef/roles"
+ log_level :info
+ EOE
+
+ File.open(to, "w") do |f|
+ f << content
+ end
+ end
+
+ def write_chef_role_json(to=tmp_path/"etc"/"chef"/"dna.json")
+
+ # Add the parent name and the name of the cloud to
+ # the role for easy access in recipes.
+ pp = {
+ :poolparty => {
+ :parent_name => cloud.parent.name,
+ :name => cloud.name,
+ }
+ }
+
+ override_attributes.merge! pp
+ ca = ChefAttribute.new({
+ :name => cloud.name,
+ :json_class => "Chef::Role",
+ :chef_type => "role",
+ :default_attributes => attributes.init_opts,
+ :override_attributes => override_attributes.init_opts,
+ :description => description
+ })
+ ca.to_dna _recipes.map {|a| File.basename(a) }, to
+ end
+ end
+ end
@@ -55,112 +55,6 @@ You did not specify a cloud provider in your clouds.rb. Make sure you have a blo
  end
  end
 
- # Chef
- def chef_repo(filepath=nil)
- return @chef_repo if @chef_repo
- @chef_repo = filepath.nil? ? nil : File.expand_path(filepath)
- end
-
- def chef_attributes(hsh={}, &block)
- @chef_attributes ||= ChefAttribute.new(hsh, &block)
- end
-
- def chef_override_attributes(hsh={}, &block)
- @chef_override_attributes ||= ChefAttribute.new(hsh, &block)
- end
-
- # Adds a chef recipe to the cloud
- #
- # The hsh parameter is inserted into the chef_override_attributes.
- # The insertion is performed as follows. If
- # the recipe name = "foo::bar" then effectively the call is
- #
- # chef_override_attributes.merge! { :foo => { :bar => hsh } }
- def recipe(recipe_name, hsh={})
- _recipes << recipe_name unless _recipes.include?(recipe_name)
-
- head = {}
- tail = head
- recipe_name.split("::").each do |key|
- unless key == "default"
- n = {}
- tail[key] = n
- tail = n
- end
- end
- tail.replace hsh
-
- chef_override_attributes.merge!(head) unless hsh.empty?
-
- end
-
- def recipes(*recipes)
- recipes.each do |r|
- recipe(r)
- end
- end
-
- private
-
- def _recipes
- @_recipes ||= []
- end
-
- # The NEW actual chef resolver.
- def build_tmp_dir
- base_directory = tmp_path/"etc"/"chef"
- FileUtils.rm_rf base_directory
- puts "Copying the chef-repo into the base directory from #{chef_repo}"
- FileUtils.mkdir_p base_directory/"roles"
- if File.directory?(chef_repo)
- if File.exist?(base_directory)
- # First remove the directory
- FileUtils.remove_entry base_directory, :force => true
- end
- FileUtils.cp_r "#{chef_repo}/.", base_directory
- else
- raise "#{chef_repo} chef repo directory does not exist"
- end
- puts "Creating the dna.json"
- chef_attributes.to_dna [], base_directory/"dna.json", {:run_list => ["role[#{name}]"]}
- write_solo_dot_rb
- write_chef_role_json tmp_path/"etc"/"chef"/"roles/#{name}.json"
- end
-
- def write_solo_dot_rb(to=tmp_path/"etc"/"chef"/"solo.rb")
- content = <<-EOE
- cookbook_path ["/etc/chef/site-cookbooks", "/etc/chef/cookbooks"]
- role_path "/etc/chef/roles"
- log_level :info
- EOE
-
- File.open(to, "w") do |f|
- f << content
- end
- end
-
- def write_chef_role_json(to=tmp_path/"etc"/"chef"/"dna.json")
-
- # Add the parent name and the name of the cloud to
- # the role for easy access in recipes.
- pp = {
- :poolparty => {
- :parent_name => parent.name,
- :name => name,
- }
- }
-
- chef_override_attributes.merge! pp
- ca = ChefAttribute.new({
- :name => name,
- :json_class => "Chef::Role",
- :chef_type => "role",
- :default_attributes => chef_attributes.init_opts,
- :override_attributes => chef_override_attributes.init_opts,
- :description => description
- })
- ca.to_dna _recipes.map {|a| File.basename(a) }, to
- end
 
  # The pool can either be the parent (the context where the object is declared)
  # or the global pool object
@@ -191,12 +85,16 @@ log_level :info
  end
  end
  end
-
+
+ def chef(chef_type=:solo, &block)
+ raise ArgumentError, "Chef type must be one of #{Chef.types.map{|v| ":" + v.to_s}.join(",")}." unless Chef.types.include?(chef_type)
+ @chef||=Chef.get_chef(chef_type,self,&block)
+ end
  # compile the cloud spec and execute the compiled system and remote calls
  def run
  puts " running on #{cloud_provider.class}"
  cloud_provider.run
- unless chef_repo.nil?
+ unless @chef.nil?
  compile!
  bootstrap!
  end
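The new chef entry point accepts only the types reported by Chef.types; anything else raises. A sketch (the repo path is hypothetical):

    chef :solo do
      repo "/path/to/chef_repo"
    end
    # chef :server do ... end would raise:
    #   ArgumentError: Chef type must be one of :solo,:client.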
@@ -280,7 +178,7 @@ No autoscalers defined
  end
 
  def compile!
- build_tmp_dir unless chef_repo.nil?
+ @chef.compile! unless @chef.nil?
  end
 
  def bootstrap!
data/lib/poolparty.rb CHANGED
@@ -45,6 +45,9 @@ require "keypair"
  $LOAD_PATH.unshift(File.dirname(__FILE__)/"poolparty")
  %w( base
  chef_attribute
+ chef
+ chef_solo
+ chef_client
  cloud pool
  ).each do |lib|
  require "poolparty/#{lib}"
@@ -52,4 +55,4 @@ end
 
  require 'cloud_providers'
 
- puts "PoolParty core loadtime: #{Time.now-t}"
+ puts "PoolParty core loadtime: #{Time.now-t}"
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: poolparty
  version: !ruby/object:Gem::Version
- version: 1.6.1
+ version: 1.6.2
  platform: ruby
  authors:
  - Ari Lerner
@@ -11,7 +11,7 @@ autorequire:
  bindir: bin
  cert_chain: []
 
- date: 2010-02-20 00:00:00 -08:00
+ date: 2010-03-09 00:00:00 -08:00
  default_executable:
  dependencies: []
 
@@ -269,7 +269,10 @@ files:
  - lib/keypair.rb
  - lib/poolparty.rb
  - lib/poolparty/base.rb
+ - lib/poolparty/chef.rb
  - lib/poolparty/chef_attribute.rb
+ - lib/poolparty/chef_client.rb
+ - lib/poolparty/chef_solo.rb
  - lib/poolparty/cloud.rb
  - lib/poolparty/pool.rb
  - lib/poolparty/pool_party_error.rb