kytoon 1.1.0 → 1.1.1

data/CHANGELOG CHANGED
@@ -1,3 +1,7 @@
+ * Thu Aug 23 2012 Dan Prince <dprince@redhat.com> - 1.1.1
+ - Add 'libvirt_use_sudo' option to config file.
+ - Move config parameter checks into specific providers.
+
  * Wed Aug 22 2012 Dan Prince <dprince@redhat.com> - 1.1.0
  - Add local libvirt provider based on virt-clone.
  - Libvirt: Support creating qcow2 disks during group creation.
data/README.rdoc CHANGED
@@ -12,7 +12,7 @@ Inspired by and based on the Chef VPC Toolkit.

  - Libvirt: manage instances on local machine w/ libvirt, virt-clone, and libguestfs
  - XenServer: manage instances on a remote XenServer box (via ssh)
- - Cloud Servers VPC: API driven. Supports Rackspace and OpenStack
+ - Cloud Servers VPC: API driven. Supports Rackspace Cloud and OpenStack.

  == Installation

@@ -27,10 +27,23 @@ Inspired by and based on the Chef VPC Toolkit.

  3) Create a .kytoon.conf file in your HOME directory that contains the following:

- # Set one of the following group_types
+ For libvirt:
+
+ # The default group type. Override with GROUP_TYPE
  group_type: libvirt
- #group_type: xenserver
- #group_type: cloud_server_vpc
+
+ # Whether commands to create local group should use sudo
+ libvirt_use_sudo: False
+
+ For XenServer:
+
+ # The default group type. Override with GROUP_TYPE
+ group_type: xenserver
+
+ For Cloud Servers VPC:
+
+ # The default group type. Override with GROUP_TYPE
+ group_type: cloud_servers_vpc

  # Cloud Servers VPC credentials
  cloud_servers_vpc_url: https://your.vpc.url/
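
Put together, a minimal ~/.kytoon.conf for the libvirt provider would look roughly like the following (a sketch assembled from the fragments above; the values are placeholders, not recommendations):

  # ~/.kytoon.conf
  group_type: libvirt        # may be overridden per run with the GROUP_TYPE environment variable
  libvirt_use_sudo: False    # set to True if virsh/virt-clone/qemu-img must run via sudo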
data/VERSION CHANGED
@@ -1 +1 @@
- 1.1.0
+ 1.1.1
@@ -4,3 +4,8 @@ require 'kytoon/providers/cloud_servers_vpc/server'
  require 'kytoon/providers/cloud_servers_vpc/server_group'
  require 'kytoon/providers/cloud_servers_vpc/ssh_public_key'
  require 'kytoon/providers/cloud_servers_vpc/vpn_network_interface'
+ require 'kytoon/util'
+
+ Kytoon::Util.check_config_param('cloud_servers_vpc_url')
+ Kytoon::Util.check_config_param('cloud_servers_vpc_username')
+ Kytoon::Util.check_config_param('cloud_servers_vpc_password')
@@ -33,10 +33,12 @@ class ServerGroup

  attr_accessor :id
  attr_accessor :name
+ attr_accessor :use_sudo

  def initialize(options={})
    @id = options[:id] || Time.now.to_i
    @name = options[:name]
+   @use_sudo = options[:use_sudo]
    @servers=[]
  end

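A rough construction sketch for the new accessor (names and values are illustrative only, not taken from the gem's documentation):

  sg = Kytoon::Providers::Libvirt::ServerGroup.new(
    :name => 'demo_group',
    :use_sudo => 'true'
  )
  sg.use_sudo   # => "true"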
@@ -57,9 +59,13 @@ class ServerGroup

  json_hash=JSON.parse(json)

+ configs = Util.load_configs
+ use_sudo = ENV['LIBVIRT_USE_SUDO'] || configs['libvirt_use_sudo']
+
  sg=ServerGroup.new(
    :id => json_hash["id"],
-   :name => json_hash["name"]
+   :name => json_hash["name"],
+   :use_sudo => use_sudo
  )
  json_hash["servers"].each do |server_hash|

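The new lookup gives the environment variable precedence over the config file; as a standalone sketch of just that expression (hash contents hypothetical):

  configs = { 'libvirt_use_sudo' => 'False' }   # as parsed from ~/.kytoon.conf
  ENV['LIBVIRT_USE_SUDO'] = 'true'
  use_sudo = ENV['LIBVIRT_USE_SUDO'] || configs['libvirt_use_sudo']
  # use_sudo => "true"   (the environment variable wins whenever it is set)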
@@ -125,16 +131,18 @@ class ServerGroup
  end

  def delete
+   sudo = @use_sudo =~ /(true|t|yes|y|1)$/i ? "sudo" : ""
    servers.each do |server|
-     ServerGroup.cleanup_instances(@id, server['hostname'], server['disk_path'])
+     ServerGroup.cleanup_instances(@id, server['hostname'], server['disk_path'], sudo)
    end
    out_file=File.join(@@data_dir, "#{@id}.json")
    File.delete(out_file) if File.exists?(out_file)
  end

  def self.create(sg)
-   ssh_public_key = Kytoon::Util.load_public_key

+   ssh_public_key = Kytoon::Util.load_public_key
+   sudo = sg.use_sudo =~ /(true|t|yes|y|1)$/i ? "sudo" : ""
    hosts_file_data = "127.0.0.1\tlocalhost localhost.localdomain\n"
    sg.servers.each do |server|

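The =~ /(true|t|yes|y|1)$/i check above collapses the configured value into either a "sudo" command prefix or an empty string; a small sketch of the same mapping (helper name hypothetical):

  def sudo_prefix(value)
    value =~ /(true|t|yes|y|1)$/i ? "sudo" : ""
  end

  sudo_prefix('True')    # => "sudo"
  sudo_prefix('yes')     # => "sudo"
  sudo_prefix('1')       # => "sudo"
  sudo_prefix('False')   # => ""
  sudo_prefix('')        # => ""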
@@ -142,7 +150,7 @@ class ServerGroup
    disk_path=File.join(image_dir, "#{sg.id}_#{server['hostname']}.img")
    server['disk_path'] = disk_path

-   instance_ip = create_instance(sg.id, server['hostname'], server['memory'], server['original'], server['original_xml'], disk_path, server['create_cow'], ssh_public_key)
+   instance_ip = create_instance(sg.id, server['hostname'], server['memory'], server['original'], server['original_xml'], disk_path, server['create_cow'], ssh_public_key, sudo)
    server['ip_address'] = instance_ip
    hosts_file_data += "#{instance_ip}\t#{server['hostname']}\n"
    sg.cache_to_disk
@@ -218,7 +226,7 @@ fi
    raise "Unable to find disk path for instance."
  end

- def self.create_instance(group_id, inst_name, memory_gigs, original, original_xml, disk_path, create_cow, ssh_public_key)
+ def self.create_instance(group_id, inst_name, memory_gigs, original, original_xml, disk_path, create_cow, ssh_public_key, sudo)

    puts "Creating instance: #{inst_name}"
    instance_memory = (KIB_PER_GIG * memory_gigs.to_f).to_i
@@ -240,7 +248,7 @@ fi

  if [ -n "#{create_cow}" ]; then

- virt-clone --connect="$VIRSH_DEFAULT_CONNECT_URI" \
+ #{sudo} virt-clone --connect="$VIRSH_DEFAULT_CONNECT_URI" \
    --name '#{domain_name}' \
    --file '#{disk_path}' \
    --force \
@@ -248,11 +256,11 @@ if [ -n "#{create_cow}" ]; then
    --preserve-data \
    || { echo "failed to virt-clone"; exit 1; }

- qemu-img create -f qcow2 -o backing_file=#{original_disk_path} "#{disk_path}"
+ #{sudo} qemu-img create -f qcow2 -o backing_file=#{original_disk_path} "#{disk_path}"

  else

- virt-clone --connect="$VIRSH_DEFAULT_CONNECT_URI" \
+ #{sudo} virt-clone --connect="$VIRSH_DEFAULT_CONNECT_URI" \
    --name '#{domain_name}' \
    --file '#{disk_path}' \
    --force \
@@ -261,10 +269,10 @@ else

  fi

- LV_ROOT=$(virt-filesystems -a #{disk_path} --logical-volumes | grep root)
+ LV_ROOT=$(#{sudo} virt-filesystems -a #{disk_path} --logical-volumes | grep root)
  # If using LVM we inject the ssh key this way
  if [ -n "$LV_ROOT" ]; then
- guestfish --selinux add #{disk_path} : \
+ #{sudo} guestfish --selinux add #{disk_path} : \
    run : \
    mount $LV_ROOT / : \
    sh "/bin/mkdir -p /root/.ssh" : \
@@ -272,9 +280,9 @@ if [ -n "$LV_ROOT" ]; then
    sh "/bin/chmod -R 700 /root/.ssh"
  fi

- virsh setmaxmem #{domain_name} #{instance_memory}
- virsh start #{domain_name}
- virsh setmem #{domain_name} #{instance_memory}
+ #{sudo} virsh setmaxmem #{domain_name} #{instance_memory}
+ #{sudo} virsh start #{domain_name}
+ #{sudo} virsh setmem #{domain_name} #{instance_memory}

  }
  retval=$?
@@ -285,7 +293,7 @@ virsh setmem #{domain_name} #{instance_memory}

  # lookup server IP here...
  mac_addr = nil
- dom_xml = %x{virsh --connect=qemu:///system dumpxml #{domain_name}}
+ dom_xml = %x{#{sudo} virsh --connect=qemu:///system dumpxml #{domain_name}}
  dom = REXML::Document.new(dom_xml)
  REXML::XPath.each(dom, "//interface/mac") do |interface_xml|
    mac_addr = interface_xml.attributes['address']
@@ -306,21 +314,21 @@ virsh setmem #{domain_name} #{instance_memory}

  end

- def self.cleanup_instances(group_id, inst_name, disk_path)
+ def self.cleanup_instances(group_id, inst_name, disk_path, sudo)
    domain_name="#{group_id}_#{inst_name}"
    out = %x{
  if [ -n "$DEBUG" ]; then
    set -x
  fi
  export VIRSH_DEFAULT_CONNECT_URI="qemu:///system"
- if virsh dumpxml #{domain_name} &> /dev/null; then
-   virsh destroy "#{domain_name}" &> /dev/null
-   virsh undefine "#{domain_name}"
+ if #{sudo} virsh dumpxml #{domain_name} &> /dev/null; then
+   #{sudo} virsh destroy "#{domain_name}" &> /dev/null
+   #{sudo} virsh undefine "#{domain_name}"
  fi
  # If we used --preserve-data there will be no volume... ignore it
- virsh vol-delete --pool default "#{group_id}_#{inst_name}.img" &> /dev/null
+ #{sudo} virsh vol-delete --pool default "#{group_id}_#{inst_name}.img" &> /dev/null
  if [ -f "#{disk_path}" ]; then
-   rm -f "#{disk_path}"
+   #{sudo} rm -f "#{disk_path}"
  fi
  }
  puts out
@@ -1,7 +1,3 @@
- require 'kytoon/providers/cloud_servers_vpc'
- require 'kytoon/providers/libvirt'
- require 'kytoon/providers/xenserver'
-
  class ServerGroup

  @@group_class = nil
@@ -12,10 +8,13 @@ class ServerGroup
    configs = Util.load_configs
    group_type = ENV['GROUP_TYPE'] || configs['group_type']
    if group_type == "cloud_server_vpc" then
+     require 'kytoon/providers/cloud_servers_vpc'
      @@group_class = Kytoon::Providers::CloudServersVPC::ServerGroup
    elsif group_type == "xenserver" then
+     require 'kytoon/providers/xenserver'
      @@group_class = Kytoon::Providers::Xenserver::ServerGroup
    elsif group_type == "libvirt" then
+     require 'kytoon/providers/libvirt'
      @@group_class = Kytoon::Providers::Libvirt::ServerGroup
    else
      raise "Invalid 'group_type' specified in config file."
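
Moving the require calls inside the dispatch means only the selected provider is loaded, so the credential checks that cloud_servers_vpc.rb now performs at load time never run for libvirt or XenServer users. A rough standalone restatement of that flow (not code from the gem; assumes Kytoon::Util is already loaded):

  group_type = ENV['GROUP_TYPE'] || Kytoon::Util.load_configs['group_type']
  case group_type
  when 'libvirt'
    require 'kytoon/providers/libvirt'             # no VPC credentials needed
  when 'xenserver'
    require 'kytoon/providers/xenserver'
  when 'cloud_server_vpc'
    require 'kytoon/providers/cloud_servers_vpc'   # triggers the check_config_param calls
  else
    raise "Invalid 'group_type' specified in config file."
  end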
data/lib/kytoon/util.rb CHANGED
@@ -30,9 +30,6 @@ module Util

  if File.exists?(config_file) then
    configs=YAML.load_file(config_file)
-   raise_if_nil_or_empty(configs, "cloud_servers_vpc_url")
-   raise_if_nil_or_empty(configs, "cloud_servers_vpc_username")
-   raise_if_nil_or_empty(configs, "cloud_servers_vpc_password")
    @@configs=configs
  else
    raise "Failed to load kytoon config file. Please configure /etc/kytoon.conf or create a .kytoon.conf config file in your HOME directory."
@@ -57,9 +54,10 @@ module Util

  end

- def self.raise_if_nil_or_empty(options, key)
-   if not options or options[key].nil? or options[key].empty? then
-     raise "Please specify a valid #{key.to_s} parameter."
+ def self.check_config_param(key)
+   configs = load_configs
+   if not configs or configs[key].nil? or configs[key].empty? then
+     raise "Please specify '#{key.to_s}' in your kytoon config file."
    end
  end

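A short usage sketch of the renamed helper (the call and message mirror the code above; the missing-key situation is hypothetical):

  require 'kytoon/util'

  # A no-op when 'cloud_servers_vpc_url' is present and non-empty in the loaded config;
  # otherwise it raises:
  Kytoon::Util.check_config_param('cloud_servers_vpc_url')
  # RuntimeError: Please specify 'cloud_servers_vpc_url' in your kytoon config file.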
data/test/client_test.rb CHANGED
@@ -1,5 +1,6 @@
  $:.unshift File.dirname(__FILE__)
  require 'test_helper'
+ require 'kytoon/providers/cloud_servers_vpc'

  module Kytoon
  module Providers
@@ -2,6 +2,7 @@ $:.unshift File.dirname(__FILE__)
  require 'test_helper'

  require 'tempfile'
+ require 'kytoon/providers/cloud_servers_vpc'

  module Kytoon
  module Vpn
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: kytoon
  version: !ruby/object:Gem::Version
-   version: 1.1.0
+   version: 1.1.1
  prerelease:
  platform: ruby
  authors:
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2012-08-22 00:00:00.000000000 Z
+ date: 2012-08-24 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: rdoc
@@ -235,7 +235,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: '0'
  segments:
  - 0
- hash: -990140921897305519
+ hash: -1959876688659758005
  required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements: