kytoon 1.3.1 → 1.3.2

Sign up to get free protection for your applications and to get access to all the features.
data/CHANGELOG CHANGED
@@ -1,3 +1,8 @@
1
+ * Mon Feb 11 2013 Dan Prince <dprince@redhat.com> - 1.3.2
2
+ -Add 'selinux_enabled' option for libvirt provider. If set this
3
+ option will set the SELinux context on /root/.ssh/authorized_keys
4
+ in the instance thus making it ssh'able when SELinux is enabled.
5
+
1
6
  * Thu Dec 13 2012 Dan Prince <dprince@redhat.com> - 1.3.1
2
7
  -SSH config fixes for libvirt and openstack providers. You can now
3
8
  ssh to localhost from the gateway node. (weird but consistent)
data/README.md CHANGED
@@ -63,6 +63,8 @@ each server group. These files typically live inside of project are
63
63
  provider specific. The config files control things like memory, hostname,
64
64
  flavor, etc. Each group should identify one instance as the 'gateway' host which marks it as the primary access point for SSH access into the group.
65
65
 
66
+ NOTE: As Kytoon relies on SSH access, the base images used for server groups must have the SSH daemon installed, enabled, and reachable. If there is a firewall on the guest images, it must be configured accordingly.
67
+
66
68
  By default Kytoon looks for config/server_group.json in the current directory.
67
69
  You can override this with Rake using GROUP_CONFIG or bin/kytoon using --group-config.
68
70
 
@@ -94,7 +96,10 @@ For Openstack:
94
96
 
95
97
  For Libvirt (uses libvirt DHCP server for instance IP configuration):
96
98
 
99
+ NOTE: Kytoon always uses the qemu:///system Libvirt connection.
97
100
  NOTE: Kytoon assumes you are using NAT networking for your libvirt instances. If you use bridged networking the IP discovery mechanism will fail.
101
+ NOTE: If the 'create_cow' option is set to 'true', the format for the disk driver must be 'qcow2' in the original guest image, or guest xml (and not 'raw', for instance). The original guest xml might need to be adjusted for this.
102
+ NOTE: You can dump the xml for the original guest image with the 'virsh --connect=qemu:///system dumpxml $DOMAIN' command.
98
103
 
99
104
  ```bash
100
105
 
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.3.1
1
+ 1.3.2
@@ -5,14 +5,16 @@
5
5
  "hostname": "nova1",
6
6
  "memory": "1",
7
7
  "original_xml": "/home/dprince/f17.xml",
8
- "create_cow": "true"
8
+ "create_cow": "true",
9
+ "selinux_enabled": "false"
9
10
  },
10
11
  {
11
12
  "hostname": "login",
12
13
  "memory": "1",
13
14
  "gateway": "true",
14
15
  "original_xml": "/home/dprince/f17.xml",
15
- "create_cow": "true"
16
+ "create_cow": "true",
17
+ "selinux_enabled": "false"
16
18
  }
17
19
 
18
20
  ]
@@ -76,6 +76,7 @@ class ServerGroup
76
76
  'original' => server_hash['original'],
77
77
  'original_xml' => server_hash['original_xml'],
78
78
  'create_cow' => server_hash['create_cow'],
79
+ 'selinux_enabled' => server_hash['selinux_enabled'],
79
80
  'disk_path' => server_hash['disk_path'],
80
81
  'ip_address' => server_hash['ip_address'],
81
82
  'gateway' => server_hash['gateway'] || "false"
@@ -121,7 +122,7 @@ class ServerGroup
121
122
  'servers' => []
122
123
  }
123
124
  @servers.each do |server|
124
- sg_hash['servers'] << {'hostname' => server['hostname'], 'memory' => server['memory'], 'gateway' => server['gateway'], 'original' => server['original'], 'original_xml' => server['original_xml'], 'create_cow' => server['create_cow'], 'disk_path' => server['disk_path'], 'ip_address' => server['ip_address']}
125
+ sg_hash['servers'] << {'hostname' => server['hostname'], 'memory' => server['memory'], 'gateway' => server['gateway'], 'original' => server['original'], 'original_xml' => server['original_xml'], 'create_cow' => server['create_cow'], 'disk_path' => server['disk_path'], 'selinux_enabled' => server['selinux_enabled'], 'ip_address' => server['ip_address']}
125
126
  end
126
127
 
127
128
  FileUtils.mkdir_p(@@data_dir)
@@ -165,7 +166,7 @@ class ServerGroup
165
166
  disk_path=File.join(image_dir, "#{sg.id}_#{server['hostname']}.img")
166
167
  server['disk_path'] = disk_path
167
168
 
168
- instance_ip = create_instance(sg.id, server['hostname'], server['memory'], server['original'], server['original_xml'], disk_path, server['create_cow'], ssh_public_key, sudo)
169
+ instance_ip = create_instance(sg.id, server['hostname'], server['memory'], server['original'], server['original_xml'], disk_path, server['create_cow'], server['selinux_enabled'], ssh_public_key, sudo)
169
170
  server['ip_address'] = instance_ip
170
171
  hosts_file_data += "#{instance_ip}\t#{server['hostname']}\n"
171
172
  sg.cache_to_disk
@@ -294,7 +295,9 @@ fi
294
295
  raise KytoonException, "Unable to find disk path for instance."
295
296
  end
296
297
 
297
- def self.create_instance(group_id, inst_name, memory_gigs, original, original_xml, disk_path, create_cow, ssh_public_key, sudo)
298
+ def self.create_instance(group_id, inst_name, memory_gigs, original, original_xml, disk_path, create_cow, selinux_enabled, ssh_public_key, sudo)
299
+
300
+ selinux_enabled = selinux_enabled =~ /(true|t|yes|y|1)$/i ? "true" : ""
298
301
 
299
302
  puts "Creating instance: #{inst_name}"
300
303
  instance_memory = (KIB_PER_GIG * memory_gigs.to_f).to_i
@@ -305,7 +308,6 @@ fi
305
308
  if [ -n "$DEBUG" ]; then
306
309
  set -x
307
310
  fi
308
- export VIRSH_DEFAULT_CONNECT_URI="qemu:///system"
309
311
  if [ -n "#{original_xml}" ]; then
310
312
  ORIGIN="--original-xml #{original_xml}"
311
313
  elif [ -n "#{original}" ]; then
@@ -316,7 +318,7 @@ fi
316
318
 
317
319
  if [ -n "#{create_cow}" ]; then
318
320
 
319
- #{sudo} virt-clone --connect="$VIRSH_DEFAULT_CONNECT_URI" \
321
+ #{sudo} virt-clone --connect=qemu:///system \
320
322
  --name '#{domain_name}' \
321
323
  --file '#{disk_path}' \
322
324
  --force \
@@ -328,7 +330,7 @@ if [ -n "#{create_cow}" ]; then
328
330
 
329
331
  else
330
332
 
331
- #{sudo} virt-clone --connect="$VIRSH_DEFAULT_CONNECT_URI" \
333
+ #{sudo} virt-clone --connect=qemu:///system \
332
334
  --name '#{domain_name}' \
333
335
  --file '#{disk_path}' \
334
336
  --force \
@@ -340,17 +342,28 @@ fi
340
342
  LV_ROOT=$(#{sudo} virt-filesystems -a #{disk_path} --logical-volumes | grep root)
341
343
  # If using LVM we inject the ssh key this way
342
344
  if [ -n "$LV_ROOT" ]; then
343
- #{sudo} guestfish --selinux add #{disk_path} : \
344
- run : \
345
- mount $LV_ROOT / : \
346
- sh "/bin/mkdir -p /root/.ssh" : \
347
- write-append /root/.ssh/authorized_keys "#{ssh_public_key}\n" : \
348
- sh "/bin/chmod -R 700 /root/.ssh"
345
+ if [ -n "#{selinux_enabled}" ]; then
346
+ #{sudo} guestfish --selinux add #{disk_path} : \
347
+ run : \
348
+ mount $LV_ROOT / : \
349
+ sh "/bin/mkdir -p /root/.ssh" : \
350
+ write-append /root/.ssh/authorized_keys "#{ssh_public_key}\n" : \
351
+ sh "/bin/chmod -R 700 /root/.ssh" : \
352
+ sh "load_policy -i" : \
353
+ sh "chcon system_u:object_r:ssh_home_t /root/.ssh/authorized_keys"
354
+ else
355
+ #{sudo} guestfish add #{disk_path} : \
356
+ run : \
357
+ mount $LV_ROOT / : \
358
+ sh "/bin/mkdir -p /root/.ssh" : \
359
+ write-append /root/.ssh/authorized_keys "#{ssh_public_key}\n" : \
360
+ sh "/bin/chmod -R 700 /root/.ssh"
361
+ fi
349
362
  fi
350
363
 
351
- #{sudo} virsh setmaxmem #{domain_name} #{instance_memory}
352
- #{sudo} virsh start #{domain_name}
353
- #{sudo} virsh setmem #{domain_name} #{instance_memory}
364
+ #{sudo} virsh --connect=qemu:///system setmaxmem #{domain_name} #{instance_memory}
365
+ #{sudo} virsh --connect=qemu:///system start #{domain_name}
366
+ #{sudo} virsh --connect=qemu:///system setmem #{domain_name} #{instance_memory}
354
367
 
355
368
  }
356
369
  retval=$?
@@ -393,13 +406,12 @@ fi
393
406
  if [ -n "$DEBUG" ]; then
394
407
  set -x
395
408
  fi
396
- export VIRSH_DEFAULT_CONNECT_URI="qemu:///system"
397
- if #{sudo} virsh dumpxml #{domain_name} &> /dev/null; then
398
- #{sudo} virsh destroy "#{domain_name}" &> /dev/null
399
- #{sudo} virsh undefine "#{domain_name}"
409
+ if #{sudo} virsh --connect=qemu:///system dumpxml #{domain_name} &> /dev/null; then
410
+ #{sudo} virsh --connect=qemu:///system destroy "#{domain_name}" &> /dev/null
411
+ #{sudo} virsh --connect=qemu:///system undefine "#{domain_name}"
400
412
  fi
401
413
  # If we used --preserve-data there will be no volume... ignore it
402
- #{sudo} virsh vol-delete --pool default "#{group_id}_#{inst_name}.img" &> /dev/null
414
+ #{sudo} virsh --connect=qemu:///system vol-delete --pool default "#{group_id}_#{inst_name}.img" &> /dev/null
403
415
  if [ -f "#{disk_path}" ]; then
404
416
  #{sudo} rm -f "#{disk_path}"
405
417
  fi
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: kytoon
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.3.1
4
+ version: 1.3.2
5
5
  prerelease:
6
6
  platform: ruby
7
7
  authors:
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2012-12-14 00:00:00.000000000 Z
12
+ date: 2013-02-11 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: rdoc
@@ -289,7 +289,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
289
289
  version: '0'
290
290
  segments:
291
291
  - 0
292
- hash: -598378511131357418
292
+ hash: -2314081733493131516
293
293
  required_rubygems_version: !ruby/object:Gem::Requirement
294
294
  none: false
295
295
  requirements: