libvirtinator 0.0.0

lib/libvirtinator/config.rb ADDED
@@ -0,0 +1,44 @@
+ namespace :libvirtinator do
+   # TODO: fix needing to define a stage before being able to install
+   desc 'Write example config files'
+   task :install do
+     run_locally do
+       execute "mkdir", "-p", "config/deploy", "templates/libvirtinator"
+       {
+         'examples/Capfile'                  => 'Capfile_example',
+         'examples/config/deploy.rb'         => 'config/deploy_example.rb',
+         'examples/config/sysadmins_keys.rb' => 'config/sysadmins_keys_example.rb',
+         'examples/config/deploy/vm_name.rb' => 'config/deploy/vm_name_example.rb',
+         'examples/first_boot.sh.erb'        => 'templates/libvirtinator/first_boot_example.sh.erb',
+         'examples/fstab.erb'                => 'templates/libvirtinator/fstab_example.erb',
+         'examples/hostname.erb'             => 'templates/libvirtinator/hostname_example.erb',
+         'examples/hosts.erb'                => 'templates/libvirtinator/hosts_example.erb',
+         'examples/interfaces.erb'           => 'templates/libvirtinator/interfaces_example.erb',
+         'examples/server.xml.erb'           => 'templates/libvirtinator/server_example.xml.erb',
+         'examples/sudoers-sudo.erb'         => 'templates/libvirtinator/sudoers-sudo_example.erb',
+         'examples/vmbuilder-init.sh.erb'    => 'templates/libvirtinator/vmbuilder-init_example.sh.erb',
+         'examples/vmbuilder.cfg.erb'        => 'templates/libvirtinator/vmbuilder_example.cfg.erb',
+       }.each do |source, destination|
+         config = File.read(File.dirname(__FILE__) + "/#{source}")
+         File.open("./#{destination}", 'w') { |f| f.write(config) }
+         info "Wrote '#{destination}'"
+       end
+       info "Now remove the '_example' portion of their names or diff with existing files and add the needed lines."
+     end
+   end
+
+   desc 'Write an example VM config file'
+   task :install_vm do
+     run_locally do
+       execute "mkdir", "-p", "config/deploy"
+       {
+         'examples/config/deploy/vm_name.rb' => 'config/deploy/vm_name_example.rb',
+       }.each do |source, destination|
+         config = File.read(File.dirname(__FILE__) + "/#{source}")
+         File.open("./#{destination}", 'w') { |f| f.write(config) }
+         info "Wrote '#{destination}'"
+       end
+       info "Now remove the '_example' portion of the name or diff with existing files and add the needed lines."
+     end
+   end
+ end
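
Per the TODO above, the install tasks themselves run through Capistrano, so at least one stage file must already exist before they can be invoked. A hypothetical first run, assuming a stage named vm_name:

    $ cap vm_name libvirtinator:install     # writes every example config and template
    $ cap vm_name libvirtinator:install_vm  # writes only config/deploy/vm_name_example.rb

Each generated file carries an '_example' suffix; rename it or merge it into your existing file, as the task output suggests.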
lib/libvirtinator/examples/Capfile ADDED
@@ -0,0 +1,3 @@
+ # Load the Capistrano DSL and set up stages
+ require 'capistrano/setup'
+ require 'libvirtinator'
lib/libvirtinator/examples/config/deploy/vm_name.rb ADDED
@@ -0,0 +1,28 @@
+ set :host_machine_name, "my_host" # must match the "<host>_*" setting prefixes in config/deploy.rb
+ set :user, -> { ENV['USER'] } # SSH user
+
+ role :app, "#{fetch(:user)}@#{fetch(:host_machine_name)}.example.com"
+
+ set :base_image, "ubuntu-14.04-v0.0.0-docker1.3.1.qcow2"
+ set :node_name, -> { fetch(:stage) }
+
+ set :data_disk_enabled, true
+ set :data_disk_gb, "50"
+ set :data_disk_type, "lv" # "lv" or "qemu"
+ set :data_disk_mount_point, "/var/www" # inside the VM
+ set :data_disk_partition, "0" # set to "0" for none (normal),
+                               # or to <partition number> for legacy logical volumes w/ partitions
+ set :memory_gb, "2"
+ set :cpus, "4"
+
+ set :ip, "123.123.123.123"
+ set :cidr, "123_123_123_123-27"
+
+ set :node_fqdn, -> { "#{fetch(:node_name)}.example.com" }
+ set :app_fqdn, "my-app.example.com"
+ set :hostname, "my-app"
+
+ set :usergroups, ["sysadmins"]
+ fetch(:usergroups).each do |usergroup|
+   require "./config/#{usergroup}_keys.rb"
+ end
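
Note that :cidr is never parsed as an address; it is only a key prefix used to look up the network settings defined in config/deploy.rb. A minimal sketch of the resolution, using the example values above:

    set :cidr, "123_123_123_123-27"
    fetch("#{fetch(:cidr)}_gateway")  # => "123.123.123.1"
    fetch("#{fetch(:cidr)}_netmask")  # => "255.255.255.224"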
lib/libvirtinator/examples/config/deploy.rb ADDED
@@ -0,0 +1,65 @@
+ # config valid only for Capistrano 3.2.1
+ lock '3.2.1'
+
+ set :log_level, :info
+ #set :log_level, :debug
+
+ # Set up the host machines you will use
+ set "my_host_data_disk_vg_path", "/dev/ubuntu-vg"
+ set "my_host_dns_nameservers", "8.8.8.8 8.8.4.4"
+ set "my_host_bridge", "br0"
+ set "my_host_root_partitions_path", "/RootPartitions"
+
+ set "my_other_host_data_disk_vg_path", "/dev/ubuntu-vg"
+ set "my_other_host_dns_nameservers", "8.8.8.8 8.8.4.4"
+ set "my_other_host_bridge", "br0"
+ set "my_other_host_root_partitions_path", "/RootPartitions"
+
+ # Set up the CIDRs you will use
+ set "123_123_123_123-27_network", "123.123.123.0"
+ set "123_123_123_123-27_gateway", "123.123.123.1"
+ set "123_123_123_123-27_broadcast", "123.123.123.31"
+ set "123_123_123_123-27_netmask", "255.255.255.224"
+
+ set "231_231_231_231-27_network", "231.231.231.0"
+ set "231_231_231_231-27_gateway", "231.231.231.1"
+ set "231_231_231_231-27_broadcast", "231.231.231.31"
+ set "231_231_231_231-27_netmask", "255.255.255.224"
+
+ # Global dns-search. Can be overridden by setting the same
+ # in a VM's settings file (a config/deploy/<stage>.rb file).
+ set :dns_search, "example.com example2.com"
+
+ # Set up vmbuilder for building a base image
+ set :release_name, "ubuntu-14.04-v0.0.1-docker1.3.1"
+ set :vmbuilder_run_command, -> {
+   [ "kvm", "ubuntu",
+     "-o",
+     "--debug",
+     "--verbose",
+     "--dest=/tmp/#{fetch(:release_name)}",
+     "--config=templates/#{fetch(:release_name)}.cfg",
+     "--execscript=templates/#{fetch(:release_name)}-init.sh",
+     "--firstboot=/tmp/first_boot.sh",
+     "--rootsize=15360",
+     "--swapsize=2048"
+   ].join(' ')
+ }
+
+
+ ## Settings that shouldn't need to be changed:
+ set :nbd_run_file, -> { "/var/lock/#{fetch(:node_name)}.nbd" }
+ set :nbd_lock_file, -> { "/var/lock/qemu-nbd-#{fetch(:nbd)}" }
+ set :dev_nbd, -> { "/dev/#{fetch(:nbd)}" }
+ set :dev_nbdp1, -> { "/dev/#{fetch(:nbd)}p1" }
+ set :dns_nameservers, -> { fetch("#{fetch(:host_machine_name)}_dns_nameservers") }
+ set :bridge, -> { fetch("#{fetch(:host_machine_name)}_bridge") }
+ set :root_partitions_path, -> { fetch("#{fetch(:host_machine_name)}_root_partitions_path") }
+ set :base_image_path, -> { "#{fetch(:root_partitions_path)}/#{fetch(:base_image)}" }
+ set :root_image_path, -> { "#{fetch(:root_partitions_path)}/#{fetch(:node_name)}-root.qcow2" }
+ set :mount_point, -> { "#{fetch(:root_partitions_path)}/#{fetch(:node_name)}-root.qcow2_mnt" }
+ set :data_disk_vg_path, -> { fetch("#{fetch(:host_machine_name)}_data_disk_vg_path") }
+ set :data_disk_lv_name, -> { "#{fetch(:node_name)}-data" }
+ set :data_disk_lv_path, -> { "#{fetch(:data_disk_vg_path)}/#{fetch(:data_disk_lv_name)}" }
+ set :data_disk_qemu_path, -> { "#{fetch(:root_partitions_path)}/#{fetch(:node_name)}-data.qcow2" }
+
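
The host-machine settings use the same prefix convention: the lambdas under "Settings that shouldn't need to be changed" interpolate :host_machine_name into the lookup key, so the prefix must exactly match the value set in the VM's stage file. For example:

    set :host_machine_name, "my_host"
    fetch(:bridge)  # resolves fetch("my_host_bridge") => "br0"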
lib/libvirtinator/examples/config/sysadmins_keys.rb ADDED
@@ -0,0 +1,20 @@
+ set :sysadmins, [
+   {
+     "name"     => "username",
+     "disabled" => false,
+     "groups"   => ["sudo", "docker"],
+     "ssh_keys" => [
+       "ssh-rsa blahblahblah username@computer",
+       "ssh-rsa blahblahblahother username@computer2"
+     ]
+   },
+   {
+     "name"     => "username-other",
+     "disabled" => false,
+     "groups"   => ["sudo", "docker"],
+     "ssh_keys" => [
+       "ssh-rsa blahblahblah username-other@computer",
+       "ssh-rsa blahblahblahother username-other@computer2"
+     ]
+   }
+ ]
lib/libvirtinator/examples/first_boot.sh.erb ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+
+ # Delete the ubuntu user's password
+ passwd -d ubuntu
+
+ # Regenerate SSH host keys
+ rm /etc/ssh/ssh_host*key*
+ dpkg-reconfigure -fnoninteractive -pcritical openssh-server
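
Regenerating the host keys here matters because every VM cloned from the same base image would otherwise present identical SSH host keys; deleting them and letting dpkg-reconfigure recreate them gives each clone unique keys on its first boot.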
lib/libvirtinator/examples/fstab.erb ADDED
@@ -0,0 +1,8 @@
+ # /etc/fstab: static file system information.
+ # <file system> <mount point> <type> <options> <dump> <pass>
+ proc      /proc  proc  defaults  0  0
+ /dev/vda1 /      ext4  defaults  0  0
+ /dev/vda2 swap   swap  defaults  0  0
+ <% if @data_disk_enabled %>
+ /dev/vdb<%= @data_disk_partition unless @data_disk_partition == "0" %> <%= @data_disk_mount_point %> ext4 defaults 0 2
+ <% end %>
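
For illustration, with the example settings above (data_disk_enabled true, data_disk_partition "0", mount point /var/www), the conditional block renders to:

    /dev/vdb /var/www ext4 defaults 0 2

With a legacy partition number such as "1", the device would instead render as /dev/vdb1.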
lib/libvirtinator/examples/hostname.erb ADDED
@@ -0,0 +1 @@
+ <%= @hostname %>
lib/libvirtinator/examples/hosts.erb ADDED
@@ -0,0 +1,11 @@
+ 127.0.0.1 localhost
+ 127.0.1.1 <%= @node_fqdn %> <%= @node_name %>
+ 127.0.1.1 <%= @app_fqdn %> <%= @hostname %>
+
+ # The following lines are desirable for IPv6 capable hosts
+ ::1     ip6-localhost ip6-loopback
+ fe00::0 ip6-localnet
+ ff00::0 ip6-mcastprefix
+ ff02::1 ip6-allnodes
+ ff02::2 ip6-allrouters
+ ff02::3 ip6-allhosts
lib/libvirtinator/examples/interfaces.erb ADDED
@@ -0,0 +1,17 @@
+ # This file describes the network interfaces available on your system
+ # and how to activate them. For more information, see interfaces(5).
+
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+
+ # The primary network interface
+ auto eth0
+ iface eth0 inet static
+     network <%= @network %>
+     gateway <%= @gateway %>
+     address <%= @ip %>
+     broadcast <%= @broadcast %>
+     netmask <%= @netmask %>
+     dns-nameservers <%= @dns_nameservers %>
+     dns-search <%= @dns_search %>
lib/libvirtinator/examples/server.xml.erb ADDED
@@ -0,0 +1,56 @@
+ <domain type='kvm'>
+   <name><%= @node_name %></name>
+   <memory><%= @memory_gb %></memory>
+   <currentMemory><%= @memory_gb %></currentMemory>
+   <vcpu><%= @cpus %></vcpu>
+   <os>
+     <type arch='x86_64' machine='pc-0.12'>hvm</type>
+     <boot dev='hd'/>
+   </os>
+   <features>
+     <acpi/>
+   </features>
+   <clock offset='utc'/>
+   <on_poweroff>destroy</on_poweroff>
+   <on_reboot>restart</on_reboot>
+   <on_crash>destroy</on_crash>
+   <devices>
+     <emulator>/usr/bin/kvm</emulator>
+     <disk type='file' device='disk'>
+       <driver name='qemu' type='qcow2'/>
+       <source file='<%= @root_image_path %>'/>
+       <target dev='vda' bus='virtio'/>
+     </disk>
+     <% if @data_disk_enabled %>
+     <% if @data_disk_type == "lv" %>
+     <disk type='file' device='disk'>
+       <driver name='qemu' type='raw'/>
+       <source file='<%= @data_disk_lv_path %>'/>
+       <target dev='vdb' bus='virtio'/>
+     </disk>
+     <% end %>
+     <% if @data_disk_type == "qemu" %>
+     <disk type='file' device='disk'>
+       <driver name='qemu' type='qcow2'/>
+       <source file='<%= @data_disk_qemu_path %>'/>
+       <target dev='vdb' bus='virtio'/>
+     </disk>
+     <% end %>
+     <% end %>
+     <interface type='bridge'>
+       <source bridge='<%= @bridge %>'/>
+       <model type='virtio'/>
+     </interface>
+     <input type='mouse' bus='ps2'/>
+     <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
+     <video>
+       <model type='cirrus' vram='9216' heads='1'/>
+     </video>
+     <serial type='pty'>
+       <target port='0'/>
+     </serial>
+     <console type='pty'>
+       <target port='0'/>
+     </console>
+   </devices>
+ </domain>
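
Note that libvirt interprets a bare <memory> element in KiB, its default unit. The vm:define_domain task below accounts for this by multiplying :memory_gb by 1024 * 1024 before rendering, so the example `set :memory_gb, "2"` becomes <memory>2097152</memory>.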
lib/libvirtinator/examples/sudoers-sudo.erb ADDED
@@ -0,0 +1,2 @@
+ %sudo ALL=(ALL) ALL
+ # %sudo ALL=(ALL) NOPASSWD: ALL
lib/libvirtinator/examples/vmbuilder-init.sh.erb ADDED
@@ -0,0 +1,9 @@
+ #!/bin/bash
+
+ chroot $1 apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ chroot $1 sh -c "echo deb https://get.docker.com/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
+ chroot $1 apt-get purge -y grub
+ chroot $1 apt-get update -qq
+ chroot $1 apt-get install -y lxc-docker grub-pc
+ chroot $1 sed -i'' -e"s/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX=\"cgroup_enable=memory swapaccount=1\"/" /etc/default/grub
+ chroot $1 update-grub
lib/libvirtinator/examples/vmbuilder.cfg.erb ADDED
@@ -0,0 +1,16 @@
+ [DEFAULT]
+ arch = amd64
+ tmpfs = -
+ user = ubuntu
+ name = ubuntu
+ execscript = /tmp/init.sh
+ firstboot = /tmp/first_boot.sh
+
+ [ubuntu]
+ mirror = http://archive.ubuntu.com/ubuntu
+ flavour = virtual
+ suite = trusty
+ domain = example.com
+ components = main,restricted,universe,multiverse
+ removepkg = grub
+ addpkg = openssh-server, unattended-upgrades, acpid, aptitude, language-pack-en-base, bwm-ng, htop, iotop, bzip2, file, unzip, zip, rar, unrar, nmap, man-db, curl, tmux, git-core, vim, locate, ipcalc, sl, nagios-plugins-basic, wget, rsync, tcpdump, mtr, apparmor, cgroup-lite, apt-transport-https, linux-image-generic, grub-pc
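
This config, together with vmbuilder-init.sh.erb and first_boot.sh.erb above, is rendered and uploaded by the image:build_base task in the next file. Note that rootsize and swapsize are instead passed on the vmbuilder command line (see :vmbuilder_run_command in config/deploy.rb), since vmbuilder does not pick them up from the cfg file.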
lib/libvirtinator/image.rb ADDED
@@ -0,0 +1,185 @@
+ namespace :image do
+   desc "Build a base qcow2 image."
+   task :build_base do
+     on roles(:app) do
+       as :root do
+         ["first_boot.sh", "vmbuilder-init.sh", "vmbuilder.cfg"].each do |file|
+           template = File.new(File.expand_path("templates/#{file}.erb")).read
+           generated_config_file = ERB.new(template).result(binding)
+           upload! StringIO.new(generated_config_file), "/tmp/#{file}"
+           execute("chown", "-R", "root:root", "/tmp/#{file}")
+           execute("chmod", "770", "/tmp/#{file}")
+         end
+         # rootsize & swapsize settings do not get picked up in the cfg file, so set them here
+         if test "vmbuilder", fetch(:vmbuilder_run_command)
+           execute "mv /tmp/#{fetch(:release_name)}/*.qcow2 /tmp/#{fetch(:release_name)}/#{fetch(:release_name)}.qcow2"
+           info("Build finished successfully!")
+           info("You probably want to run 'cp /tmp/#{fetch(:release_name)}/#{fetch(:release_name)}.qcow2 <root partitions path>'.")
+           info("If you ran this on an Ubuntu 14.04 or later host, you'll probably want to make the image compatible " +
+             "with older versions of qemu using a command like this: 'sudo qemu-img amend -f qcow2 -o compat=0.10 #{fetch(:release_name)}.qcow2'.")
+         end
+         execute "rm", "/tmp/first_boot.sh", "-f"
+       end
+     end
+   end
+
+   #desc "Mount qcow2 image by creating a run file holding the nbd needed."
+   task :mount do
+     on roles(:app) do
+       as :root do
+         if test "[", "-f", fetch(:nbd_run_file), "]"
+           unless test "[", "-f", fetch(:nbd_lock_file), "]"
+             unless test "mountpoint", "-q", fetch(:mount_point)
+               info "Removing leftover run file"
+               execute "rm", fetch(:nbd_run_file), "-f"
+               unless test "[", "-f", fetch(:nbd_run_file), "]"
+                 Rake::Task['image:mount'].reenable
+                 return Rake::Task['image:mount'].invoke
+               end
+             end
+           end
+           raise "nbd run file already exists. Is it already connected?"
+         end
+         info "Mounting #{fetch(:root_image_path)} on #{fetch(:mount_point)}"
+         set :nbd_connected, false
+         until fetch(:nbd_connected)
+           Rake::Task['image:connect_to_unused_nbd'].reenable
+           Rake::Task['image:connect_to_unused_nbd'].invoke
+         end
+         raise "Mount point #{fetch(:mount_point)} is already mounted" if test "mountpoint", "-q", fetch(:mount_point)
+         execute "mkdir", "-p", fetch(:mount_point)
+         execute "mount", fetch(:dev_nbdp1), fetch(:mount_point)
+         raise "Failed to mount #{fetch(:mount_point)}" unless test "mountpoint", "-q", fetch(:mount_point)
+         info "Mounted #{fetch(:root_image_path)} on #{fetch(:mount_point)} using #{fetch(:dev_nbd)}"
+       end
+     end
+   end
+
+   #desc "Un-mount qcow2 image"
+   task :umount do
+     on roles(:app) do
+       as :root do
+         if test "[", "-f", fetch(:nbd_run_file), "]"
+           info "found #{fetch(:nbd_run_file)}"
+         else
+           info "Unable to read #{fetch(:nbd_run_file)}"
+         end
+         set :nbd, capture("cat", fetch(:nbd_run_file)).chomp
+         unless test "mountpoint", "-q", fetch(:mount_point)
+           info "#{fetch(:mount_point)} is not mounted"
+         end
+         info "Unmounting root image #{fetch(:root_image_path)}"
+         execute "umount", fetch(:mount_point)
+         if test "mountpoint", "-q", fetch(:mount_point)
+           info "Failed to umount #{fetch(:mount_point)}"
+         end
+         Rake::Task['image:disconnect_from_nbd'].invoke
+         execute "rm", fetch(:mount_point), "-rf"
+         raise "Failed to remove #{fetch(:mount_point)}" if test "[", "-d", fetch(:mount_point), "]"
+       end
+     end
+   end
+
+   desc "Find the base image for each root qcow2 image."
+   task :list_bases do
+     on roles(:app) do
+       as :root do
+         set :files, -> { capture("ls", "#{fetch(:root_partitions_path)}/*.qcow2").split }
+         if fetch(:files, "").empty?
+           fatal "Error: No qcow2 files found in #{fetch(:root_partitions_path)}"
+           exit
+         end
+         fetch(:files).each do |image_file|
+           backing_file = ""
+           capture("qemu-img info #{image_file}").each_line do |line|
+             if line =~ /backing\ file:/
+               backing_file = line.split[2]
+             end
+           end
+           unless backing_file.empty?
+             info "#{backing_file} < #{image_file}"
+           else
+             info "No backing file found for #{image_file}"
+           end
+         end
+       end
+     end
+   end
+
+   task :connect_to_unused_nbd do
+     on roles(:app) do
+       as :root do
+         set :prelock, -> { "#{fetch(:nbd_lock_file)}.prelock" }
+         begin
+           raise "Error: #{fetch(:root_image_path)} not found!" unless test "[", "-f", fetch(:root_image_path), "]"
+           set :nbd, "nbd#{rand(16)}"
+           info "Randomly trying the #{fetch(:nbd)} network block device"
+           if test "[", "-f", fetch(:prelock), "]"
+             info "Another process is checking #{fetch(:nbd)}. Trying again..."
+             set :nbd_connected, false
+             return
+           else
+             execute "touch", fetch(:prelock)
+             info "Checking for qemu-nbd created lock file"
+             if test "[", "-f", fetch(:nbd_lock_file), "]"
+               info "#{fetch(:dev_nbd)} lockfile already in place - nbd device may be in use. Trying again..."
+               set :nbd_connected, false
+               return
+             end
+             if test "[", "-b", fetch(:dev_nbdp1), "]"
+               info "nbd device in use but no lockfile. Trying again..."
+               set :nbd_connected, false
+               return
+             end
+             info "Found unused block device"
+
+             execute "qemu-nbd", "-c", fetch(:dev_nbd), fetch(:root_image_path)
+             info "Waiting for block device to come online . . . "
+             begin
+               Timeout::timeout(20) do
+                 until test "[", "-b", fetch(:dev_nbdp1), "]"
+                   sleep 3
+                 end
+                 execute "echo", fetch(:nbd), ">", fetch(:nbd_run_file)
+                 info "device online"
+                 set :nbd_connected, true
+               end
+             rescue Timeout::Error
+               fatal "Error: unable to create block dev #{fetch(:dev_nbd)}, trying again..."
+               set :nbd_connected, false
+               return
+               #raise "unable to create block device #{fetch(:dev_nbd)}"
+             end
+           end
+         ensure
+           execute "rm", fetch(:prelock), "-f"
+         end
+       end
+     end
+   end
+
+   task :disconnect_from_nbd do
+     on roles(:app) do
+       as :root do
+         execute "qemu-nbd", "-d", fetch(:dev_nbd)
+         info "Waiting for block device to go offline . . . "
+         begin
+           Timeout::timeout(20) do
+             while test "[", "-b", fetch(:dev_nbdp1), "]"
+               print ". "
+               sleep 3
+             end
+             info "block device offline"
+           end
+         rescue Timeout::Error
+           info "Error: unable to free block dev #{fetch(:dev_nbd)}"
+           execute "rm", fetch(:nbd_run_file), "-rf"
+           execute "touch", fetch(:nbd_lock_file)
+           exit 1
+         end
+         raise "failed to free #{fetch(:dev_nbd)}" if test "[", "-b", fetch(:dev_nbdp1), "]"
+         execute "rm", fetch(:nbd_run_file), "-rf"
+       end
+     end
+   end
+ end
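
image:connect_to_unused_nbd picks a candidate device with rand(16), i.e. one of /dev/nbd0 through /dev/nbd15, and image:mount keeps re-invoking it via the :nbd_connected flag until a free device is found. A hypothetical resolution of the derived settings:

    set :nbd, "nbd#{rand(16)}"  # e.g. "nbd7"
    fetch(:dev_nbd)             # => "/dev/nbd7"
    fetch(:dev_nbdp1)           # => "/dev/nbd7p1"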
lib/libvirtinator/lv.rb ADDED
@@ -0,0 +1,45 @@
+ namespace :lv do
+   desc "Remove a logical volume and recreate it."
+   task :recreate do
+     on roles(:app) do
+       as :root do
+         if test "[", "-b", fetch(:data_disk_lv_path), "]"
+           ask :yes_no, "Are you sure you want to delete and recreate the logical volume #{fetch(:data_disk_lv_path)}?"
+           if fetch(:yes_no).chomp.downcase == "yes"
+             execute "lvremove", "--force", fetch(:data_disk_lv_path)
+             sleep 1
+           end
+         else
+           warn "Error: #{fetch(:data_disk_lv_path)} not found, yet you called lv:recreate!"
+         end
+         Rake::Task["lv:create"].invoke
+       end
+     end
+   end
+
+   #desc "Create a logical volume."
+   task :create do
+     on roles(:app) do
+       as :root do
+         if test "lvcreate", fetch(:data_disk_vg_path), "-L", "#{fetch(:data_disk_gb)}G", "-n", fetch(:data_disk_lv_name)
+           Rake::Task["lv:mkfs"].invoke
+         else
+           fatal "Error running lvcreate!"
+           exit
+         end
+       end
+     end
+   end
+
+   #desc "Create an ext4 filesystem."
+   task :mkfs do
+     on roles(:app) do
+       as :root do
+         unless test "[", "-b", fetch(:data_disk_lv_path), "]"
+           raise "Tried to create filesystem but path does not exist!"
+         end
+         execute "mkfs.ext4", "-q", "-m", "0", fetch(:data_disk_lv_path)
+       end
+     end
+   end
+ end
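
The -m 0 passed to mkfs.ext4 sets the reserved-blocks percentage to zero, a common choice for a dedicated data disk: the default 5% root reservation mainly protects the filesystem the system itself writes to.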
lib/libvirtinator/users.rb ADDED
@@ -0,0 +1,55 @@
+ namespace :users do
+
+   task :load_settings do
+     set :path, ""
+     until File.exists?(fetch(:path)) and (! File.directory?(fetch(:path)))
+       ask :path, "Which private key has SSH access to the VM? Specify an absolute path"
+     end
+     SSHKit::Backend::Netssh.configure do |ssh|
+       ssh.ssh_options = {
+         keys: fetch(:path),
+         forward_agent: false,
+         auth_methods: %w(publickey)
+       }
+     end
+   end
+
+   desc "Idempotently set up unix admin users using SSH with sudo rights."
+   task :setup => :load_settings do
+     on "#{fetch(:user)}@#{fetch(:ip)}" do
+       as :root do
+         fetch(:usergroups).each do |usergroup|
+           usergroup = usergroup.to_sym
+           next if fetch(usergroup).nil? or fetch(usergroup).empty?
+           fetch(usergroup).each do |user|
+             key_file = "/home/#{user['name']}/.ssh/authorized_keys"
+             if user['disabled']
+               if test "id", "-u", user['name']
+                 execute "bash", "-c", "\"echo", "''", ">", "#{key_file}\""
+                 execute "passwd", "-d", user['name']
+                 info "Disabled user #{user['name']}"
+               end
+             else
+               unless test "id", "-u", user['name']
+                 exit unless test "adduser", "--disabled-password", "--gecos", "\'\'", user['name']
+               end
+               execute "usermod", "-s", "'/bin/bash'", user['name']
+               user['groups'].each do |group|
+                 execute "usermod", "-a", "-G", group, user['name']
+               end
+               execute "mkdir", "-p", "/home/#{user['name']}/.ssh"
+               execute "chown", "#{user['name']}.", "-R", "/home/#{user['name']}"
+               execute "chmod", "700", "/home/#{user['name']}/.ssh"
+               content = StringIO.new("#{user['ssh_keys'].join("\n\n")}\n")
+               upload! content, "/tmp/temp_authorized_keys"
+               execute "mv", "/tmp/temp_authorized_keys", "/home/#{user['name']}/.ssh/authorized_keys"
+               execute "chown", "#{user['name']}.", "#{key_file}"
+               execute "chmod", "600", "#{key_file}"
+             end
+           end
+         end
+         info "Finished setting up users"
+       end
+     end
+   end
+ end
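
users:setup is also wired into vm:start (see the next file), but it can be run on its own to converge accounts on an existing VM, e.g.:

    $ cap vm_name users:setup

It prompts for a private key with SSH access, then creates, re-keys, or disables each account listed in the groups named by :usergroups (the example configs define one group, :sysadmins).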
lib/libvirtinator/vm.rb ADDED
@@ -0,0 +1,419 @@
+ require 'socket'
+ require 'timeout'
+ require 'erb'
+
+ namespace :vm do
+   desc "Check the current status of a VM"
+   task :status do
+     on roles(:app) do
+       as :root do
+         if test("virsh", "list", "--all", "|", "grep", "-q", "#{fetch(:node_name)}")
+           if test("virsh", "list", "|", "grep", "-q", "#{fetch(:node_name)}")
+             info "VM #{fetch(:node_name)} exists and is running on #{host}"
+           else
+             info "VM #{fetch(:node_name)} is defined but not running on #{host}"
+           end
+         else
+           info "VM #{fetch(:node_name)} is undefined on #{host}"
+         end
+         if system "bash -c \"ping -c 3 -w 5 #{fetch(:ip)} &> /dev/null\""
+           begin
+             Timeout::timeout(5) do
+               if (TCPSocket.open(fetch(:ip), 22) rescue nil)
+                 info "The IP #{fetch(:ip)} is responding to ping and SSH"
+               else
+                 info "The IP #{fetch(:ip)} is responding to ping but not SSH"
+               end
+             end
+           rescue Timeout::Error
+             info "The IP #{fetch(:ip)} is responding to ping but not SSH"
+           end
+         else
+           info "The IP #{fetch(:ip)} is not responding to ping"
+         end
+       end
+     end
+   end
+
+   desc "Start a copy-on-write Virtual Machine from a base image."
+   task :start do
+     on roles(:app) do
+       info "Preparing to start #{fetch(:node_name)}"
+       Rake::Task['vm:ensure_nbd_module'].invoke
+       Rake::Task['vm:ensure_root_partitions_path'].invoke
+       Rake::Task['vm:ensure_vm_not_running'].invoke
+       Rake::Task['vm:ensure_ip_no_ping'].invoke
+       Rake::Task['vm:ensure_vm_not_defined'].invoke
+       Rake::Task['vm:verify_base'].invoke
+       Rake::Task['vm:remove_root_image'].invoke
+       Rake::Task['vm:create_root_image'].invoke
+       begin
+         Rake::Task["image:mount"].invoke
+         Rake::Task['vm:update_root_image'].invoke
+       ensure
+         Rake::Task["image:umount"].invoke
+       end
+       Rake::Task['vm:create_data'].invoke
+       Rake::Task['vm:define_domain'].invoke
+       Rake::Task['vm:start_domain'].invoke
+       Rake::Task['vm:reset_known_hosts_on_host'].invoke
+       Rake::Task['vm:setup_agent_forwarding'].invoke
+       Rake::Task['vm:wait_for_ping'].invoke
+       Rake::Task['vm:wait_for_ssh_alive'].invoke
+       Rake::Task['users:setup'].invoke
+       info "Say, you don't say? Are we finished?"
+     end
+   end
+
+   task :ensure_root_partitions_path do
+     on roles(:app) do
+       as :root do
+         dir = fetch(:root_partitions_path)
+         unless test "[", "-d", dir, "]"
+           fatal "Error: root partitions path #{dir} is not a directory!"
+           raise
+         end
+       end
+     end
+   end
+
+   task :ensure_nbd_module do
+     on roles(:app) do
+       as :root do
+         unless test("lsmod | grep -q nbd")
+           info 'Running modprobe nbd'
+           execute "modprobe", "nbd"
+           sleep 0.5
+           unless test("lsmod | grep -q nbd")
+             fatal "Error: Unable to modprobe nbd!"
+             raise
+           end
+         end
+       end
+     end
+   end
+
+   task :ensure_vm_not_running do
+     on roles(:app) do
+       as :root do
+         if test("virsh", "list", "|", "grep", "-q", "#{fetch(:node_name)}")
+           fatal "The VM #{fetch(:node_name)} is already running!"
+           raise
+         end
+       end
+     end
+   end
+
+   task :ensure_ip_no_ping do
+     run_locally do
+       info "Attempting to ping #{fetch(:ip)}"
+       if system "bash -c \"ping -c 3 -w 5 #{fetch(:ip)} &> /dev/null\""
+         fatal "The IP #{fetch(:ip)} is already pingable!"
+         raise
+       else
+         info "No ping returned, continuing"
+       end
+     end
+   end
+
+   task :ensure_vm_not_defined do
+     on roles(:app) do
+       as :root do
+         if test("virsh", "list", "--all", "|", "grep", "-q", "#{fetch(:node_name)}")
+           set :yes_or_no, ""
+           until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+             ask :yes_or_no, "The VM #{fetch(:node_name)} is defined but not running! Do you want to undefine/redefine it?"
+           end
+           unless fetch(:yes_or_no).chomp.downcase == "yes"
+             exit
+           else
+             execute "virsh", "undefine", fetch(:node_name)
+           end
+         end
+       end
+     end
+   end
+
+   task :verify_base do
+     on roles(:app) do
+       as :root do
+         unless test "[", "-f", fetch(:base_image_path), "]"
+           fatal "Error: cannot find the base image #{fetch(:base_image_path)}"
+           raise
+         end
+         raise unless test("chown", "libvirt-qemu:kvm", fetch(:base_image_path))
+       end
+     end
+   end
+
+   task :remove_root_image do
+     on roles(:app) do
+       as :root do
+         # use 'cap <stage> vm:start recreate_root=true' to recreate the root image
+         if ENV['recreate_root'] == "true"
+           if test "[", "-f", fetch(:root_image_path), "]"
+             set :yes_or_no, ""
+             until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+               ask :yes_or_no, "Are you sure you want to remove the existing #{fetch(:root_image_path)} file?"
+             end
+             if fetch(:yes_or_no).chomp.downcase == "yes"
+               info "Removing old image"
+               execute "rm", fetch(:root_image_path)
+             end
+           end
+         end
+       end
+     end
+   end
+
+   task :create_root_image do
+     on roles(:app) do
+       as :root do
+         unless test "[", "-f", fetch(:root_image_path), "]"
+           info "Creating new image"
+           execute "qemu-img", "create", "-b", fetch(:base_image_path), "-f", "qcow2", fetch(:root_image_path)
+         else
+           set :yes_or_no, ""
+           until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+             ask :yes_or_no, "#{fetch(:root_image_path)} already exists, do you want to continue to update its configuration?"
+           end
+           if fetch(:yes_or_no).chomp.downcase == "yes"
+             info "Updating files on an existing image."
+           else
+             exit
+           end
+         end
+       end
+     end
+   end
+
+   task :update_root_image do
+     on roles(:app) do
+       as :root do
+         mount_point = fetch(:mount_point)
+         raise if mount_point.nil?
+         set :logs_path, -> { fetch(:internal_logs_path) }
+         # instance variables needed for the ERB templates
+         @internal_logs_path = fetch(:logs_path)
+         @node_name = fetch(:node_name)
+         @node_fqdn = fetch(:node_fqdn)
+         @app_fqdn = fetch(:app_fqdn)
+         @hostname = fetch(:hostname)
+         @data_disk_enabled = fetch(:data_disk_enabled)
+         @data_disk_partition = fetch(:data_disk_partition)
+         @data_disk_mount_point = fetch(:data_disk_mount_point)
+         @network = fetch("#{fetch(:cidr)}_network")
+         @gateway = fetch("#{fetch(:cidr)}_gateway")
+         @ip = fetch(:ip)
+         @broadcast = fetch("#{fetch(:cidr)}_broadcast")
+         @netmask = fetch("#{fetch(:cidr)}_netmask")
+         @dns_nameservers = fetch(:dns_nameservers)
+         @dns_search = fetch(:dns_search)
+         {
+           "sudoers-sudo" => "#{mount_point}/etc/sudoers.d/sudo",
+           "hosts"        => "#{mount_point}/etc/hosts",
+           "hostname"     => "#{mount_point}/etc/hostname",
+           "fstab"        => "#{mount_point}/etc/fstab",
+           "interfaces"   => "#{mount_point}/etc/network/interfaces",
+         }.each do |file, path|
+           template = File.new(File.expand_path("./templates/libvirtinator/#{file}.erb")).read
+           generated_config_file = ERB.new(template).result(binding)
+           upload! StringIO.new(generated_config_file), "/tmp/#{file}.file"
+           execute("mv", "/tmp/#{file}.file", path)
+           execute("chown", "root:root", path)
+         end
+         execute "sed", "-i\"\"", "\"/PermitRootLogin/c\\PermitRootLogin no\"",
+           "#{mount_point}/etc/ssh/sshd_config"
+         execute "chmod", "440", "#{mount_point}/etc/sudoers.d/*"
+         execute "echo", "-e", "\"\n#includedir /etc/sudoers.d\n\"", ">>",
+           "#{mount_point}/etc/sudoers"
+         user = fetch(:user)
+         begin
+           execute "bash", "-c", "\"for", "m", "in", "'sys'", "'dev'", "'proc';",
+             "do", "mount", "/$m", "#{mount_point}/$m", "-o", "bind;", "done\""
+           execute "chroot", mount_point, "/bin/bash", "-c",
+             "\"if", "!", "id", user, "&>", "/dev/null;", "then",
+             "useradd", "--user-group", "--shell",
+             "/bin/bash", "--create-home", "#{user};", "fi\""
+           execute "chroot", mount_point, "/bin/bash", "-c",
+             "\"usermod", "-a", "-G", "sudo", "#{user}\""
+           execute "mkdir", "-p", "#{mount_point}/home/#{user}/.ssh"
+           execute "chroot", mount_point, "/bin/bash", "-c",
+             "\"chown", "#{user}:#{user}", "/home/#{user}", "/home/#{user}/.ssh\""
+           execute "chmod", "700", "#{mount_point}/home/#{user}/.ssh"
+           set :path, ""
+           until File.exists? fetch(:path) and ! File.directory? fetch(:path)
+             ask :path, "Which public key should we install in #{user}'s authorized_keys file? Specify an absolute path"
+           end
+           upload! File.open(fetch(:path)), "/tmp/pubkeys"
+           execute "mv", "/tmp/pubkeys", "#{mount_point}/home/#{user}/.ssh/authorized_keys"
+           execute "chroot", mount_point, "/bin/bash", "-c",
+             "\"chown", "#{user}:#{user}", "/home/#{user}/.ssh/authorized_keys\""
+           execute "chmod", "600", "#{mount_point}/home/#{user}/.ssh/authorized_keys"
+           execute "mkdir", "-p", "#{mount_point}#{fetch(:data_disk_mount_point)}" if fetch(:data_disk_enabled)
+         ensure
+           execute "bash", "-c", "\"for", "m", "in", "'sys'", "'dev'", "'proc';",
+             "do", "umount", "#{mount_point}/$m;", "done\""
+         end
+       end
+     end
+   end
+
+   task :create_data do
+     on roles(:app) do
+       as 'root' do
+         unless fetch(:data_disk_enabled)
+           info "Not using a separate data disk."
+           break
+         end
+         if fetch(:data_disk_type) == "qemu"
+           if ! test("[", "-f", fetch(:data_disk_qemu_path), "]") or ENV['recreate_data'] == "true"
+             execute "guestfish", "--new", "disk:#{fetch(:data_disk_gb)}G << _EOF_
+ mkfs ext4 /dev/vda
+ _EOF_"
+             execute "qemu-img", "convert", "-O", "qcow2", "test1.img", "test1.qcow2"
+             execute "rm", "test1.img"
+             execute "mv", "test1.qcow2", fetch(:data_disk_qemu_path)
+           end
+         elsif fetch(:data_disk_type) == "lv"
+           if ENV['recreate_data'] == "true"
+             if test "[", "-b", fetch(:data_disk_lv_path), "]"
+               Rake::Task['lv:recreate'].invoke
+             else
+               Rake::Task['lv:create'].invoke
+             end
+           else
+             if test "[", "-b", fetch(:data_disk_lv_path), "]"
+               info "Found and using existing logical volume #{fetch(:data_disk_lv_path)}"
+             else
+               Rake::Task['lv:create'].invoke
+             end
+           end
+         else
+           fatal "No recognized disk type (lv, qemu), yet data_disk_enabled is true!"
+           fatal "Fix this by adding a recognized disk type (lv, qemu) to your config."
+           raise
+         end
+       end
+     end
+   end
+
+   task :define_domain do
+     on roles(:app) do
+       as 'root' do
+         # instance variables needed for ERB
+         @node_name = fetch(:node_name)
+         @memory_gb = fetch(:memory_gb).to_i * 1024 * 1024 # GB -> KiB, libvirt's default unit
+         @cpus = fetch(:cpus)
+         @root_image_path = fetch(:root_image_path)
+         @data_disk_enabled = fetch(:data_disk_enabled)
+         @data_disk_type = fetch(:data_disk_type)
+         @data_disk_lv_path = fetch(:data_disk_lv_path)
+         @data_disk_qemu_path = fetch(:data_disk_qemu_path)
+         @bridge = fetch(:bridge)
+         template = File.new(File.expand_path("templates/libvirtinator/server.xml.erb")).read
+         generated_config_file = ERB.new(template).result(binding)
+         upload! StringIO.new(generated_config_file), "/tmp/server.xml"
+         execute "virsh", "define", "/tmp/server.xml"
+         execute "rm", "/tmp/server.xml", "-rf"
+       end
+     end
+   end
+
+   task :start_domain do
+     on roles(:app) do
+       as 'root' do
+         execute "virsh", "start", "#{fetch(:node_name)}"
+       end
+     end
+   end
+
+   # Keep this to aid with users setup
+   task :reset_known_hosts_on_host do
+     run_locally do
+       user = ENV['SUDO_USER'] || ENV['USER']
+       execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:node_name)}"
+       execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:node_fqdn)}"
+       execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:hostname)}"
+       execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:app_fqdn)}"
+       execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:ip)}"
+     end
+   end
+
+   task :wait_for_ping do
+     run_locally do
+       info "Waiting for VM to respond to ping.."
+       begin
+         Timeout::timeout(30) do
+           until system "bash -c \"ping -c 3 -w 5 #{fetch(:ip)} &> /dev/null\"" do
+             print ' ...'
+           end
+           info "Ping alive!"
+         end
+       rescue Timeout::Error
+         puts
+         set :yes_or_no, ""
+         until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+           ask :yes_or_no, "Networking on the VM has not come up in 30 seconds, would you like to wait another 30?"
+         end
+         if fetch(:yes_or_no).chomp.downcase == "yes"
+           Rake::Task['vm:wait_for_ping'].reenable
+           return Rake::Task['vm:wait_for_ping'].invoke
+         else
+           warn "Exiting.."
+           exit
+         end
+       end
+     end
+   end
+
+   task :setup_agent_forwarding do
+     run_locally do
+       lines = <<-eos
+ \nHost #{fetch(:node_fqdn)}
+   ForwardAgent yes
+ Host #{fetch(:hostname)}
+   ForwardAgent yes
+ Host #{fetch(:app_fqdn)}
+   ForwardAgent yes
+ Host #{fetch(:ip)}
+   ForwardAgent yes
+ Host #{fetch(:node_name)}
+   ForwardAgent yes\n
+       eos
+       {ENV['USER'] => "/home/#{ENV['USER']}/.ssh"}.each do |user, dir|
+         if File.directory?(dir)
+           unless File.exists?("#{dir}/config")
+             execute "sudo", "-u", "#{user}", "touch", "#{dir}/config"
+             execute "chmod", "600", "#{dir}/config"
+           end
+           execute "echo", "-e", "\"#{lines}\"", ">>", "#{dir}/config"
+         end
+       end
+     end
+   end
+
+   task :wait_for_ssh_alive do
+     run_locally do
+       info "Waiting for VM SSH alive.."
+       begin
+         Timeout::timeout(30) do
+           (print "..."; sleep 3) until (TCPSocket.open(fetch(:ip), 22) rescue nil)
+         end
+       rescue Timeout::Error
+         set :yes_or_no, ""
+         until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+           ask :yes_or_no, "SSH on the VM has not come up in 30 seconds, would you like to wait another 30?"
+         end
+         if fetch(:yes_or_no).chomp.downcase == "yes"
+           Rake::Task['vm:wait_for_ssh_alive'].reenable
+           return Rake::Task['vm:wait_for_ssh_alive'].invoke
+         else
+           warn "Exiting.."
+           exit
+         end
+       end
+       info "SSH alive!"
+     end
+   end
+ end
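
The ENV flags read above give vm:start its recreate behavior. Hypothetical invocations, assuming a stage named vm_name:

    $ cap vm_name vm:start                      # boot, reusing any existing images
    $ cap vm_name vm:start recreate_root=true   # prompt to remove and rebuild the root image
    $ cap vm_name vm:start recreate_data=true   # recreate the data disk (lv or qemu)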
lib/libvirtinator.rb ADDED
@@ -0,0 +1,5 @@
+ load 'libvirtinator/config.rb'
+ load 'libvirtinator/vm.rb'
+ load 'libvirtinator/users.rb'
+ load 'libvirtinator/image.rb'
+ load 'libvirtinator/lv.rb'
metadata ADDED
@@ -0,0 +1,81 @@
+ --- !ruby/object:Gem::Specification
+ name: libvirtinator
+ version: !ruby/object:Gem::Version
+   version: 0.0.0
+ prerelease:
+ platform: ruby
+ authors:
+ - david amick
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2014-11-22 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: capistrano
+   requirement: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - '='
+       - !ruby/object:Gem::Version
+         version: 3.2.1
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - '='
+       - !ruby/object:Gem::Version
+         version: 3.2.1
+ description: An Opinionated libvirt VM Deployment gem
+ email: davidamick@ctisolutionsinc.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - lib/libvirtinator.rb
+ - lib/libvirtinator/config.rb
+ - lib/libvirtinator/vm.rb
+ - lib/libvirtinator/users.rb
+ - lib/libvirtinator/image.rb
+ - lib/libvirtinator/lv.rb
+ - lib/libvirtinator/examples/Capfile
+ - lib/libvirtinator/examples/config/deploy.rb
+ - lib/libvirtinator/examples/config/deploy/vm_name.rb
+ - lib/libvirtinator/examples/config/sysadmins_keys.rb
+ - lib/libvirtinator/examples/first_boot.sh.erb
+ - lib/libvirtinator/examples/fstab.erb
+ - lib/libvirtinator/examples/hostname.erb
+ - lib/libvirtinator/examples/hosts.erb
+ - lib/libvirtinator/examples/interfaces.erb
+ - lib/libvirtinator/examples/server.xml.erb
+ - lib/libvirtinator/examples/sudoers-sudo.erb
+ - lib/libvirtinator/examples/vmbuilder-init.sh.erb
+ - lib/libvirtinator/examples/vmbuilder.cfg.erb
+ homepage: https://github.com/snarlysodboxer/libvirtinator
+ licenses:
+ - GNU
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: 1.9.3
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements:
+ - Docker ~1.3.1
+ rubyforge_project:
+ rubygems_version: 1.8.23.2
+ signing_key:
+ specification_version: 3
+ summary: Deploy libvirt VMs
+ test_files: []