libvirtinator 0.0.0 → 0.0.1
@@ -1,5 +1,8 @@
 set :host_machine_name, "my-host"
-
+
+# Specify a user with existing SSH access and passwordless sudo rights on the host.
+# The same user will be setup with SSH access on the VM.
+set :user, -> { ENV['USER'] }
 
 role :app, "#{fetch(:user)}@#{fetch(:host_machine_name)}.example.com"
 
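The new `:user` setting stores a lambda rather than a literal string, so the value is looked up when it is fetched at run time instead of when the stage file is loaded. A minimal plain-Ruby sketch of that lazy-lookup idea, outside Capistrano (the `settings` hash and `resolve` helper are stand-ins for illustration, not part of the gem):

    # The lambda is stored now and only called when the value is needed,
    # so it picks up the invoking user's environment at run time.
    settings = { user: -> { ENV['USER'] } }

    def resolve(value)
      value.respond_to?(:call) ? value.call : value
    end

    puts "role :app resolves to #{resolve(settings[:user])}@my-host.example.com"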
data/lib/libvirtinator/image.rb CHANGED
data/lib/libvirtinator/lv.rb CHANGED
data/lib/libvirtinator/users.rb CHANGED
@@ -14,7 +14,7 @@ namespace :users do
     end
   end
 
-  desc "Idempotently setup
+  desc "Idempotently setup admin UNIX users."
   task :setup => :load_settings do
     on "#{fetch(:user)}@#{fetch(:ip)}" do
       as :root do
@@ -31,7 +31,7 @@ namespace :users do
           end
         else
           unless test "id", "-u", user['name']
-            exit unless test "
+            exit unless test "useradd", "--user-group", "--shell", "/bin/bash", "--create-home", user['name']
           end
           execute "usermod", "-s", "'/bin/bash'", user['name']
           user['groups'].each do |group|
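The rewritten `useradd` line makes the task idempotent: the account is created only when `id -u` reports it missing, so re-running `users:setup` is safe. A standalone sketch of the same check-then-create pattern in plain Ruby (run as root on a throwaway host; the user name is just an example value):

    # `id -u` exits non-zero when the account does not exist, so useradd
    # runs at most once no matter how often this is replayed.
    name = "deploy"  # example value, not part of the gem
    unless system("id", "-u", name, out: File::NULL, err: File::NULL)
      system("useradd", "--user-group", "--shell", "/bin/bash",
             "--create-home", name) or abort("useradd failed")
    end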
data/lib/libvirtinator/vm.rb CHANGED
@@ -2,366 +2,369 @@ require 'socket'
 require 'timeout'
 require 'erb'
 
-
-
-
-
-
-if test("virsh", "list", "
-
-  info "VM #{fetch(:node_name)} exists and is running on #{host}"
-else
-  info "VM #{fetch(:node_name)} is defined but not running on #{host}"
-end
+desc "Check the current status of a VM"
+task :status do
+  on roles(:app) do
+    as :root do
+      if test("virsh", "list", "--all", "|", "grep", "-q", "#{fetch(:node_name)}")
+        if test("virsh", "list", "|", "grep", "-q", "#{fetch(:node_name)}")
+          info "VM #{fetch(:node_name)} exists and is running on #{host}"
         else
-          info "VM #{fetch(:node_name)} is
+          info "VM #{fetch(:node_name)} is defined but not running on #{host}"
         end
-
-
-
-
-
-
-
-        info "The IP #{fetch(:ip)} is responding to ping
+      else
+        info "VM #{fetch(:node_name)} is undefined on #{host}"
+      end
+      if system "bash -c \"ping -c 3 -w 5 #{fetch(:ip)} &> /dev/null\""
+        begin
+          Timeout::timeout(5) do
+            (TCPSocket.open(fetch(:ip),22) rescue nil)
+            info "The IP #{fetch(:ip)} is responding to ping and SSH"
           end
-
-        info "The IP #{fetch(:ip)} is
+        rescue TimeoutError
+          info "The IP #{fetch(:ip)} is responding to ping but not SSH"
         end
+      else
+        info "The IP #{fetch(:ip)} is not responding to ping"
       end
     end
   end
+end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    end
-    Rake::Task['vm:create_data'].invoke
-    Rake::Task['vm:define_domain'].invoke
-    Rake::Task['vm:start_domain'].invoke
-    Rake::Task['vm:reset_known_hosts_on_host'].invoke
-    Rake::Task['vm:setup_agent_forwarding'].invoke
-    Rake::Task['vm:wait_for_ping'].invoke
-    Rake::Task['vm:wait_for_ssh_alive'].invoke
-    Rake::Task['users:setup'].invoke
-    info "Say, you don't say? Are we finished?"
+desc "Start a copy-on-write VM from a base image."
+task :start do
+  on roles(:app) do
+    info "Preparing to start #{fetch(:node_name)}"
+    Rake::Task['ensure_nbd_module'].invoke
+    Rake::Task['ensure_root_partitions_path'].invoke
+    Rake::Task['ensure_vm_not_running'].invoke
+    Rake::Task['ensure_ip_no_ping'].invoke
+    Rake::Task['ensure_vm_not_defined'].invoke
+    Rake::Task['verify_base'].invoke
+    Rake::Task['remove_root_image'].invoke
+    Rake::Task['create_root_image'].invoke
+    begin
+      Rake::Task["image:mount"].invoke
+      Rake::Task['update_root_image'].invoke
+    ensure
+      Rake::Task["image:umount"].invoke
     end
+    Rake::Task['create_data'].invoke
+    Rake::Task['define_domain'].invoke
+    Rake::Task['start_domain'].invoke
+    Rake::Task['reset_known_hosts_on_host'].invoke
+    Rake::Task['setup_agent_forwarding'].invoke
+    Rake::Task['wait_for_ping'].invoke
+    Rake::Task['wait_for_ssh_alive'].invoke
+    Rake::Task['users:setup'].invoke
+    info "Say, you don't say? Are we finished?"
   end
+end
 
-
-
-
-
-
-
-
+task :ensure_root_partitions_path do
+  on roles(:app) do
+    as :root do
+      dir = fetch(:root_partitions_path)
+      unless test "[", "-d", dir, "]"
+        fatal "Error: root partitions path #{dir} is not a directory!"
+        exit
       end
     end
   end
+end
 
-
-
-
+task :ensure_nbd_module do
+  on roles(:app) do
+    as :root do
+      unless test("lsmod | grep -q nbd")
+        info 'Running modprobe nbd'
+        execute "modprobe", "nbd"
+        sleep 0.5
         unless test("lsmod | grep -q nbd")
-
-
-        sleep 0.5
-        unless test("lsmod | grep -q nbd")
-          fatal "Error: Unable to modprobe nbd!" && raise
-        end
+          fatal "Error: Unable to modprobe nbd!"
+          exit
         end
       end
     end
   end
+end
 
-
-
-
-
-
-
+task :ensure_vm_not_running do
+  on roles(:app) do
+    as :root do
+      if test("virsh", "list", "|", "grep", "-q", "#{fetch(:node_name)}")
+        fatal "The VM #{fetch(:node_name)} is already running on #{host}!"
+        exit
      end
    end
  end
+end
 
-
-
-
-
-
-
-
-
-end
+task :ensure_ip_no_ping do
+  run_locally do
+    info "Attempting to ping #{fetch(:ip)}"
+    if system "bash -c \"ping -c 3 -w 5 #{fetch(:ip)} &> /dev/null\""
+      fatal "The IP #{fetch(:ip)} is already pingable!"
+      exit
+    else
+      info "No ping returned, continuing"
    end
  end
+end
 
-
-
-
-
-
-
-
-      end
-        unless fetch(:yes_or_no).chomp.downcase == "yes"
-          exit
-        else
-          execute "virsh", "undefine", fetch(:node_name)
-        end
+task :ensure_vm_not_defined do
+  on roles(:app) do
+    as :root do
+      if test("virsh", "list", "--all", "|", "grep", "-q", "#{fetch(:node_name)}")
+        set :yes_or_no, ""
+        until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+          ask :yes_or_no, "The VM #{fetch(:node_name)} is defined but not running! Do you want to undefine/redefine it?"
        end
-
-
-
-
-task :verify_base do
-  on roles(:app) do
-    as :root do
-      unless test "[", "-f", fetch(:base_image_path), "]"
-        fatal "Error: cannot find the base image #{fetch(:base_image_path)}" && raise
+        unless fetch(:yes_or_no).chomp.downcase == "yes"
+          exit
+        else
+          execute "virsh", "undefine", fetch(:node_name)
        end
-      raise unless test("chown", "libvirt-qemu:kvm", fetch(:base_image_path))
      end
    end
  end
+end
 
-
-
-
-
-
-
-          set :yes_or_no, ""
-          until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
-            ask :yes_or_no, "Are you sure you want to remove the existing #{root_image_path} file?"
-          end
-          if fetch(:yes_or_no).chomp.downcase == "yes"
-            info "Removing old image"
-            execute "rm", root_image_path
-          end
-        end
-      end
+task :verify_base do
+  on roles(:app) do
+    as :root do
+      unless test "[", "-f", fetch(:base_image_path), "]"
+        fatal "Error: cannot find the base image #{fetch(:base_image_path)}"
+        exit
      end
+      raise unless test("chown", "libvirt-qemu:kvm", fetch(:base_image_path))
    end
  end
+end
 
-
-
-
-
-
-
-      else
+task :remove_root_image do
+  on roles(:app) do
+    as :root do
+      # use 'cap <server> create recreate_root=true' to recreate the root image
+      if ENV['recreate_root'] == "true"
+        if test "[", "-f", root_image_path, "]"
          set :yes_or_no, ""
          until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
-            ask :yes_or_no, "
+            ask :yes_or_no, "Are you sure you want to remove the existing #{root_image_path} file?"
          end
          if fetch(:yes_or_no).chomp.downcase == "yes"
-            info "
-
-            exit
+            info "Removing old image"
+            execute "rm", root_image_path
          end
        end
      end
    end
  end
+end
 
-
-
-
-
-
-
-
-
-
-
-      @hostname = fetch(:hostname)
-      @data_disk_enabled = fetch(:data_disk_enabled)
-      @data_disk_partition = fetch(:data_disk_partition)
-      @data_disk_mount_point = fetch(:data_disk_mount_point)
-      @network = fetch("#{fetch(:cidr)}_network")
-      @gateway = fetch("#{fetch(:cidr)}_gateway")
-      @ip = fetch(:ip)
-      @broadcast = fetch("#{fetch(:cidr)}_broadcast")
-      @netmask = fetch("#{fetch(:cidr)}_netmask")
-      @dns_nameservers = fetch(:dns_nameservers)
-      @dns_search = fetch(:dns_search)
-      {
-        "sudoers-sudo" => "#{mount_point}/etc/sudoers.d/sudo",
-        "hosts" => "#{mount_point}/etc/hosts",
-        "hostname" => "#{mount_point}/etc/hostname",
-        "fstab" => "#{mount_point}/etc/fstab",
-        "interfaces" => "#{mount_point}/etc/network/interfaces",
-      }.each do |file, path|
-        template = File.new(File.expand_path("./templates/libvirtinator/#{file}.erb")).read
-        generated_config_file = ERB.new(template).result(binding)
-        upload! StringIO.new(generated_config_file), "/tmp/#{file}.file"
-        execute("mv", "/tmp/#{file}.file", path)
-        execute("chown", "root:root", path)
+task :create_root_image do
+  on roles(:app) do
+    as :root do
+      unless test "[", "-f", fetch(:root_image_path), "]"
+        info "Creating new image"
+        execute "qemu-img", "create", "-b", fetch(:base_image_path), "-f", "qcow2", fetch(:root_image_path)
+      else
+        set :yes_or_no, ""
+        until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+          ask :yes_or_no, "#{fetch(:root_image_path)} already exists, do you want to continue to update it's configuration?"
        end
-
-        "
-
-
-          "#{mount_point}/etc/sudoers"
-      user = fetch(:user)
-      begin
-        execute "bash", "-c", "\"for", "m", "in", "'sys'", "'dev'", "'proc';",
-          "do", "mount", "/$m", "#{mount_point}/$m", "-o", "bind;", "done\""
-        execute "chroot", mount_point, "/bin/bash", "-c",
-          "\"if", "!", "id", user, "&>", "/dev/null;", "then",
-          "useradd", "--user-group", "--shell",
-          "/bin/bash", "--create-home", "#{user};", "fi\""
-        execute "chroot", mount_point, "/bin/bash", "-c",
-          "\"usermod", "-a", "-G", "sudo", "#{user}\""
-        execute "mkdir", "-p", "#{mount_point}/home/#{user}/.ssh"
-        execute "chroot", mount_point, "/bin/bash", "-c",
-          "\"chown", "#{user}:#{user}", "/home/#{user}", "/home/#{user}/.ssh\""
-        execute "chmod", "700", "#{mount_point}/home/#{user}/.ssh"
-        set :path, ""
-        until File.exists? fetch(:path) and ! File.directory? fetch(:path)
-          ask :path, "Which public key should we install in #{user}'s authorized_keys file? Specifiy an absolute path"
-        end
-        upload! File.open(fetch(:path)), "/tmp/pubkeys"
-        execute "mv", "/tmp/pubkeys", "#{mount_point}/home/#{user}/.ssh/authorized_keys"
-        execute "chroot", mount_point, "/bin/bash", "-c",
-          "\"chown", "#{user}:#{user}", "/home/#{user}/.ssh/authorized_keys\""
-        execute "chmod", "600", "#{mount_point}/home/#{user}/.ssh/authorized_keys"
-        execute "mkdir", "-p", "#{mount_point}#{fetch(:data_disk_mount_point)}" if fetch(:data_disk_enabled)
-      ensure
-        execute "bash", "-c", "\"for", "m", "in", "'sys'", "'dev'", "'proc';",
-          "do", "umount", "#{mount_point}/$m;", "done\""
+        if fetch(:yes_or_no).chomp.downcase == "yes"
+          info "Updating file on an existing image."
+        else
+          exit
        end
      end
    end
  end
+end
 
-
-
-
-
-
-
+task :update_root_image do
+  on roles(:app) do
+    as :root do
+      mount_point = fetch(:mount_point)
+      raise if mount_point.nil?
+      set :logs_path, -> { fetch(:internal_logs_path) }
+      @internal_logs_path = fetch(:logs_path)
+      @node_name = fetch(:node_name)
+      @node_fqdn = fetch(:node_fqdn)
+      @app_fqdn = fetch(:app_fqdn)
+      @hostname = fetch(:hostname)
+      @data_disk_enabled = fetch(:data_disk_enabled)
+      @data_disk_partition = fetch(:data_disk_partition)
+      @data_disk_mount_point = fetch(:data_disk_mount_point)
+      @network = fetch("#{fetch(:cidr)}_network")
+      @gateway = fetch("#{fetch(:cidr)}_gateway")
+      @ip = fetch(:ip)
+      @broadcast = fetch("#{fetch(:cidr)}_broadcast")
+      @netmask = fetch("#{fetch(:cidr)}_netmask")
+      @dns_nameservers = fetch(:dns_nameservers)
+      @dns_search = fetch(:dns_search)
+      {
+        "sudoers-sudo" => "#{mount_point}/etc/sudoers.d/sudo",
+        "hosts" => "#{mount_point}/etc/hosts",
+        "hostname" => "#{mount_point}/etc/hostname",
+        "fstab" => "#{mount_point}/etc/fstab",
+        "interfaces" => "#{mount_point}/etc/network/interfaces",
+      }.each do |file, path|
+        template = File.new(File.expand_path("./templates/libvirtinator/#{file}.erb")).read
+        generated_config_file = ERB.new(template).result(binding)
+        upload! StringIO.new(generated_config_file), "/tmp/#{file}.file"
+        execute("mv", "/tmp/#{file}.file", path)
+        execute("chown", "root:root", path)
+      end
+      execute "sed", "-i\"\"", "\"/PermitRootLogin/c\\PermitRootLogin no\"",
+        "#{mount_point}/etc/ssh/sshd_config"
+      execute "chmod", "440", "#{mount_point}/etc/sudoers.d/*"
+      execute "echo", "-e", "\"\n#includedir /etc/sudoers.d\n\"", ">>",
+        "#{mount_point}/etc/sudoers"
+      user = fetch(:user)
+      begin
+        execute "bash", "-c", "\"for", "m", "in", "'sys'", "'dev'", "'proc';",
+          "do", "mount", "/$m", "#{mount_point}/$m", "-o", "bind;", "done\""
+        execute "chroot", mount_point, "/bin/bash", "-c",
+          "\"if", "!", "id", user, "&>", "/dev/null;", "then",
+          "useradd", "--user-group", "--shell",
+          "/bin/bash", "--create-home", "#{user};", "fi\""
+        execute "chroot", mount_point, "/bin/bash", "-c",
+          "\"usermod", "-a", "-G", "sudo", "#{user}\""
+        execute "mkdir", "-p", "#{mount_point}/home/#{user}/.ssh"
+        execute "chroot", mount_point, "/bin/bash", "-c",
+          "\"chown", "#{user}:#{user}", "/home/#{user}", "/home/#{user}/.ssh\""
+        execute "chmod", "700", "#{mount_point}/home/#{user}/.ssh"
+        set :path, ""
+        until File.exists? fetch(:path) and ! File.directory? fetch(:path)
+          ask :path, "Which public key should we install in #{user}'s authorized_keys file? Specifiy an absolute path"
        end
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        upload! File.open(fetch(:path)), "/tmp/pubkeys"
+        execute "mv", "/tmp/pubkeys", "#{mount_point}/home/#{user}/.ssh/authorized_keys"
+        execute "chroot", mount_point, "/bin/bash", "-c",
+          "\"chown", "#{user}:#{user}", "/home/#{user}/.ssh/authorized_keys\""
+        execute "chmod", "600", "#{mount_point}/home/#{user}/.ssh/authorized_keys"
+        execute "mkdir", "-p", "#{mount_point}#{fetch(:data_disk_mount_point)}" if fetch(:data_disk_enabled)
+      ensure
+        execute "bash", "-c", "\"for", "m", "in", "'sys'", "'dev'", "'proc';",
+          "do", "umount", "#{mount_point}/$m;", "done\""
+      end
+    end
+  end
+end
+
+task :create_data do
+  on roles(:app) do
+    as 'root' do
+      unless fetch(:data_disk_enabled)
+        info "Not using a separate data disk."
+        break
+      end
+      if fetch(:data_disk_type) == "qemu"
+        if ! test("[", "-f", fetch(:data_disk_qemu_path), "]") or ENV['recreate_data'] == "true"
+          execute "guestfish", "--new", "disk:#{fetch(:data_disk_gb)}G << _EOF_
+mkfs ext4 /dev/vda
+_EOF_"
+          execute "qemu-img", "convert", "-O", "qcow2", "test1.img", "test1.qcow2"
+          execute "rm", "test1.img"
+          execute "mv", "test1.qcow2", fetch(:data_disk_qemu_path)
+        end
+      elsif fetch(:data_disk_type) == "lv"
+        if ENV['recreate_data'] == "true"
+          if test "[", "-b", fetch(:data_disk_lv_path), "]"
+            Rake::Task['lv:recreate'].invoke
          else
-
-            info "Found and using existing logical volume #{fetch(:data_disk_lv_path)}"
-          else
-            Rake::Task['lv:create'].invoke
-          end
+            Rake::Task['lv:create'].invoke
          end
        else
-
-
-
+          if test "[", "-b", fetch(:data_disk_lv_path), "]"
+            info "Found and using existing logical volume #{fetch(:data_disk_lv_path)}"
+          else
+            Rake::Task['lv:create'].invoke
+          end
        end
+      else
+        fatal "No recognized disk type (lv, qemu), yet size is greater than zero!"
+        fatal "Fixed this by adding a recognized disk type (lv, qemu) to your config."
+        exit
      end
    end
  end
+end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-end
+task :define_domain do
+  on roles(:app) do
+    as 'root' do
+      # instance variables needed for ERB
+      @node_name = fetch(:node_name)
+      @memory_gb = fetch(:memory_gb).to_i * 1024 * 1024
+      @cpus = fetch(:cpus)
+      @root_image_path = fetch(:root_image_path)
+      @data_disk_enabled = fetch(:data_disk_enabled)
+      @data_disk_type = fetch(:data_disk_type)
+      @data_disk_lv_path = fetch(:data_disk_lv_path)
+      @data_disk_qemu_path = fetch(:data_disk_qemu_path)
+      @bridge = fetch(:bridge)
+      template = File.new(File.expand_path("templates/libvirtinator/server.xml.erb")).read
+      generated_config_file = ERB.new(template).result(binding)
+      upload! StringIO.new(generated_config_file), "/tmp/server.xml"
+      execute "virsh", "define", "/tmp/server.xml"
+      execute "rm", "/tmp/server.xml", "-rf"
    end
  end
+end
 
-
-
-
-
-end
+task :start_domain do
+  on roles(:app) do
+    as 'root' do
+      execute "virsh", "start", "#{fetch(:node_name)}"
    end
  end
+end
 
-
-
-
-
-
-
-
-
-
-end
+# Keep this to aid with users setup
+task :reset_known_hosts_on_host do
+  run_locally do
+    user = if ENV['SUDO_USER']; ENV['SUDO_USER']; else; ENV['USER']; end
+    execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:node_name)}"
+    execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:node_fqdn)}"
+    execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:hostname)}"
+    execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:app_fqdn)}"
+    execute "sudo", "-u", user, "ssh-keygen", "-R", "#{fetch(:ip)}"
  end
+end
 
-
-
-
-
-
-
-
-        end
-        info "Ping alive!"
-      end
-    rescue Timeout::Error
-      puts
-      set :yes_or_no, ""
-      until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
-        ask :yes_or_no, "Networking on the VM has not come up in 30 seconds, would you like to wait another 30?"
-      end
-      if fetch(:yes_or_no).chomp.downcase == "yes"
-        Rake::Task['vm:wait_for_ping'].reenable
-        return Rake::Task['vm:wait_for_ping'].invoke
-      else
-        warn "Exiting.."
-        exit
+task :wait_for_ping do
+  run_locally do
+    info "Waiting for VM to respond to ping.."
+    begin
+      Timeout::timeout(30) do
+        until system "bash -c \"ping -c 3 -w 5 #{fetch(:ip)} &> /dev/null\"" do
+          print ' ...'
        end
+        info "Ping alive!"
+      end
+    rescue Timeout::Error
+      puts
+      set :yes_or_no, ""
+      until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+        ask :yes_or_no, "Networking on the VM has not come up in 30 seconds, would you like to wait another 30?"
+      end
+      if fetch(:yes_or_no).chomp.downcase == "yes"
+        Rake::Task['wait_for_ping'].reenable
+        return Rake::Task['wait_for_ping'].invoke
+      else
+        warn "Exiting.."
+        exit
      end
    end
  end
+end
 
-
-
-
+task :setup_agent_forwarding do
+  run_locally do
+    lines = <<-eos
 \nHost #{fetch(:node_fqdn)}
   ForwardAgent yes
 Host #{fetch(:hostname)}
@@ -372,40 +375,39 @@ Host #{fetch(:ip)}
   ForwardAgent yes
 Host #{fetch(:node_name)}
   ForwardAgent yes\n
-
-
-
-
-
-
-  end
-    execute "echo", "-e", "\"#{lines}\"", ">>", "#{dir}/config"
+    eos
+    {ENV['USER'] => "/home/#{ENV['USER']}/.ssh"}.each do |user, dir|
+      if File.directory?(dir)
+        unless File.exists?("#{dir}/config")
+          execute "sudo", "-u", "#{user}", "touch", "#{dir}/config"
+          execute "chmod", "600", "#{dir}/config"
        end
+        execute "echo", "-e", "\"#{lines}\"", ">>", "#{dir}/config"
      end
    end
  end
+end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-end
+task :wait_for_ssh_alive do
+  run_locally do
+    info "Waiting for VM SSH alive.."
+    begin
+      Timeout::timeout(30) do
+        (print "..."; sleep 3) until (TCPSocket.open(fetch(:ip),22) rescue nil)
+      end
+    rescue TimeoutError
+      set :yes_or_no, ""
+      until fetch(:yes_or_no).chomp.downcase == "yes" or fetch(:yes_or_no).chomp.downcase == "no"
+        ask :yes_or_no, "SSH on the VM has not come up in 30 seconds, would you like to wait another 30?"
+      end
+      if fetch(:yes_or_no).chomp.downcase == "yes"
+        Rake::Task['wait_for_ssh_alive'].reenable
+        return Rake::Task['wait_for_ssh_alive'].invoke
+      else
+        warn "Exiting.."
+        exit
      end
-    info "SSH alive!"
    end
+    info "SSH alive!"
  end
 end
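Both `update_root_image` and `define_domain` above render ERB templates by assigning instance variables and passing `binding` to `ERB#result`, which is how values such as `@hostname` and `@ip` reach the template text. A self-contained sketch of that mechanism (the template string here is invented for illustration, not one of the gem's shipped templates):

    require 'erb'

    # Instance variables assigned before rendering are visible inside the
    # template through the binding handed to ERB#result.
    @hostname = "vm1"
    @ip       = "192.168.122.50"

    template = "<%= @ip %> <%= @hostname %>.example.com <%= @hostname %>\n"
    puts ERB.new(template).result(binding)
    # prints: 192.168.122.50 vm1.example.com vm1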
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: libvirtinator
 version: !ruby/object:Gem::Version
-  version: 0.0.0
+  version: 0.0.1
   prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2014-11-
+date: 2014-11-26 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: capistrano