vagrant-g5k 0.0.18 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/README.md +2 -0
- data/Vagrantfile +25 -30
- data/lib/vagrant-g5k/action.rb +0 -3
- data/lib/vagrant-g5k/action/connect_g5k.rb +33 -2
- data/lib/vagrant-g5k/action/read_ssh_info.rb +13 -32
- data/lib/vagrant-g5k/action/read_state.rb +4 -8
- data/lib/vagrant-g5k/command.rb +2 -2
- data/lib/vagrant-g5k/config.rb +4 -3
- data/lib/vagrant-g5k/disk/local.rb +68 -0
- data/lib/vagrant-g5k/disk/rbd.rb +92 -0
- data/lib/vagrant-g5k/g5k_connection.rb +144 -0
- data/lib/vagrant-g5k/network/bridge.rb +115 -0
- data/lib/vagrant-g5k/network/nat.rb +65 -0
- data/lib/vagrant-g5k/oar.rb +90 -0
- data/lib/vagrant-g5k/util/{launch_vm_fwd.sh → launch_vm.sh} +35 -5
- data/lib/vagrant-g5k/version.rb +1 -1
- data/spec/vagrant-g5k/oar_spec.rb +57 -0
- data/vagrant-g5k.gemspec +3 -3
- metadata +52 -6
- data/lib/vagrant-g5k/action/close_g5k.rb +0 -24
- data/lib/vagrant-g5k/util/g5k_utils.rb +0 -435
- data/lib/vagrant-g5k/util/launch_vm_bridge.sh +0 -58
@@ -0,0 +1,144 @@
|
|
1
|
+
require 'net/ssh/multi'
require 'net/scp'
require 'json'
require 'digest'
require 'thread'

require 'vagrant/util/retryable'

# Path, relative to this file, of the script that boots the VM on a node.
LAUNCHER_SCRIPT = "util/launch_vm.sh"

# Disk image backing strategies supported by the provider.
STRATEGY_SNAPSHOT = "snapshot"
STRATEGY_COPY = "copy"
STRATEGY_COW = "cow"
STRATEGY_DIRECT = "direct"

module VagrantPlugins
  module G5K
    # Facade coordinating the low-level drivers (command execution, OAR
    # scheduler, network, disk) to manage the lifecycle of a VM on
    # Grid'5000: submission of the backing job, waiting for the VM,
    # ssh information and deletion.
    class Connection
      include Vagrant::Util::Retryable
      include VagrantPlugins::G5K

      # env         - Vagrant action environment (reads :ui and :machine)
      # cwd         - remote working directory (launcher + artifacts)
      # driver      - command driver (exec/upload on the frontend)
      # oar_driver  - OAR batch scheduler driver
      # net_driver  - network driver (bridge or nat)
      # disk_driver - disk driver (local or rbd)
      def initialize(env, cwd, driver, oar_driver, net_driver, disk_driver)
        @logger = Log4r::Logger.new("vagrant::g5k_connection")
        @ui = env[:ui]

        @provider_config = env[:machine].provider_config
        @site = @provider_config.site
        @walltime = @provider_config.walltime
        @image = @provider_config.image
        # Optional extra OAR resource selector, e.g. "{cluster='foo'}/".
        @oar = "{#{@provider_config.oar}}/" if @provider_config.oar != ""

        @cwd = cwd
        @driver = driver
        @oar_driver = oar_driver
        @net_driver = net_driver
        @disk_driver = disk_driver
      end


      # Create the remote working directory (idempotent).
      def create_local_working_dir()
        exec("mkdir -p #{@cwd}")
      end

      # Return the OAR description of the job backing the VM.
      def check_job(job_id)
        @oar_driver.check_job(job_id)
      end

      # Ask the network driver whether its resources are still present.
      def check_net(job_id)
        @net_driver.check_state(job_id)
      end

      # SSH connection info (host/port) for the VM identified by vmid.
      def vm_ssh_info(vmid)
        @net_driver.vm_ssh_info(vmid)
      end

      # Delete the OAR job backing the VM.
      # First tries a soft (checkpointed) deletion so the VM can shut
      # down cleanly; falls back to a hard deletion on failure. Network
      # resources are always detached.
      def delete_job(job_id)
        @ui.info("Soft deleting the associated job")
        begin
          @oar_driver.delete_job(job_id, ["-c", "-s 12"])
        rescue VagrantPlugins::G5K::Errors::CommandError
          @logger.debug "Checkpointing failed, sending hard deletion"
          @ui.warn("Soft delete failed : proceeding to hard delete")
          @oar_driver.delete_job(job_id)
        ensure
          @net_driver.detach()
        end
      end

      # Return the path of the disk image if it already exists, nil otherwise.
      def check_storage(env)
        # Is the disk image already here ?
        file = @disk_driver.check_storage()
        return file if file != ""
        return nil
      end

      # Upload the launcher script and submit the OAR job that boots the
      # VM, then wait for it to be running. The job id is saved in
      # machine.id as soon as it is known, so an interrupted run can
      # still be recovered.
      def launch_vm(env)
        launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_SCRIPT)
        @ui.info("Launching the VM on #{@site}")
        # Checking the subnet job
        # uploading the launcher
        launcher_remote_path = File.join(@cwd, File.basename(LAUNCHER_SCRIPT))
        upload(launcher_path, launcher_remote_path)

        # Generate partial arguments for the kvm command
        # NOTE: net is first due to the shape of the bridge launcher script
        net = @net_driver.generate_net()
        drive = _generate_drive(env)

        args = [net, drive].join(" ")
        # Submitting a new job
        # Getting the job_id as a ruby string
        options = [
          "-t allow_classic_ssh",
          "-l \"#{@oar}nodes=1, walltime=#{@walltime}\"",
          "--name #{env[:machine].name}",
          "--checkpoint 60",
          "--signal 12"
        ]
        job_id = @oar_driver.submit_job("#{launcher_remote_path} #{args}", options)
        # saving the id asap
        env[:machine].id = job_id
        wait_for_vm(job_id)
      end

      # Block until the OAR job is running, then attach network resources.
      def wait_for_vm(job_id)
        @oar_driver.wait_for(job_id)
        @net_driver.attach()
        @ui.info("ready @#{@site}")
      end


      # Delete the VM disk. Refused for the direct/snapshot strategies,
      # where the backing image is shared and must not be destroyed.
      def delete_disk(env)
        if [STRATEGY_DIRECT, STRATEGY_SNAPSHOT].include?(@image[:backing])
          @ui.error("Destroy isn't supported for the strategy #{@image[:backing]}")
          return
        end
        @disk_driver.delete_disk()
      end

      # Run a shell command through the command driver.
      def exec(cmd)
        @driver.exec(cmd)
      end

      # Upload a local file to the remote working directory.
      def upload(src, dst)
        @driver.upload(src, dst)
      end

      # Build the kvm "-drive" argument according to the backing strategy.
      def _generate_drive(env)
        # Depending on the strategy we generate the file location
        file = ""
        snapshot = ""
        if @image[:backing] == STRATEGY_SNAPSHOT
          snapshot = "-snapshot"
        end
        file = @disk_driver.generate_drive()

        return "-drive file=#{file},if=virtio #{snapshot}"
      end

    end
  end
end
@@ -0,0 +1,115 @@
|
|
1
|
+
require 'log4r'
|
2
|
+
|
3
|
+
module VagrantPlugins
  module G5K
    module Network

      # Network driver bridging VMs on a dedicated Grid'5000 subnet.
      # One OAR job per project reserves a /22 subnet; a shared counter
      # file tracks how many VMs use it so the reservation can be
      # released when the last VM detaches.
      class Bridge

        include VagrantPlugins::G5K

        def initialize(env, driver, oar_driver)
          @logger = Log4r::Logger.new("vagrant::network::bridge")
          # command driver is unused
          @driver = driver
          @oar_driver = oar_driver
          @net = env[:machine].provider_config.net
          @project_id = env[:machine].provider_config.project_id
          @walltime = env[:machine].provider_config.walltime
        end

        # Build the network part of the launcher arguments.
        # Under a global lock: reserve the subnet (once per project),
        # materialize the ip/mac mapping in a file and reset the usage
        # counter.
        def generate_net()
          lockable(:lock => VagrantPlugins::G5K.subnet_lock) do
            subnet_job_id = _find_subnet
            if subnet_job_id.nil?
              subnet_job_id = _create_subnet
              @oar_driver.wait_for(subnet_job_id)
              # we can't call this inside the launcher script
              # let's put it in a file instead...
              @driver.exec("g5k-subnets -j #{subnet_job_id} -im > #{_subnet_file}" )
              # initialize subnet count
              @driver.exec("echo 0 > #{_subnet_count}")
            end
            return "BRIDGE #{_subnet_file}"
          end
        end

        # :subnet_missing when the subnet job is gone, nil otherwise.
        def check_state(job_id)
          subnet_job_id = _find_subnet()
          return :subnet_missing if subnet_job_id.nil?
          nil
        end

        # Register one more VM on the subnet.
        def attach()
          _update_subnet_use("+")
        end

        # Unregister a VM; the subnet is released when no longer used.
        def detach()
          _update_subnet_use("-")
        end

        # Recompute the ip assigned to this VM from the subnet file.
        # The ip is a pure function of the vm (job) id, assuming a
        # slash_22 subnet (1022 usable addresses) — same computation as
        # in the launcher script.
        def vm_ssh_info(vmid)
          subnet = @driver.exec("cat #{_subnet_file}" )
                     .split("\n")
                     .map{|macip| macip.split("\t")}
          # recalculate ip given to this VM
          macip = subnet[vmid.to_i.modulo(1022)]
          return {
            :host => macip[0]
          }
        end


        # remote working directory
        def _cwd()
          File.join(".vagrant", @project_id)
        end


        # Submit the OAR job reserving the /22 subnet for this project.
        def _create_subnet()
          options = []
          options << "--name '#{@project_id}-net'"
          options << "-l 'slash_22=1, walltime=#{@walltime}'"
          @oar_driver.submit_job('sleep 84400', options )
        end

        # Path of the file holding the ip/mac mapping of the subnet.
        def _subnet_file()
          return File.join(_cwd(), 'subnet')
        end

        # Path of the file holding the subnet usage counter.
        def _subnet_count()
          return File.join(_cwd(), 'subnet-count')
        end

        # Find the subnet job of this project; nil when absent.
        def _find_subnet()
          job = @oar_driver.look_by_name("#{@project_id}-net")
          return job["Job_Id"] unless job.nil?
          nil
        end

        # Update the subnet use
        # op is a string "+" or "-"
        # if after the update the subnet use is 0
        # the subnet in use is also deleted
        def _update_subnet_use(op)
          cmd = []
          cmd << "c=$(cat #{_subnet_count});"
          cmd << "echo $(($c #{op} 1)) > #{_subnet_count};"
          cmd << "cat #{_subnet_count}"
          count = @driver.exec(cmd.join(" "))
          @logger.info("subnet_count = #{count}")
          if count.to_i <= 0
            @logger.info("deleting the associated subnet")
            subnet_id = _find_subnet()
            # guard: the subnet job may already be gone
            @oar_driver.delete_job(subnet_id) unless subnet_id.nil?
          end
        end

      end
    end
  end
end
@@ -0,0 +1,65 @@
|
|
1
|
+
require 'log4r'
|
2
|
+
|
3
|
+
module VagrantPlugins
  module G5K
    module Network

      # Network driver exposing the VM through qemu user-mode networking
      # (NAT) with explicit tcp port forwardings (e.g. "2222-:22").
      class Nat

        def initialize(env, driver, oar_driver)
          @logger = Log4r::Logger.new("vagrant::network::nat")
          # command driver is unused
          @env = env
          @driver = driver
          @oar_driver = oar_driver
          @net = env[:machine].provider_config.net
        end

        # Build the network part of the kvm command line from the
        # configured port forwardings.
        def generate_net()
          fwd_ports = @net[:ports].map do |p|
            "hostfwd=tcp::#{p}"
          end.join(',')
          net = "-net nic,model=virtio -net user,#{fwd_ports}"

          @logger.debug("Generated net string : #{net}")
          return "NAT #{net}"
        end

        # NAT has no extra resource whose state can go stale.
        def check_state(job_id)
          return nil
        end

        def attach()
          # noop
        end

        def detach()
          # noop
        end

        # SSH info for the VM: the node hosting it (from the OAR job)
        # and the host port forwarded to guest port 22.
        # Raises when port 22 is not forwarded.
        def vm_ssh_info(vmid)
          # get forwarded port 22
          ports = @net[:ports]
          ssh_fwd = ports.select{ |x| x.split(':')[1] == '22'}.first
          if ssh_fwd.nil?
            # was: env[:ui] (NameError) — use the env kept at construction
            @env[:ui].error "SSH port 22 must be forwarded"
            # was: raise Error "..." (Error is undefined) — raise a RuntimeError
            raise "SSH port 22 isn't forwarded"
          end
          ssh_fwd = ssh_fwd.split('-:')[0]
          # get node hosting the vm
          job = @oar_driver.check_job(@env[:machine].id)
          ssh_info = {
            :host => job["assigned_network_address"].first,
            :port => ssh_fwd
          }
          @logger.debug(ssh_info)
          ssh_info
        end


      end
    end
  end
end
@@ -0,0 +1,90 @@
|
|
1
|
+
require 'log4r'
|
2
|
+
|
3
|
+
module VagrantPlugins
  module G5K
    # Thin wrapper around the OAR batch scheduler command-line tools
    # (oarsub / oardel / oarstat), executed remotely through the
    # command driver.
    class Oar

      include Vagrant::Util::Retryable

      attr_accessor :driver

      def initialize(ui, driver)
        @logger = Log4r::Logger.new("vagrant::environment")
        @driver = driver
        @ui = ui
      end

      # Submit cmd as an OAR job with the given oarsub options.
      # Returns the job id as an Integer.
      def submit_job(cmd, options)
        # prefix by the oarsub command
        opt = _build_oar_cmd(options)
        # get the job id returned by the command
        extra = ["| grep \"job_id\"| cut -d':' -f2"]
        cmd = ["oarsub --json", opt,"\'#{cmd}\'" , extra].join(" ")
        @driver.exec(cmd).gsub(/"/,"").strip.to_i
      end

      # Delete a job (oardel). Extra options allow a soft/checkpointed
      # deletion (e.g. ["-c", "-s 12"]).
      def delete_job(job_id, options = [])
        cmd = _build_oar_cmd(options)
        cmd = ["oardel", cmd, job_id].join(" ")
        @driver.exec(cmd)
      end

      # Return the oarstat description of job_id (a Hash), nil when the
      # job is unknown.
      def check_job(job_id)
        cmd = ['oarstat']
        cmd << "--json"
        cmd << "-j #{job_id}"
        cmd = cmd.join(" ")
        job = @driver.exec(cmd)
        JSON.load(job)["#{job_id}"]
      end

      # Find the current user's job with the given name; nil when absent
      # or when listing/parsing fails.
      def look_by_name(job_name)
        begin
          jobs = @driver.exec("oarstat -u --json")
          jobs = JSON.load(jobs)
          s = jobs.select{|k,v| v["name"] == "#{job_name}" }.values.first
          return s
        rescue StandardError => e
          # was: rescue Exception — don't swallow signals/system exits
          @logger.debug(e)
        end
        nil
      end

      # Wait (polling, up to ~100s) for the job to reach Running.
      # Raises JobError when the job ended in Error/Terminated, and
      # JobNotRunning when the wait times out.
      def wait_for(job_id)
        job = nil
        begin
          retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 1) do
            job = check_job(job_id)
            if !job.nil? and ["Error", "Terminated"].include?(job["state"])
              _process_errors(job_id)
            end
            if job.nil? or (!job.nil? and job["state"] != "Running")
              @ui.info("Waiting for the job to be running")
              raise VagrantPlugins::G5K::Errors::JobNotRunning
            end
            break
          end
        rescue VagrantPlugins::G5K::Errors::JobNotRunning
          @ui.error("Tired of waiting")
          raise VagrantPlugins::G5K::Errors::JobNotRunning
        end
        return job
      end

      # Join oarsub/oardel options into a single command-line fragment.
      def _build_oar_cmd(options)
        options.join(" ")
      end

      # Fetch the failed job's stderr file, report it, raise JobError.
      def _process_errors(job_id)
        job = check_job(job_id)
        stderr_file = job["stderr_file"]
        # was: exec(...) — Oar defines no exec, delegate to the driver
        stderr = @driver.exec("cat #{stderr_file}")
        @ui.error("#{stderr_file}: #{stderr}")
        raise VagrantPlugins::G5K::Errors::JobError
      end


    end
  end
end
@@ -1,11 +1,41 @@
|
|
1
1
|
#!/bin/bash

# This script was originally borrowed from pmorillo
# Thanks to him !
# I've made some additions though :)

#


# Build the kvm network arguments for the BRIDGE mode.
# $1 is the subnet file (one "ip<TAB>mac" pair per line, written by the
# bridge network driver). The pair picked is a pure function of
# $OAR_JOB_ID, so the same ip can be recomputed later without any state.
function net_bridge() {
    SUBNET_FILE=$1
    # As we chose a stateless design, let's calculate here the ip and mac
    # assuming we got a slash_22 (1022 usable addresses)
    ipnumber=$(($OAR_JOB_ID % 1022))
    IP_MAC=$(cat $SUBNET_FILE|head -n $((ipnumber + 1))|tail -n 1)
    IP_ADDR=$(echo $IP_MAC|awk '{print $1}')
    MAC_ADDR=$(echo $IP_MAC|awk '{print $2}')

    # create tap
    TAP=$(sudo create_tap)

    # return the specific net string of the kvm command
    echo "-net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no"
}

# The first argument selects the network mode, as generated by the
# network driver: "BRIDGE <subnet_file>" or "NAT <kvm net args...>".
net=""
if [ "$1" == "BRIDGE" ]
then
    shift
    net=$(net_bridge $@)
    echo $(hostname)
    echo $net
    # consume the subnet-file argument as well
    shift
else
    # NAT mode: the kvm net arguments are already part of "$@"
    shift
    net=""
fi
|
5
38
|
|
6
|
-
#OAR -l slash_22=1+{virtual!='none'}/nodes=1,walltime=06:00:00
|
7
|
-
#OAR --checkpoint 60
|
8
|
-
#OAR --signal 12
|
9
39
|
|
10
40
|
# Directory for qcow2 snapshots
|
11
41
|
export TMPDIR=/tmp
|
@@ -28,7 +58,7 @@ trap clean_shutdown 12
|
|
28
58
|
|
29
59
|
# Launch virtual machine
|
30
60
|
#kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
|
31
|
-
kvm -m $VM_MEM -smp $SMP -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm $@ &
|
61
|
+
kvm -m $VM_MEM -smp $SMP -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm $net $@ &
|
32
62
|
|
33
63
|
wait
|
34
64
|
|