vagrant-g5k 0.0.3 → 0.0.4
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/vagrant-g5k/action/connect_g5k.rb +2 -1
- data/lib/vagrant-g5k/action/read_ssh_info.rb +2 -1
- data/lib/vagrant-g5k/action/read_state.rb +0 -4
- data/lib/vagrant-g5k/config.rb +5 -0
- data/lib/vagrant-g5k/util/g5k_utils.rb +24 -43
- data/lib/vagrant-g5k/util/launch_vm_fwd.sh +1 -2
- data/lib/vagrant-g5k/version.rb +1 -1
- metadata +3 -4
- data/lib/vagrant-g5k/util/launch_vm.sh +0 -38
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 14d213a2e487b13b315432d6743eeb5c07fe877e
|
4
|
+
data.tar.gz: f0fa3704d917e257c0062be78edd16715422c4c7
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: ac2fdb41d5a0099d3cd031db5cedd9266c15327faee01f99a70b65f1cadd326e542697fbea0981d2f272b39b551e99f08594d62aac5248a876dc1c013f7cd9d9
|
7
|
+
data.tar.gz: be378516080dc415441b04ac96536a498831f7291c9ad06fe51b88e25ab385783a6cbd2445609a5458c41126c1ddae36ea33d47bd2a9acb9d1b2c18617e0b522
|
@@ -18,7 +18,8 @@ module VagrantPlugins
|
|
18
18
|
:logger => env[:ui],
|
19
19
|
:username => env[:machine].provider_config.username,
|
20
20
|
:image_location => env[:machine].provider_config.image_location,
|
21
|
-
:site => env[:machine].provider_config.site
|
21
|
+
:site => env[:machine].provider_config.site,
|
22
|
+
:ports => env[:machine].provider_config.ports
|
22
23
|
)
|
23
24
|
@app.call(env)
|
24
25
|
end
|
@@ -20,7 +20,8 @@ module VagrantPlugins
|
|
20
20
|
def read_ssh_info(conn, machine)
|
21
21
|
return nil if machine.id.nil?
|
22
22
|
|
23
|
-
return { :host => conn.node,
|
23
|
+
return { :host => conn.node,
|
24
|
+
:port => 2222,
|
24
25
|
:proxy_command => "ssh #{conn.username}@access.grid5000.fr nc %h %p",
|
25
26
|
}
|
26
27
|
end
|
@@ -19,10 +19,6 @@ module VagrantPlugins
|
|
19
19
|
def read_state(machine, conn)
|
20
20
|
return :not_created if machine.id.nil?
|
21
21
|
# is there a job running for this vm ?
|
22
|
-
subnet = conn.check_or_reserve_subnet()
|
23
|
-
if subnet.nil?
|
24
|
-
return :missing_subnet
|
25
|
-
end
|
26
22
|
job = conn.check_job(machine.id)
|
27
23
|
if job.nil? # TODO or fraged
|
28
24
|
return :not_created
|
data/lib/vagrant-g5k/config.rb
CHANGED
@@ -5,7 +5,7 @@ require 'json'
|
|
5
5
|
require 'vagrant/util/retryable'
|
6
6
|
|
7
7
|
WORKING_DIR = ".vagrant-g5k"
|
8
|
-
LAUNCHER_SCRIPT = "launch_vm.sh"
|
8
|
+
LAUNCHER_SCRIPT = "launch_vm_fwd.sh"
|
9
9
|
JOB_SUBNET_NAME = "vagrant-g5k-subnet"
|
10
10
|
WALLTIME="01:00:00"
|
11
11
|
|
@@ -28,9 +28,7 @@ module VagrantPlugins
|
|
28
28
|
|
29
29
|
attr_accessor :pool
|
30
30
|
|
31
|
-
attr_accessor :ip
|
32
|
-
|
33
|
-
attr_accessor :mac
|
31
|
+
attr_accessor :ports
|
34
32
|
|
35
33
|
@@locations = [
|
36
34
|
{
|
@@ -44,9 +42,10 @@ module VagrantPlugins
|
|
44
42
|
args.each do |k,v|
|
45
43
|
instance_variable_set("@#{k}", v) unless v.nil?
|
46
44
|
end
|
45
|
+
@logger.info("connecting with #{@username} on site #{@site}")
|
46
|
+
gateway = Net::SSH::Gateway.new("access.grid5000.fr", @username, :forward_agent => true)
|
47
47
|
|
48
|
-
|
49
|
-
@session = gateway.ssh(@site, "msimonin")
|
48
|
+
@session = gateway.ssh(@site, @username)
|
50
49
|
end
|
51
50
|
|
52
51
|
def list_images()
|
@@ -86,52 +85,22 @@ module VagrantPlugins
|
|
86
85
|
return r
|
87
86
|
end
|
88
87
|
|
89
|
-
def check_or_reserve_subnet()
|
90
|
-
@logger.info("Checking if a subnet has been reserved")
|
91
|
-
oarstat = exec("oarstat --json")
|
92
|
-
oarstat = JSON.load(oarstat)
|
93
|
-
job = oarstat.select!{ |k,v| v["owner"] == @username && v["name"] == JOB_SUBNET_NAME }.values.first
|
94
|
-
if job.nil?
|
95
|
-
# we have to reserve a subnet
|
96
|
-
@logger.info("Reserving a subnet")
|
97
|
-
job_id = exec("oarsub -l \"slash_22=1, walltime=#{WALLTIME}\" --name #{JOB_SUBNET_NAME} \"sleep 3600\" | grep OAR_JOB_ID | cut -d '=' -f2").chomp
|
98
|
-
begin
|
99
|
-
retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 3) do
|
100
|
-
@logger.info("Waiting for the job to be running")
|
101
|
-
job = check_job(job_id)
|
102
|
-
if job.nil? or job["state"] != "Running"
|
103
|
-
raise VagrantPlugins::G5K::Errors::JobNotRunning
|
104
|
-
end
|
105
|
-
break
|
106
|
-
end
|
107
|
-
rescue VagrantPlugins::G5K::Errors::JobNotRunning
|
108
|
-
@logger.error("Tired of waiting")
|
109
|
-
raise VagrantPlugins::G5K::Errors::JobNotRunning
|
110
|
-
end
|
111
|
-
end
|
112
|
-
# get the macs ips addresses pool
|
113
|
-
im = exec("g5k-subnets -j #{job["Job_Id"]} -im")
|
114
|
-
@pool = im.split("\n").map{|i| i.split("\t")}
|
115
|
-
@ip, @mac = @pool[0]
|
116
|
-
@logger.info("Get the mac #{mac} and the corresponding ip #{ip} from the subnet")
|
117
|
-
end
|
118
|
-
|
119
|
-
|
120
88
|
def launch_vm(env)
|
121
89
|
launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_SCRIPT)
|
122
|
-
@logger.info("Launching the VM on Grid'5000")
|
90
|
+
@logger.info("Launching the VM on Grid'50001")
|
123
91
|
# Checking the subnet job
|
124
|
-
subnet = check_or_reserve_subnet()
|
125
92
|
@logger.info("Uploading launcher")
|
126
93
|
# uploading the launcher
|
127
94
|
launcher_remote_path = File.join("/home", @username , WORKING_DIR, LAUNCHER_SCRIPT)
|
128
95
|
upload(launcher_path, launcher_remote_path)
|
129
|
-
|
130
|
-
|
131
|
-
|
96
|
+
|
97
|
+
# Generate partial arguments for the kvm command
|
98
|
+
drive = _generate_drive()
|
99
|
+
net = _generate_net()
|
100
|
+
args = [drive, net].join(" ")
|
132
101
|
# Submitting a new job
|
133
102
|
@logger.info("Starting a new job")
|
134
|
-
job_id = exec("oarsub -t allow_classic_ssh -l \"{virtual!=\'none\'}/nodes=1,walltime=#{WALLTIME}\" --name #{env[:machine].name} --checkpoint 60 --signal 12
|
103
|
+
job_id = exec("oarsub -t allow_classic_ssh -l \"{virtual!=\'none\'}/nodes=1,walltime=#{WALLTIME}\" --name #{env[:machine].name} --checkpoint 60 --signal 12 \"#{launcher_remote_path} #{args}\" | grep OAR_JOB_ID | cut -d '=' -f2").chomp
|
135
104
|
|
136
105
|
|
137
106
|
begin
|
@@ -167,7 +136,19 @@ module VagrantPlugins
|
|
167
136
|
@session.scp.upload!(src, dst)
|
168
137
|
end
|
169
138
|
|
139
|
+
def _generate_drive()
|
140
|
+
return "-drive file=#{@image_location},if=virtio"
|
141
|
+
end
|
170
142
|
|
143
|
+
def _generate_net()
|
144
|
+
# default port to use for ssh
|
145
|
+
@ports << "2222-:22"
|
146
|
+
fwd_ports = @ports.map do |p|
|
147
|
+
"hostfwd=tcp::#{p}"
|
148
|
+
end.join(',')
|
149
|
+
net = "-net nic,model=virtio -net user,#{fwd_ports}"
|
150
|
+
return net
|
151
|
+
end
|
171
152
|
|
172
153
|
|
173
154
|
end
|
@@ -6,7 +6,6 @@
|
|
6
6
|
# Directory for qcow2 snapshots
|
7
7
|
export TMPDIR=/tmp
|
8
8
|
#IMAGE=/grid5000/virt-images/alpine-docker.qcow2
|
9
|
-
IMAGE=$1
|
10
9
|
|
11
10
|
# GET Virtual IP information
|
12
11
|
IP_ADDR=$(/usr/local/bin/g5k-subnets -im | head -1 | awk '{print $1}')
|
@@ -38,7 +37,7 @@ trap clean_shutdown 12
|
|
38
37
|
# Launch virtual machine
|
39
38
|
#kvm -m $VM_MEM -smp $SMP -drive file=/grid5000/images/KVM/alpine_docker.qcow2,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/alpine_docker_vm.mon,server,nowait -localtime -enable-kvm &
|
40
39
|
#kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
|
41
|
-
kvm -m $VM_MEM -smp $SMP -
|
40
|
+
kvm -m $VM_MEM -smp $SMP -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm $@ &
|
42
41
|
|
43
42
|
wait
|
44
43
|
|
data/lib/vagrant-g5k/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: vagrant-g5k
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.0.3
|
4
|
+
version: 0.0.4
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Matthieu Simonin
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2016-09-
|
11
|
+
date: 2016-09-13 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: iniparse
|
@@ -129,7 +129,6 @@ files:
|
|
129
129
|
- lib/vagrant-g5k.rb
|
130
130
|
- lib/vagrant-g5k/.config.rb.swp
|
131
131
|
- lib/vagrant-g5k/action.rb
|
132
|
-
- lib/vagrant-g5k/action/.message_not_created.rb.swp
|
133
132
|
- lib/vagrant-g5k/action/connect_g5k.rb
|
134
133
|
- lib/vagrant-g5k/action/create_local_working_dir.rb
|
135
134
|
- lib/vagrant-g5k/action/is_created.rb
|
@@ -144,8 +143,8 @@ files:
|
|
144
143
|
- lib/vagrant-g5k/plugin.rb
|
145
144
|
- lib/vagrant-g5k/provider.rb
|
146
145
|
- lib/vagrant-g5k/util/.g5k_utils.rb.swp
|
146
|
+
- lib/vagrant-g5k/util/.launch_vm_fwd.sh.swp
|
147
147
|
- lib/vagrant-g5k/util/g5k_utils.rb
|
148
|
-
- lib/vagrant-g5k/util/launch_vm.sh
|
149
148
|
- lib/vagrant-g5k/util/launch_vm_fwd.sh
|
150
149
|
- lib/vagrant-g5k/version.rb
|
151
150
|
- locales/en.yml
|
@@ -1,38 +0,0 @@
|
|
1
|
-
#!/bin/bash
|
2
|
-
#OAR -l slash_22=1+{virtual!='none'}/nodes=1,walltime=06:00:00
|
3
|
-
#OAR --checkpoint 60
|
4
|
-
#OAR --signal 12
|
5
|
-
|
6
|
-
# Directory for qcow2 snapshots
|
7
|
-
export TMPDIR=/tmp
|
8
|
-
#IMAGE=/grid5000/virt-images/alpine-docker.qcow2
|
9
|
-
IMAGE=$1
|
10
|
-
MAC_ADDR=$2
|
11
|
-
|
12
|
-
echo "VM IP informations :"
|
13
|
-
echo "MAC address: $MAC_ADDR"
|
14
|
-
|
15
|
-
# Create tap
|
16
|
-
TAP=$(sudo create_tap)
|
17
|
-
|
18
|
-
# Memory allocation
|
19
|
-
KEEP_SYSTEM_MEM=1 # Gb
|
20
|
-
TOTAL_MEM=$(cat /proc/meminfo | grep -e '^MemTotal:' | awk '{print $2}')
|
21
|
-
VM_MEM=$(( ($TOTAL_MEM / 1024) - $KEEP_SYSTEM_MEM * 1024 ))
|
22
|
-
|
23
|
-
# CPU
|
24
|
-
SMP=$(nproc)
|
25
|
-
|
26
|
-
# Clean shutdown of the VM at the end of the OAR job
|
27
|
-
clean_shutdown() {
|
28
|
-
echo "Caught shutdown signal at $(date)"
|
29
|
-
echo "system_powerdown" | nc -U /tmp/vagrant-g5k.mon
|
30
|
-
}
|
31
|
-
|
32
|
-
trap clean_shutdown 12
|
33
|
-
|
34
|
-
# Launch virtual machine
|
35
|
-
kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
|
36
|
-
|
37
|
-
wait
|
38
|
-
|