vagrant-g5k 0.0.3 → 0.0.4

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: e6e3b8b70409d84ba3e8085e618971fe21a4f96d
4
- data.tar.gz: 085ca43afd7c20e6bb268c7e439bb3f148782cc6
3
+ metadata.gz: 14d213a2e487b13b315432d6743eeb5c07fe877e
4
+ data.tar.gz: f0fa3704d917e257c0062be78edd16715422c4c7
5
5
  SHA512:
6
- metadata.gz: 1a0ffe62377539bf6f38bcfbf5bdc2bcc2edcc5766e1a68322090bb15f392104a2138e7b1b015318830ab17e9038a62454b5ea05a1e2688f1cbd4987cd1232bd
7
- data.tar.gz: f00e2106f1cd0b8f4aa87f26b9ebe6843de8f69bac1d44fa1101cf8e151707252329f3741956b99e036e7599814de850536cae9641b6182e778bba7a4d1ad50d
6
+ metadata.gz: ac2fdb41d5a0099d3cd031db5cedd9266c15327faee01f99a70b65f1cadd326e542697fbea0981d2f272b39b551e99f08594d62aac5248a876dc1c013f7cd9d9
7
+ data.tar.gz: be378516080dc415441b04ac96536a498831f7291c9ad06fe51b88e25ab385783a6cbd2445609a5458c41126c1ddae36ea33d47bd2a9acb9d1b2c18617e0b522
@@ -18,7 +18,8 @@ module VagrantPlugins
18
18
  :logger => env[:ui],
19
19
  :username => env[:machine].provider_config.username,
20
20
  :image_location => env[:machine].provider_config.image_location,
21
- :site => env[:machine].provider_config.site
21
+ :site => env[:machine].provider_config.site,
22
+ :ports => env[:machine].provider_config.ports
22
23
  )
23
24
  @app.call(env)
24
25
  end
@@ -20,7 +20,8 @@ module VagrantPlugins
20
20
  def read_ssh_info(conn, machine)
21
21
  return nil if machine.id.nil?
22
22
 
23
- return { :host => conn.ip,
23
+ return { :host => conn.node,
24
+ :port => 2222,
24
25
  :proxy_command => "ssh #{conn.username}@access.grid5000.fr nc %h %p",
25
26
  }
26
27
  end
@@ -19,10 +19,6 @@ module VagrantPlugins
19
19
  def read_state(machine, conn)
20
20
  return :not_created if machine.id.nil?
21
21
  # is there a job running for this vm ?
22
- subnet = conn.check_or_reserve_subnet()
23
- if subnet.nil?
24
- return :missing_subnet
25
- end
26
22
  job = conn.check_job(machine.id)
27
23
  if job.nil? # TODO or fragged
28
24
  return :not_created
@@ -29,6 +29,11 @@ module VagrantPlugins
29
29
  # @return [String]
30
30
  attr_accessor :image_strategy
31
31
 
32
+ # G5K ports mapping
33
+ #
34
+ #
35
+ # @return [Array]
36
+ attr_accessor :ports
32
37
 
33
38
  def initialize()
34
39
  @username = nil
@@ -5,7 +5,7 @@ require 'json'
5
5
  require 'vagrant/util/retryable'
6
6
 
7
7
  WORKING_DIR = ".vagrant-g5k"
8
- LAUNCHER_SCRIPT = "launch_vm.sh"
8
+ LAUNCHER_SCRIPT = "launch_vm_fwd.sh"
9
9
  JOB_SUBNET_NAME = "vagrant-g5k-subnet"
10
10
  WALLTIME="01:00:00"
11
11
 
@@ -28,9 +28,7 @@ module VagrantPlugins
28
28
 
29
29
  attr_accessor :pool
30
30
 
31
- attr_accessor :ip
32
-
33
- attr_accessor :mac
31
+ attr_accessor :ports
34
32
 
35
33
  @@locations = [
36
34
  {
@@ -44,9 +42,10 @@ module VagrantPlugins
44
42
  args.each do |k,v|
45
43
  instance_variable_set("@#{k}", v) unless v.nil?
46
44
  end
45
+ @logger.info("connecting with #{@username} on site #{@site}")
46
+ gateway = Net::SSH::Gateway.new("access.grid5000.fr", @username, :forward_agent => true)
47
47
 
48
- gateway = Net::SSH::Gateway.new("access.grid5000.fr", "msimonin", :forward_agent => true)
49
- @session = gateway.ssh(@site, "msimonin")
48
+ @session = gateway.ssh(@site, @username)
50
49
  end
51
50
 
52
51
  def list_images()
@@ -86,52 +85,22 @@ module VagrantPlugins
86
85
  return r
87
86
  end
88
87
 
89
- def check_or_reserve_subnet()
90
- @logger.info("Checking if a subnet has been reserved")
91
- oarstat = exec("oarstat --json")
92
- oarstat = JSON.load(oarstat)
93
- job = oarstat.select!{ |k,v| v["owner"] == @username && v["name"] == JOB_SUBNET_NAME }.values.first
94
- if job.nil?
95
- # we have to reserve a subnet
96
- @logger.info("Reserving a subnet")
97
- job_id = exec("oarsub -l \"slash_22=1, walltime=#{WALLTIME}\" --name #{JOB_SUBNET_NAME} \"sleep 3600\" | grep OAR_JOB_ID | cut -d '=' -f2").chomp
98
- begin
99
- retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 3) do
100
- @logger.info("Waiting for the job to be running")
101
- job = check_job(job_id)
102
- if job.nil? or job["state"] != "Running"
103
- raise VagrantPlugins::G5K::Errors::JobNotRunning
104
- end
105
- break
106
- end
107
- rescue VagrantPlugins::G5K::Errors::JobNotRunning
108
- @logger.error("Tired of waiting")
109
- raise VagrantPlugins::G5K::Errors::JobNotRunning
110
- end
111
- end
112
- # get the macs ips addresses pool
113
- im = exec("g5k-subnets -j #{job["Job_Id"]} -im")
114
- @pool = im.split("\n").map{|i| i.split("\t")}
115
- @ip, @mac = @pool[0]
116
- @logger.info("Get the mac #{mac} and the corresponding ip #{ip} from the subnet")
117
- end
118
-
119
-
120
88
  def launch_vm(env)
121
89
  launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_SCRIPT)
122
- @logger.info("Launching the VM on Grid'5000")
90
+ @logger.info("Launching the VM on Grid'5000")
123
91
  # Checking the subnet job
124
- subnet = check_or_reserve_subnet()
125
92
  @logger.info("Uploading launcher")
126
93
  # uploading the launcher
127
94
  launcher_remote_path = File.join("/home", @username , WORKING_DIR, LAUNCHER_SCRIPT)
128
95
  upload(launcher_path, launcher_remote_path)
129
- # creating the params file
130
- params_path = File.join("/home", @username, WORKING_DIR, 'params')
131
- exec("echo #{@image_location} #{@mac} > #{params_path}")
96
+
97
+ # Generate partial arguments for the kvm command
98
+ drive = _generate_drive()
99
+ net = _generate_net()
100
+ args = [drive, net].join(" ")
132
101
  # Submitting a new job
133
102
  @logger.info("Starting a new job")
134
- job_id = exec("oarsub -t allow_classic_ssh -l \"{virtual!=\'none\'}/nodes=1,walltime=#{WALLTIME}\" --name #{env[:machine].name} --checkpoint 60 --signal 12 --array-param-file #{params_path} #{launcher_remote_path} | grep OAR_JOB_ID | cut -d '=' -f2").chomp
103
+ job_id = exec("oarsub -t allow_classic_ssh -l \"{virtual!=\'none\'}/nodes=1,walltime=#{WALLTIME}\" --name #{env[:machine].name} --checkpoint 60 --signal 12 \"#{launcher_remote_path} #{args}\" | grep OAR_JOB_ID | cut -d '=' -f2").chomp
135
104
 
136
105
 
137
106
  begin
@@ -167,7 +136,19 @@ module VagrantPlugins
167
136
  @session.scp.upload!(src, dst)
168
137
  end
169
138
 
139
+ def _generate_drive()
140
+ return "-drive file=#{@image_location},if=virtio"
141
+ end
170
142
 
143
+ def _generate_net()
144
+ # default port to use for ssh
145
+ @ports << "2222-:22"
146
+ fwd_ports = @ports.map do |p|
147
+ "hostfwd=tcp::#{p}"
148
+ end.join(',')
149
+ net = "-net nic,model=virtio -net user,#{fwd_ports}"
150
+ return net
151
+ end
171
152
 
172
153
 
173
154
  end
@@ -6,7 +6,6 @@
6
6
  # Directory for qcow2 snapshots
7
7
  export TMPDIR=/tmp
8
8
  #IMAGE=/grid5000/virt-images/alpine-docker.qcow2
9
- IMAGE=$1
10
9
 
11
10
  # GET Virtual IP information
12
11
  IP_ADDR=$(/usr/local/bin/g5k-subnets -im | head -1 | awk '{print $1}')
@@ -38,7 +37,7 @@ trap clean_shutdown 12
38
37
  # Launch virtual machine
39
38
  #kvm -m $VM_MEM -smp $SMP -drive file=/grid5000/images/KVM/alpine_docker.qcow2,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/alpine_docker_vm.mon,server,nowait -localtime -enable-kvm &
40
39
  #kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
41
- kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio -net user,hostfwd=tcp::2222-:22 -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
40
+ kvm -m $VM_MEM -smp $SMP -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm $@ &
42
41
 
43
42
  wait
44
43
 
@@ -1,5 +1,5 @@
1
1
  module VagrantPlugins
2
2
  module G5K
3
- VERSION = '0.0.3'
3
+ VERSION = '0.0.4'
4
4
  end
5
5
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: vagrant-g5k
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.3
4
+ version: 0.0.4
5
5
  platform: ruby
6
6
  authors:
7
7
  - Matthieu Simonin
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2016-09-12 00:00:00.000000000 Z
11
+ date: 2016-09-13 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: iniparse
@@ -129,7 +129,6 @@ files:
129
129
  - lib/vagrant-g5k.rb
130
130
  - lib/vagrant-g5k/.config.rb.swp
131
131
  - lib/vagrant-g5k/action.rb
132
- - lib/vagrant-g5k/action/.message_not_created.rb.swp
133
132
  - lib/vagrant-g5k/action/connect_g5k.rb
134
133
  - lib/vagrant-g5k/action/create_local_working_dir.rb
135
134
  - lib/vagrant-g5k/action/is_created.rb
@@ -144,8 +143,8 @@ files:
144
143
  - lib/vagrant-g5k/plugin.rb
145
144
  - lib/vagrant-g5k/provider.rb
146
145
  - lib/vagrant-g5k/util/.g5k_utils.rb.swp
146
+ - lib/vagrant-g5k/util/.launch_vm_fwd.sh.swp
147
147
  - lib/vagrant-g5k/util/g5k_utils.rb
148
- - lib/vagrant-g5k/util/launch_vm.sh
149
148
  - lib/vagrant-g5k/util/launch_vm_fwd.sh
150
149
  - lib/vagrant-g5k/version.rb
151
150
  - locales/en.yml
@@ -1,38 +0,0 @@
1
- #!/bin/bash
2
- #OAR -l slash_22=1+{virtual!='none'}/nodes=1,walltime=06:00:00
3
- #OAR --checkpoint 60
4
- #OAR --signal 12
5
-
6
- # Directory for qcow2 snapshots
7
- export TMPDIR=/tmp
8
- #IMAGE=/grid5000/virt-images/alpine-docker.qcow2
9
- IMAGE=$1
10
- MAC_ADDR=$2
11
-
12
- echo "VM IP informations :"
13
- echo "MAC address: $MAC_ADDR"
14
-
15
- # Create tap
16
- TAP=$(sudo create_tap)
17
-
18
- # Memory allocation
19
- KEEP_SYSTEM_MEM=1 # Gb
20
- TOTAL_MEM=$(cat /proc/meminfo | grep -e '^MemTotal:' | awk '{print $2}')
21
- VM_MEM=$(( ($TOTAL_MEM / 1024) - $KEEP_SYSTEM_MEM * 1024 ))
22
-
23
- # CPU
24
- SMP=$(nproc)
25
-
26
- # Clean shutdown of the VM at the end of the OAR job
27
- clean_shutdown() {
28
- echo "Caught shutdown signal at $(date)"
29
- echo "system_powerdown" | nc -U /tmp/vagrant-g5k.mon
30
- }
31
-
32
- trap clean_shutdown 12
33
-
34
- # Launch virtual machine
35
- kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
36
-
37
- wait
38
-