vagrant-g5k 0.0.18 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/README.md +2 -0
- data/Vagrantfile +25 -30
- data/lib/vagrant-g5k/action.rb +0 -3
- data/lib/vagrant-g5k/action/connect_g5k.rb +33 -2
- data/lib/vagrant-g5k/action/read_ssh_info.rb +13 -32
- data/lib/vagrant-g5k/action/read_state.rb +4 -8
- data/lib/vagrant-g5k/command.rb +2 -2
- data/lib/vagrant-g5k/config.rb +4 -3
- data/lib/vagrant-g5k/disk/local.rb +68 -0
- data/lib/vagrant-g5k/disk/rbd.rb +92 -0
- data/lib/vagrant-g5k/g5k_connection.rb +144 -0
- data/lib/vagrant-g5k/network/bridge.rb +115 -0
- data/lib/vagrant-g5k/network/nat.rb +65 -0
- data/lib/vagrant-g5k/oar.rb +90 -0
- data/lib/vagrant-g5k/util/{launch_vm_fwd.sh → launch_vm.sh} +35 -5
- data/lib/vagrant-g5k/version.rb +1 -1
- data/spec/vagrant-g5k/oar_spec.rb +57 -0
- data/vagrant-g5k.gemspec +3 -3
- metadata +52 -6
- data/lib/vagrant-g5k/action/close_g5k.rb +0 -24
- data/lib/vagrant-g5k/util/g5k_utils.rb +0 -435
- data/lib/vagrant-g5k/util/launch_vm_bridge.sh +0 -58
data/spec/vagrant-g5k/oar_spec.rb
CHANGED
@@ -0,0 +1,57 @@
|
|
1
|
+
require "vagrant-g5k/oar"
|
2
|
+
require 'rspec/its'
|
3
|
+
require 'rspec/mocks'
|
4
|
+
|
5
|
+
describe VagrantPlugins::G5K::Oar do
|
6
|
+
describe "_build_oar_cmd" do
|
7
|
+
it "builds the wanted oar string" do
|
8
|
+
oar = VagrantPlugins::G5K::Oar.new(nil)
|
9
|
+
cmd = oar._build_oar_cmd([
|
10
|
+
"a",
|
11
|
+
"b"
|
12
|
+
])
|
13
|
+
expect(cmd).to eq "a b"
|
14
|
+
end
|
15
|
+
end
|
16
|
+
|
17
|
+
describe "submit_job" do
|
18
|
+
it "submit job without error" do
|
19
|
+
driver = double("driver")
|
20
|
+
expect(driver).to receive(:exec)
|
21
|
+
.with("oarsub --json -t deploy 'sleep 1' | grep \"job_id\"| cut -d':' -f2")
|
22
|
+
.and_return("1")
|
23
|
+
oar = VagrantPlugins::G5K::Oar.new(driver)
|
24
|
+
job_id = oar.submit_job("sleep 1", [
|
25
|
+
"-t deploy"
|
26
|
+
])
|
27
|
+
expect(job_id).to eq 1
|
28
|
+
end
|
29
|
+
end
|
30
|
+
|
31
|
+
describe "delete_job" do
|
32
|
+
it "delete job without error" do
|
33
|
+
driver = double("driver")
|
34
|
+
expect(driver).to receive(:exec).with("oardel -c -s 12 1")
|
35
|
+
oar = VagrantPlugins::G5K::Oar.new(driver)
|
36
|
+
oar.delete_job(1, [
|
37
|
+
"-c",
|
38
|
+
"-s 12"
|
39
|
+
])
|
40
|
+
end
|
41
|
+
end
|
42
|
+
|
43
|
+
describe "check_job" do
|
44
|
+
it "check the job" do
|
45
|
+
driver = double("driver")
|
46
|
+
expect(driver).to receive(:exec).with("oarstat --json -j 1")
|
47
|
+
.and_return('{"1" : {"name" : "foo"}}')
|
48
|
+
oar = VagrantPlugins::G5K::Oar.new(driver)
|
49
|
+
job = oar.check_job(1)
|
50
|
+
expect(job).to include({"name" => "foo"})
|
51
|
+
puts job
|
52
|
+
end
|
53
|
+
end
|
54
|
+
|
55
|
+
|
56
|
+
end
|
57
|
+
|
data/vagrant-g5k.gemspec
CHANGED
@@ -20,10 +20,10 @@ Gem::Specification.new do |s|
|
|
20
20
|
s.add_runtime_dependency "net-scp", "~> 1.1", ">= 1.1.2"
|
21
21
|
s.add_runtime_dependency "net-ssh-multi", "~> 1.2", ">=1.2.1"
|
22
22
|
|
23
|
-
|
23
|
+
s.add_development_dependency "rake"
|
24
24
|
# rspec 3.4 to mock File
|
25
|
-
|
26
|
-
|
25
|
+
s.add_development_dependency "rspec", "~> 3.4"
|
26
|
+
s.add_development_dependency "rspec-its"
|
27
27
|
|
28
28
|
# The following block of code determines the files that should be included
|
29
29
|
# in the gem. It does this by reading all the files in the directory where
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: vagrant-g5k
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.0.18
|
4
|
+
version: 0.9.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Matthieu Simonin
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2016-11-
|
11
|
+
date: 2016-11-14 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: iniparse
|
@@ -90,6 +90,48 @@ dependencies:
|
|
90
90
|
- - ">="
|
91
91
|
- !ruby/object:Gem::Version
|
92
92
|
version: 1.2.1
|
93
|
+
- !ruby/object:Gem::Dependency
|
94
|
+
name: rake
|
95
|
+
requirement: !ruby/object:Gem::Requirement
|
96
|
+
requirements:
|
97
|
+
- - ">="
|
98
|
+
- !ruby/object:Gem::Version
|
99
|
+
version: '0'
|
100
|
+
type: :development
|
101
|
+
prerelease: false
|
102
|
+
version_requirements: !ruby/object:Gem::Requirement
|
103
|
+
requirements:
|
104
|
+
- - ">="
|
105
|
+
- !ruby/object:Gem::Version
|
106
|
+
version: '0'
|
107
|
+
- !ruby/object:Gem::Dependency
|
108
|
+
name: rspec
|
109
|
+
requirement: !ruby/object:Gem::Requirement
|
110
|
+
requirements:
|
111
|
+
- - "~>"
|
112
|
+
- !ruby/object:Gem::Version
|
113
|
+
version: '3.4'
|
114
|
+
type: :development
|
115
|
+
prerelease: false
|
116
|
+
version_requirements: !ruby/object:Gem::Requirement
|
117
|
+
requirements:
|
118
|
+
- - "~>"
|
119
|
+
- !ruby/object:Gem::Version
|
120
|
+
version: '3.4'
|
121
|
+
- !ruby/object:Gem::Dependency
|
122
|
+
name: rspec-its
|
123
|
+
requirement: !ruby/object:Gem::Requirement
|
124
|
+
requirements:
|
125
|
+
- - ">="
|
126
|
+
- !ruby/object:Gem::Version
|
127
|
+
version: '0'
|
128
|
+
type: :development
|
129
|
+
prerelease: false
|
130
|
+
version_requirements: !ruby/object:Gem::Requirement
|
131
|
+
requirements:
|
132
|
+
- - ">="
|
133
|
+
- !ruby/object:Gem::Version
|
134
|
+
version: '0'
|
93
135
|
description: Enables to boot a vm in the production environment of G5K.
|
94
136
|
email: matthieu.simonin@inria.fr
|
95
137
|
executables: []
|
@@ -106,7 +148,6 @@ files:
|
|
106
148
|
- lib/vagrant-g5k.rb
|
107
149
|
- lib/vagrant-g5k/.config.rb.swp
|
108
150
|
- lib/vagrant-g5k/action.rb
|
109
|
-
- lib/vagrant-g5k/action/close_g5k.rb
|
110
151
|
- lib/vagrant-g5k/action/connect_g5k.rb
|
111
152
|
- lib/vagrant-g5k/action/create_local_working_dir.rb
|
112
153
|
- lib/vagrant-g5k/action/delete_disk.rb
|
@@ -122,16 +163,21 @@ files:
|
|
122
163
|
- lib/vagrant-g5k/action/wait_instance.rb
|
123
164
|
- lib/vagrant-g5k/command.rb
|
124
165
|
- lib/vagrant-g5k/config.rb
|
166
|
+
- lib/vagrant-g5k/disk/local.rb
|
167
|
+
- lib/vagrant-g5k/disk/rbd.rb
|
125
168
|
- lib/vagrant-g5k/driver.rb
|
126
169
|
- lib/vagrant-g5k/errors.rb
|
170
|
+
- lib/vagrant-g5k/g5k_connection.rb
|
171
|
+
- lib/vagrant-g5k/network/bridge.rb
|
172
|
+
- lib/vagrant-g5k/network/nat.rb
|
173
|
+
- lib/vagrant-g5k/oar.rb
|
127
174
|
- lib/vagrant-g5k/plugin.rb
|
128
175
|
- lib/vagrant-g5k/provider.rb
|
129
|
-
- lib/vagrant-g5k/util/
|
130
|
-
- lib/vagrant-g5k/util/launch_vm_bridge.sh
|
131
|
-
- lib/vagrant-g5k/util/launch_vm_fwd.sh
|
176
|
+
- lib/vagrant-g5k/util/launch_vm.sh
|
132
177
|
- lib/vagrant-g5k/version.rb
|
133
178
|
- locales/en.yml
|
134
179
|
- spec/vagrant-g5k/config_spec.rb
|
180
|
+
- spec/vagrant-g5k/oar_spec.rb
|
135
181
|
- vagrant-g5k.gemspec
|
136
182
|
homepage: https://github.com/msimonin/vagrant-g5k
|
137
183
|
licenses:
|
data/lib/vagrant-g5k/action/close_g5k.rb
DELETED
@@ -1,24 +0,0 @@
|
|
1
|
-
require "log4r"
|
2
|
-
require "vagrant-g5k/util/g5k_utils"
|
3
|
-
|
4
|
-
# Unused
|
5
|
-
module VagrantPlugins
|
6
|
-
module G5K
|
7
|
-
module Action
|
8
|
-
# This action connects to G5K, verifies credentials work, and
|
9
|
-
# puts the G5K connection object into the `:g5k_connection` key
|
10
|
-
# in the environment.
|
11
|
-
class CloseG5K
|
12
|
-
def initialize(app, env)
|
13
|
-
@app = app
|
14
|
-
@logger = Log4r::Logger.new("vagrant_g5k::action::close_g5k")
|
15
|
-
end
|
16
|
-
|
17
|
-
def call(env)
|
18
|
-
env[:g5k_connection].close()
|
19
|
-
@app.call(env)
|
20
|
-
end
|
21
|
-
end
|
22
|
-
end
|
23
|
-
end
|
24
|
-
end
|
data/lib/vagrant-g5k/util/g5k_utils.rb
DELETED
@@ -1,435 +0,0 @@
|
|
1
|
-
require 'net/ssh/multi'
|
2
|
-
require 'net/scp'
|
3
|
-
require 'json'
|
4
|
-
require 'digest'
|
5
|
-
require 'thread'
|
6
|
-
|
7
|
-
require 'vagrant/util/retryable'
|
8
|
-
|
9
|
-
LAUNCHER_SCRIPT = "launch_vm_fwd.sh"
|
10
|
-
LAUNCHER_BRIDGE_SCRIPT = "launch_vm_bridge.sh"
|
11
|
-
|
12
|
-
STRATEGY_SNAPSHOT = "snapshot"
|
13
|
-
STRATEGY_COPY = "copy"
|
14
|
-
STRATEGY_COW = "cow"
|
15
|
-
STRATEGY_DIRECT = "direct"
|
16
|
-
|
17
|
-
module VagrantPlugins
|
18
|
-
module G5K
|
19
|
-
class Connection
|
20
|
-
include Vagrant::Util::Retryable
|
21
|
-
include VagrantPlugins::G5K
|
22
|
-
|
23
|
-
attr_accessor :driver
|
24
|
-
|
25
|
-
attr_accessor :username
|
26
|
-
|
27
|
-
attr_accessor :gateway
|
28
|
-
|
29
|
-
attr_accessor :project_id
|
30
|
-
|
31
|
-
attr_accessor :private_key
|
32
|
-
|
33
|
-
attr_accessor :site
|
34
|
-
|
35
|
-
attr_accessor :walltime
|
36
|
-
|
37
|
-
attr_accessor :logger
|
38
|
-
|
39
|
-
attr_accessor :node
|
40
|
-
|
41
|
-
attr_accessor :net
|
42
|
-
|
43
|
-
attr_accessor :oar
|
44
|
-
|
45
|
-
def initialize(env, driver)
|
46
|
-
# provider specific config
|
47
|
-
@provider_config = env[:machine].provider_config
|
48
|
-
@username = @provider_config.username
|
49
|
-
@project_id = @provider_config.project_id
|
50
|
-
@private_key = @provider_config.private_key
|
51
|
-
@site = @provider_config.site
|
52
|
-
@walltime = @provider_config.walltime
|
53
|
-
@image= @provider_config.image
|
54
|
-
@gateway = @provider_config.gateway
|
55
|
-
@oar = "{#{@provider_config.oar}}/" if @provider_config.oar != ""
|
56
|
-
@net = @provider_config.net
|
57
|
-
# grab the network config of the vm
|
58
|
-
# @networks = env[:machine].config.vm.networks
|
59
|
-
# to log to the ui
|
60
|
-
@ui = env[:ui]
|
61
|
-
|
62
|
-
@logger = Log4r::Logger.new("vagrant::environment")
|
63
|
-
@driver = driver
|
64
|
-
|
65
|
-
end
|
66
|
-
|
67
|
-
|
68
|
-
def create_local_working_dir()
|
69
|
-
exec("mkdir -p #{cwd()}")
|
70
|
-
end
|
71
|
-
|
72
|
-
def cwd()
|
73
|
-
# remote working directory
|
74
|
-
File.join(".vagrant", @project_id)
|
75
|
-
end
|
76
|
-
|
77
|
-
|
78
|
-
def check_job(job_id)
|
79
|
-
# Note: when switching from on site to another
|
80
|
-
# this command may failed due the job_id that has nothing
|
81
|
-
# to do with the new site.
|
82
|
-
r = nil
|
83
|
-
begin
|
84
|
-
oarstat = exec("oarstat -j #{job_id} --json")
|
85
|
-
# json is
|
86
|
-
# { "job_id" : {description}}
|
87
|
-
r = JSON.load(oarstat)["#{job_id}"]
|
88
|
-
if !r.nil?
|
89
|
-
@node = r["assigned_network_address"].first
|
90
|
-
end
|
91
|
-
rescue VagrantPlugins::G5K::Errors::CommandError
|
92
|
-
@logger.debug "Rescued error when executing the command"
|
93
|
-
end
|
94
|
-
|
95
|
-
return r
|
96
|
-
end
|
97
|
-
|
98
|
-
def process_errors(job_id)
|
99
|
-
job = check_job(job_id)
|
100
|
-
stderr_file = job["stderr_file"]
|
101
|
-
stderr = exec("cat #{stderr_file}")
|
102
|
-
@ui.error("#{stderr_file}: #{stderr}")
|
103
|
-
raise VagrantPlugins::G5K::Errors::JobError
|
104
|
-
end
|
105
|
-
|
106
|
-
def delete_job(job_id)
|
107
|
-
@ui.info("Soft deleting the associated job")
|
108
|
-
begin
|
109
|
-
exec("oardel -c -s 12 #{job_id}")
|
110
|
-
rescue VagrantPlugins::G5K::Errors::CommandError
|
111
|
-
@logger.debug "Checkpointing failed, sending hard deletion"
|
112
|
-
@ui.warn("Soft delete failed : proceeding to hard delete")
|
113
|
-
exec("oardel #{job_id}")
|
114
|
-
ensure
|
115
|
-
_update_subnet_use("-")
|
116
|
-
end
|
117
|
-
end
|
118
|
-
|
119
|
-
def check_local_storage(env)
|
120
|
-
# Is the disk image already here ?
|
121
|
-
if @image["pool"].nil?
|
122
|
-
file = _check_file_local_storage(env)
|
123
|
-
else
|
124
|
-
file = _check_rbd_local_storage(env)
|
125
|
-
end
|
126
|
-
return file if file != ""
|
127
|
-
return nil
|
128
|
-
end
|
129
|
-
|
130
|
-
def _check_file_local_storage(env)
|
131
|
-
strategy = @image["backing"]
|
132
|
-
file_to_check = ""
|
133
|
-
if [STRATEGY_SNAPSHOT, STRATEGY_DIRECT].include?(strategy)
|
134
|
-
file_to_check = @image["path"]
|
135
|
-
else
|
136
|
-
file_to_check = File.join(cwd(), env[:machine].name.to_s)
|
137
|
-
end
|
138
|
-
exec("[ -f \"#{file_to_check}\" ] && echo #{file_to_check} || echo \"\"")
|
139
|
-
end
|
140
|
-
|
141
|
-
def _check_rbd_local_storage(env)
|
142
|
-
strategy = @image["backing"]
|
143
|
-
file_to_check = ""
|
144
|
-
if [STRATEGY_SNAPSHOT, STRATEGY_DIRECT].include?(strategy)
|
145
|
-
file_to_check = @image["rbd"]
|
146
|
-
else
|
147
|
-
file_to_check = File.join(cwd(), env[:machine].name.to_s)
|
148
|
-
end
|
149
|
-
exec("(rbd --pool #{@image["pool"]} --id #{@image["id"]} --conf #{@image["conf"]} ls | grep \"^#{file_to_check}\") || echo \"\"")
|
150
|
-
end
|
151
|
-
|
152
|
-
|
153
|
-
def launch_vm(env)
|
154
|
-
if @net["type"] == "bridge"
|
155
|
-
launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_BRIDGE_SCRIPT)
|
156
|
-
else
|
157
|
-
launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_SCRIPT)
|
158
|
-
end
|
159
|
-
|
160
|
-
@ui.info("Launching the VM on #{@site}")
|
161
|
-
# Checking the subnet job
|
162
|
-
# uploading the launcher
|
163
|
-
launcher_remote_path = File.join(cwd(), LAUNCHER_SCRIPT)
|
164
|
-
upload(launcher_path, launcher_remote_path)
|
165
|
-
|
166
|
-
# Generate partial arguments for the kvm command
|
167
|
-
# NOTE: net is first dur the the shape of the bridge launcher script
|
168
|
-
# TODO: clean / improve this (that smells)
|
169
|
-
net = _generate_net()
|
170
|
-
drive = _generate_drive(env)
|
171
|
-
|
172
|
-
args = [net, drive].join(" ")
|
173
|
-
# Submitting a new job
|
174
|
-
# Getting the job_id as a ruby string
|
175
|
-
cmd = []
|
176
|
-
cmd << "oarsub"
|
177
|
-
cmd << "--json"
|
178
|
-
cmd << "-t allow_classic_ssh"
|
179
|
-
cmd << "-l \"#{@oar}nodes=1,walltime=#{@walltime}\""
|
180
|
-
cmd << "--name #{env[:machine].name}"
|
181
|
-
cmd << "--checkpoint 60 --signal 12"
|
182
|
-
cmd << "'#{launcher_remote_path} #{args}'"
|
183
|
-
cmd << "| grep \"job_id\"| cut -d':' -f2"
|
184
|
-
job_id = exec(cmd.join(" ")).gsub(/"/,"").strip
|
185
|
-
# saving the id asap
|
186
|
-
env[:machine].id = job_id
|
187
|
-
wait_for_vm(job_id)
|
188
|
-
end
|
189
|
-
|
190
|
-
def wait_for_vm(job_id)
|
191
|
-
_wait_for(job_id)
|
192
|
-
_update_subnet_use("+")
|
193
|
-
@ui.info("ready @#{@site} on #{@node}")
|
194
|
-
end
|
195
|
-
|
196
|
-
def _wait_for(job_id)
|
197
|
-
begin
|
198
|
-
retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 1) do
|
199
|
-
job = check_job(job_id)
|
200
|
-
if !job.nil? and ["Error", "Terminated"].include?(job["state"])
|
201
|
-
process_errors(job_id)
|
202
|
-
end
|
203
|
-
if job.nil? or (!job.nil? and job["state"] != "Running")
|
204
|
-
@ui.info("Waiting for the job to be running")
|
205
|
-
raise VagrantPlugins::G5K::Errors::JobNotRunning
|
206
|
-
end
|
207
|
-
break
|
208
|
-
end
|
209
|
-
rescue VagrantPlugins::G5K::Errors::JobNotRunning
|
210
|
-
@ui.error("Tired of waiting")
|
211
|
-
raise VagrantPlugins::G5K::Errors::JobNotRunning
|
212
|
-
end
|
213
|
-
end
|
214
|
-
|
215
|
-
|
216
|
-
def delete_disk(env)
|
217
|
-
if [STRATEGY_DIRECT, STRATEGY_SNAPSHOT].include?(@image["backing"])
|
218
|
-
@ui.error("Destroy not support for the strategy #{@image["backing"]}")
|
219
|
-
return
|
220
|
-
end
|
221
|
-
|
222
|
-
if @image["pool"].nil?
|
223
|
-
disk = File.join(cwd(), env[:machine].name.to_s)
|
224
|
-
exec("rm -f #{disk}")
|
225
|
-
else
|
226
|
-
disk = File.join(@image["pool"], cwd(), env[:machine].name.to_s)
|
227
|
-
begin
|
228
|
-
retryable(:on => VagrantPlugins::G5K::Errors::CommandError, :tries => 10, :sleep => 5) do
|
229
|
-
exec("rbd rm #{disk} --conf #{@image["conf"]} --id #{@image["id"]}" )
|
230
|
-
break
|
231
|
-
end
|
232
|
-
rescue VagrantPlugins::G5K::Errors::CommandError
|
233
|
-
@ui.error("Reach max attempt while trying to remove the rbd")
|
234
|
-
raise VagrantPlugins::G5K::Errors::CommandError
|
235
|
-
end
|
236
|
-
end
|
237
|
-
end
|
238
|
-
|
239
|
-
def close()
|
240
|
-
# Terminate the driver
|
241
|
-
@driver[:session].close
|
242
|
-
end
|
243
|
-
|
244
|
-
def exec(cmd)
|
245
|
-
@driver.exec(cmd)
|
246
|
-
end
|
247
|
-
|
248
|
-
def upload(src, dst)
|
249
|
-
@driver.upload(src, dst)
|
250
|
-
end
|
251
|
-
|
252
|
-
def _generate_drive(env)
|
253
|
-
# Depending on the strategy we generate the file location
|
254
|
-
# This code smells a bit better
|
255
|
-
file = ""
|
256
|
-
snapshot = ""
|
257
|
-
if @image["backing"] == STRATEGY_SNAPSHOT
|
258
|
-
snapshot = "-snapshot"
|
259
|
-
end
|
260
|
-
|
261
|
-
if @image["pool"].nil?
|
262
|
-
file = _generate_drive_local(env)
|
263
|
-
else
|
264
|
-
file = _generate_drive_rbd(env)
|
265
|
-
end
|
266
|
-
|
267
|
-
return "-drive file=#{file},if=virtio #{snapshot}"
|
268
|
-
end
|
269
|
-
|
270
|
-
def _generate_drive_rbd(env)
|
271
|
-
strategy = @image["backing"]
|
272
|
-
if [STRATEGY_SNAPSHOT, STRATEGY_DIRECT].include?(strategy)
|
273
|
-
file = File.join(@image["pool"], @image["rbd"])
|
274
|
-
elsif strategy == STRATEGY_COW
|
275
|
-
file = _rbd_clone_or_copy_image(env, clone = true)
|
276
|
-
elsif strategy == STRATEGY_COPY
|
277
|
-
file = _rbd_clone_or_copy_image(env, clone = false)
|
278
|
-
end
|
279
|
-
# encapsulate the file to a qemu ready disk description
|
280
|
-
file = "rbd:#{file}:id=#{@image["id"]}:conf=#{@image["conf"]}:rbd_cache=true,cache=writeback"
|
281
|
-
@logger.debug("Generated drive string : #{file}")
|
282
|
-
return file
|
283
|
-
end
|
284
|
-
|
285
|
-
def _generate_drive_local(env)
|
286
|
-
strategy = @image["backing"]
|
287
|
-
if [STRATEGY_SNAPSHOT, STRATEGY_DIRECT].include?(strategy)
|
288
|
-
file = @image["path"]
|
289
|
-
elsif strategy == STRATEGY_COW
|
290
|
-
file = _file_clone_or_copy_image(env, clone = true)
|
291
|
-
elsif strategy == STRATEGY_COPY
|
292
|
-
file = _file_clone_or_copy_image(env, clone = false)
|
293
|
-
end
|
294
|
-
return file
|
295
|
-
end
|
296
|
-
|
297
|
-
def _rbd_clone_or_copy_image(env, clone = true)
|
298
|
-
# destination in the same pool under the .vagrant ns
|
299
|
-
destination = File.join(@image["pool"], cwd(), env[:machine].name.to_s)
|
300
|
-
# Even if nothing bad will happen when the destination already exist, we should test it before
|
301
|
-
exists = _check_rbd_local_storage(env)
|
302
|
-
if exists == ""
|
303
|
-
# we create the destination
|
304
|
-
if clone
|
305
|
-
# parent = pool/rbd@snap
|
306
|
-
@ui.info("Cloning the rbd image")
|
307
|
-
parent = File.join(@image["pool"], "#{@image["rbd"]}@#{@image["snapshot"]}")
|
308
|
-
exec("rbd clone #{parent} #{destination} --conf #{@image["conf"]} --id #{@image["id"]}" )
|
309
|
-
else
|
310
|
-
@ui.info("Copying the rbd image (This may take some time)")
|
311
|
-
# parent = pool/rbd@snap
|
312
|
-
parent = File.join(@image["pool"], "#{@image["rbd"]}")
|
313
|
-
exec("rbd cp #{parent} #{destination} --conf #{@image["conf"]} --id #{@image["id"]}" )
|
314
|
-
end
|
315
|
-
end
|
316
|
-
return destination
|
317
|
-
end
|
318
|
-
|
319
|
-
def _file_clone_or_copy_image(env, clone = true)
|
320
|
-
@ui.info("Clone the file image")
|
321
|
-
file = File.join(cwd(), env[:machine].name.to_s)
|
322
|
-
exists = _check_file_local_storage(env)
|
323
|
-
if exists == ""
|
324
|
-
if clone
|
325
|
-
exec("qemu-img create -f qcow2 -b #{@image["path"]} #{file}")
|
326
|
-
else
|
327
|
-
exec("cp #{@image["path"]} #{file}")
|
328
|
-
end
|
329
|
-
end
|
330
|
-
return file
|
331
|
-
end
|
332
|
-
|
333
|
-
def _generate_net()
|
334
|
-
net = ""
|
335
|
-
@logger.debug(@net)
|
336
|
-
if @net["type"] == "bridge"
|
337
|
-
# we reserve a subnet if necessary and pick one mac/ip from it
|
338
|
-
lockable(:lock => VagrantPlugins::G5K.subnet_lock) do
|
339
|
-
subnet_job_id = _find_subnet
|
340
|
-
if subnet_job_id.nil?
|
341
|
-
subnet_job_id = _create_subnet
|
342
|
-
_wait_for(subnet_job_id)
|
343
|
-
# we can't call this inside the launcher script
|
344
|
-
# let's put it in a file instead...
|
345
|
-
exec("g5k-subnets -j #{subnet_job_id} -im > #{_subnet_file}" )
|
346
|
-
# initialize subnet count
|
347
|
-
exec("echo 0 > #{_subnet_count}")
|
348
|
-
end
|
349
|
-
@subnet_id = subnet_job_id
|
350
|
-
net = _subnet_file
|
351
|
-
end
|
352
|
-
else
|
353
|
-
fwd_ports = @net["ports"].map do |p|
|
354
|
-
"hostfwd=tcp::#{p}"
|
355
|
-
end.join(',')
|
356
|
-
net = "-net nic,model=virtio -net user,#{fwd_ports}"
|
357
|
-
end
|
358
|
-
|
359
|
-
@logger.debug("Generated net string : #{net}")
|
360
|
-
return net
|
361
|
-
end
|
362
|
-
|
363
|
-
def _subnet_file()
|
364
|
-
return File.join(cwd(), 'subnet')
|
365
|
-
end
|
366
|
-
|
367
|
-
def _subnet_count()
|
368
|
-
return File.join(cwd(), 'subnet-count')
|
369
|
-
end
|
370
|
-
|
371
|
-
|
372
|
-
def _find_subnet(vmid = nil)
|
373
|
-
begin
|
374
|
-
jobs = exec("oarstat -u --json")
|
375
|
-
jobs = JSON.load(jobs)
|
376
|
-
s = jobs.select{|k,v| v["name"] == "#{@project_id}-net" }.values.first
|
377
|
-
# we set @node to the ip in the vnet
|
378
|
-
# if there's a subnet and a vmid, read the mac/ip
|
379
|
-
# rebuild the ip associated with that vm
|
380
|
-
if not vmid.nil?
|
381
|
-
subnet = exec("cat #{_subnet_file}" )
|
382
|
-
.split("\n")
|
383
|
-
.map{|macip| macip.split("\t")}
|
384
|
-
# recalculate ip given to this VM
|
385
|
-
macip = subnet[vmid.to_i.modulo(1022)]
|
386
|
-
@node = macip[0]
|
387
|
-
@logger.debug("#{subnet.size} - #{vmid} - #{macip}")
|
388
|
-
end
|
389
|
-
return s["Job_Id"]
|
390
|
-
rescue Exception => e
|
391
|
-
@logger.debug(e)
|
392
|
-
end
|
393
|
-
nil
|
394
|
-
end
|
395
|
-
|
396
|
-
def _create_subnet()
|
397
|
-
cmd = []
|
398
|
-
cmd << "oarsub"
|
399
|
-
cmd << "--json"
|
400
|
-
cmd << "--name '#{@project_id}-net'"
|
401
|
-
cmd << "-l 'slash_22=1, walltime=#{@walltime}' 'sleep 84400'"
|
402
|
-
# getting the job_id for this subnet
|
403
|
-
cmd << "| grep 'job_id'"
|
404
|
-
cmd << "| cut -d':' -f2"
|
405
|
-
|
406
|
-
exec(cmd.join(" ")).gsub(/"/,"").strip
|
407
|
-
end
|
408
|
-
|
409
|
-
# Update the subnet use
|
410
|
-
# op is a string "+" or "-"
|
411
|
-
# if after the update the subnet use is 0
|
412
|
-
# the subnet in use is also deleted
|
413
|
-
def _update_subnet_use(op)
|
414
|
-
if not @net['type'] == 'bridge'
|
415
|
-
return
|
416
|
-
end
|
417
|
-
cmd = []
|
418
|
-
cmd << "c=$(cat #{_subnet_count});"
|
419
|
-
cmd << "echo $(($c #{op} 1)) > #{_subnet_count};"
|
420
|
-
cmd << "cat #{_subnet_count}"
|
421
|
-
count = exec(cmd.join(" "))
|
422
|
-
@logger.info("subnet_count = #{count}")
|
423
|
-
if count.to_i <= 0
|
424
|
-
@logger.info("deleteting the associated subnet")
|
425
|
-
subnet_id = _find_subnet()
|
426
|
-
exec("oardel #{subnet_id}")
|
427
|
-
end
|
428
|
-
|
429
|
-
end
|
430
|
-
|
431
|
-
end
|
432
|
-
end
|
433
|
-
end
|
434
|
-
|
435
|
-
|