vagrant-g5k 0.0.16 → 0.0.17

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +30 -2
  4. data/Vagrantfile +49 -20
  5. data/lib/vagrant-g5k.rb +16 -0
  6. data/lib/vagrant-g5k/action.rb +52 -20
  7. data/lib/vagrant-g5k/action/connect_g5k.rb +3 -8
  8. data/lib/vagrant-g5k/action/create_local_working_dir.rb +1 -1
  9. data/lib/vagrant-g5k/action/get_state.rb +23 -0
  10. data/lib/vagrant-g5k/action/is_created.rb +6 -2
  11. data/lib/vagrant-g5k/action/{message_already_created.rb → message_already_running.rb} +1 -1
  12. data/lib/vagrant-g5k/action/message_not_running.rb +16 -0
  13. data/lib/vagrant-g5k/action/read_ssh_info.rb +16 -14
  14. data/lib/vagrant-g5k/action/read_state.rb +8 -0
  15. data/lib/vagrant-g5k/action/run_instance.rb +1 -0
  16. data/lib/vagrant-g5k/action/wait_instance.rb +18 -0
  17. data/lib/vagrant-g5k/config.rb +7 -3
  18. data/lib/vagrant-g5k/util/g5k_utils.rb +164 -37
  19. data/lib/vagrant-g5k/util/launch_vm_bridge.sh +58 -0
  20. data/lib/vagrant-g5k/version.rb +1 -1
  21. data/vagrant-g5k.gemspec +6 -6
  22. metadata +24 -70
  23. data/Vagrantfile.multisite +0 -37
  24. data/results/.gitignore +0 -2
  25. data/results/README.md +0 -43
  26. data/results/Vagrantfile +0 -126
  27. data/results/ansible.cfg +0 -2
  28. data/results/boilerplate.retry +0 -1
  29. data/results/boilerplate.yml +0 -248
  30. data/results/files/grafana/dashboard.json +0 -4572
  31. data/results/files/grafana/dedicated.json +0 -5486
  32. data/results/files/grafana/haproxy.json +0 -2632
  33. data/results/files/heka/config.json +0 -47
  34. data/results/files/heka/heka-globals.toml +0 -2
  35. data/results/files/heka/lua_decoders/haproxy_log.lua +0 -162
  36. data/results/files/heka/lua_decoders/os_keystone_apache_log.lua +0 -78
  37. data/results/files/heka/lua_decoders/os_mysql_log.lua +0 -56
  38. data/results/files/heka/lua_decoders/os_openstack_log.lua +0 -146
  39. data/results/files/heka/lua_decoders/os_rabbitmq_log.lua +0 -79
  40. data/results/files/kibana/all_objects.json +0 -81
  41. data/results/files/nginx.conf +0 -34
  42. data/results/templates/heka-elasticsearch.toml.j2 +0 -18
  43. data/results/templates/heka-haproxy.toml.j2 +0 -10
  44. data/results/templates/heka-keystone.toml.j2 +0 -14
  45. data/results/templates/heka-mariadb.toml.j2 +0 -14
  46. data/results/templates/heka-openstack.toml.j2 +0 -15
  47. data/results/templates/heka-rabbitmq.toml.j2 +0 -21
  48. data/results/test.rb +0 -32
@@ -35,6 +35,14 @@ module VagrantPlugins
         if job.nil?
           return :not_created
         end
+        if env[:machine].provider_config.net["type"] == "bridge"
+          # is the subnet still there?
+          subnet_id = conn._find_subnet(id)
+          if subnet_id.nil?
+            return :subnet_missing
+          end
+        end
+
         return job["state"].to_sym
       end

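(`_find_subnet`, shown in the g5k_utils.rb hunks below, returns the OAR job id of the `<project_id>-net` subnet reservation, or nil when that reservation no longer exists; the new `:subnet_missing` state captures the latter case.)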
@@ -16,6 +16,7 @@ module VagrantPlugins
       end

       def call(env)
+        # Note: at this point we know for sure that the VM has to be started
         conn = env[:g5k_connection]
         conn.launch_vm(env)
         @app.call(env)
@@ -0,0 +1,18 @@
+module VagrantPlugins
+  module G5K
+    module Action
+      class WaitInstance
+        def initialize(app, env)
+          @app = app
+        end
+
+        def call(env)
+          job_id = env[:machine].id
+          conn = env[:g5k_connection]
+          conn.wait_for_vm(job_id)
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
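A middleware like this is meant to be chained into the provider's action stack; a minimal sketch, assuming the usual Vagrant `Builder` pattern (the actual `action.rb` wiring changed in this release is not reproduced in this diff, so the exact order below is an assumption):

```ruby
# Hypothetical excerpt of an action stack using the new middleware:
Vagrant::Action::Builder.new.tap do |b|
  b.use ConnectG5K     # puts the connection into env[:g5k_connection]
  b.use WaitInstance   # blocks until the OAR job backing the VM is running
end
```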
@@ -40,11 +40,11 @@ module VagrantPlugins
       attr_accessor :image


-      # G5K ports mapping
+      # G5K network options
       #
       #
-      # @return [Array]
-      attr_accessor :ports
+      # @return [Hash]
+      attr_accessor :net

       # OAR resource selection
       #
@@ -59,6 +59,10 @@ module VagrantPlugins
        @gateway = nil
        @walltime = "01:00:00"
        @oar = ""
+       @net = {
+         'type' => 'nat',
+         'ports' => ['2222-,22']
+       }
       end

       def finalize!()
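Concretely, the old `g5k.ports = [...]` setting becomes a `net` hash. A minimal Vagrantfile sketch of the two supported shapes (values are illustrative, taken from the defaults above):

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider "g5k" do |g5k|
    # NAT networking with forwarded ports (the default shown above):
    g5k.net = {
      "type"  => "nat",
      "ports" => ["2222-,22"]
    }
    # Bridge networking backed by a reserved /22 subnet:
    # g5k.net = { "type" => "bridge" }
  end
end
```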
@@ -7,6 +7,7 @@ require 'thread'
 require 'vagrant/util/retryable'

 LAUNCHER_SCRIPT = "launch_vm_fwd.sh"
+LAUNCHER_BRIDGE_SCRIPT = "launch_vm_bridge.sh"

 STRATEGY_SNAPSHOT = "snapshot"
 STRATEGY_COPY = "copy"
@@ -17,6 +18,7 @@ module VagrantPlugins
   module G5K
     class Connection
       include Vagrant::Util::Retryable
+      include VagrantPlugins::G5K

       attr_accessor :driver

@@ -35,9 +37,9 @@ module VagrantPlugins
       attr_accessor :logger

       attr_accessor :node
-
-      attr_accessor :ports

+      attr_accessor :net
+
       attr_accessor :oar

       def initialize(env, driver)
@@ -48,12 +50,12 @@ module VagrantPlugins
        @private_key = @provider_config.private_key
        @site = @provider_config.site
        @walltime = @provider_config.walltime
-       @ports = @provider_config.ports
        @image = @provider_config.image
        @gateway = @provider_config.gateway
        @oar = "{#{@provider_config.oar}}/" if @provider_config.oar != ""
+       @net = @provider_config.net
        # grab the network config of the vm
-       @networks = env[:machine].config.vm.networks
+       # @networks = env[:machine].config.vm.networks
        # to log to the ui
        @ui = env[:ui]

@@ -63,24 +65,33 @@ module VagrantPlugins
       end


-      def create_local_working_dir(env)
-        exec("mkdir -p #{cwd(env)}")
+      def create_local_working_dir()
+        exec("mkdir -p #{cwd()}")
       end

-      def cwd(env)
+      def cwd()
         # remote working directory
         File.join(".vagrant", @project_id)
       end


       def check_job(job_id)
-        oarstat = exec("oarstat -j #{job_id} --json")
-        # json is
-        # { "job_id" : {description}}
-        r = JSON.load(oarstat)["#{job_id}"]
-        if !r.nil?
-          @node = r["assigned_network_address"].first
+        # Note: when switching from one site to another,
+        # this command may fail because the job_id has nothing
+        # to do with the new site.
+        r = nil
+        begin
+          oarstat = exec("oarstat -j #{job_id} --json")
+          # json is
+          # { "job_id" : {description}}
+          r = JSON.load(oarstat)["#{job_id}"]
+          if !r.nil?
+            @node = r["assigned_network_address"].first
+          end
+        rescue VagrantPlugins::G5K::Errors::CommandError
+          @logger.debug "Rescued error when executing the command"
         end
+
         return r
       end

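The `oarstat -j <job_id> --json` payload this parses is keyed by the job id; an abridged sketch with illustrative values (field names taken from the code above):

```ruby
# Shape assumed by check_job; host name and id are made up:
oarstat = {
  "1234567" => {
    "state"                    => "Running",
    "assigned_network_address" => ["parapluie-3.rennes.grid5000.fr"],
    # ... other OAR job fields
  }
}
r = oarstat["1234567"]
r["assigned_network_address"].first  # => "parapluie-3.rennes.grid5000.fr"
```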
@@ -93,11 +104,18 @@ module VagrantPlugins
       end

       def delete_job(job_id)
-        @ui.info("Deleting the associated job")
-        exec("oardel -c -s 12 #{job_id}")
+        @ui.info("Soft deleting the associated job")
+        begin
+          exec("oardel -c -s 12 #{job_id}")
+        rescue VagrantPlugins::G5K::Errors::CommandError
+          @logger.debug "Checkpointing failed, sending hard deletion"
+          @ui.warn("Soft delete failed: proceeding to hard delete")
+          exec("oardel #{job_id}")
+        ensure
+          _update_subnet_use("-")
+        end
       end

-
       def check_local_storage(env)
         # Is the disk image already here?
         if @image["pool"].nil?
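The soft delete relies on OAR checkpointing: `oardel -c -s 12` asks OAR to send signal 12 to the job, which the launcher script traps (see `trap clean_shutdown 12` in launch_vm_bridge.sh below) to power the VM down cleanly; only if that fails does the plugin fall back to a plain `oardel`.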
@@ -115,7 +133,7 @@ module VagrantPlugins
        if [STRATEGY_SNAPSHOT, STRATEGY_DIRECT].include?(strategy)
          file_to_check = @image["path"]
        else
-         file_to_check = File.join(cwd(env), env[:machine].name.to_s)
+         file_to_check = File.join(cwd(), env[:machine].name.to_s)
        end
        exec("[ -f \"#{file_to_check}\" ] && echo #{file_to_check} || echo \"\"")
      end
@@ -126,29 +144,56 @@ module VagrantPlugins
        if [STRATEGY_SNAPSHOT, STRATEGY_DIRECT].include?(strategy)
          file_to_check = @image["rbd"]
        else
-         file_to_check = File.join(cwd(env), env[:machine].name.to_s)
+         file_to_check = File.join(cwd(), env[:machine].name.to_s)
        end
        exec("(rbd --pool #{@image["pool"]} --id #{@image["id"]} --conf #{@image["conf"]} ls | grep \"^#{file_to_check}\") || echo \"\"")
      end


      def launch_vm(env)
-       launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_SCRIPT)
+       if @net["type"] == "bridge"
+         launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_BRIDGE_SCRIPT)
+       else
+         launcher_path = File.join(File.dirname(__FILE__), LAUNCHER_SCRIPT)
+       end
+
        @ui.info("Launching the VM on #{@site}")
        # Checking the subnet job
        # uploading the launcher
-       launcher_remote_path = File.join(cwd(env), LAUNCHER_SCRIPT)
+       launcher_remote_path = File.join(cwd(), LAUNCHER_SCRIPT)
        upload(launcher_path, launcher_remote_path)

        # Generate partial arguments for the kvm command
-       drive = _generate_drive(env)
+       # NOTE: net comes first due to the shape of the bridge launcher script
+       # TODO: clean / improve this (that smells)
        net = _generate_net()
-
-       args = [drive, net].join(" ")
+       drive = _generate_drive(env)
+
+       args = [net, drive].join(" ")
        # Submitting a new job
        # Getting the job_id as a ruby string
-       job_id = exec("oarsub --json -t allow_classic_ssh -l \"#{@oar}nodes=1,walltime=#{@walltime}\" --name #{env[:machine].name} --checkpoint 60 --signal 12 \"#{launcher_remote_path} #{args}\" | grep \"job_id\"| cut -d':' -f2").gsub(/"/,"").strip
+       cmd = []
+       cmd << "oarsub"
+       cmd << "--json"
+       cmd << "-t allow_classic_ssh"
+       cmd << "-l \"#{@oar}nodes=1,walltime=#{@walltime}\""
+       cmd << "--name #{env[:machine].name}"
+       cmd << "--checkpoint 60 --signal 12"
+       cmd << "'#{launcher_remote_path} #{args}'"
+       cmd << "| grep \"job_id\"| cut -d':' -f2"
+       job_id = exec(cmd.join(" ")).gsub(/"/,"").strip
+       # saving the id asap
+       env[:machine].id = job_id
+       wait_for_vm(job_id)
+     end
+
+     def wait_for_vm(job_id)
+       _wait_for(job_id)
+       _update_subnet_use("+")
+       @ui.info("ready @#{@site} on #{@node}")
+     end

+     def _wait_for(job_id)
       begin
         retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 1) do
           job = check_job(job_id)
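With illustrative values substituted, the `cmd` array built in `launch_vm` joins into a single remote shell line; a sketch (cluster, project and machine names below are assumptions):

```ruby
# cmd.join(" ") would produce something along the lines of:
#
#   oarsub --json -t allow_classic_ssh \
#     -l "{cluster='parapluie'}/nodes=1,walltime=01:00:00" \
#     --name vm1 --checkpoint 60 --signal 12 \
#     '.vagrant/myproject/launch_vm_fwd.sh <net args> <drive args>' \
#     | grep "job_id" | cut -d':' -f2
#
# i.e. the uploaded launcher becomes the OAR job command, and the trailing
# pipeline extracts the job id from oarsub's JSON output.
```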
@@ -159,18 +204,15 @@ module VagrantPlugins
            @ui.info("Waiting for the job to be running")
            raise VagrantPlugins::G5K::Errors::JobNotRunning
          end
-         # saving the id
-         env[:machine].id = job["Job_Id"]
          break
        end
       rescue VagrantPlugins::G5K::Errors::JobNotRunning
         @ui.error("Tired of waiting")
         raise VagrantPlugins::G5K::Errors::JobNotRunning
       end
-      @ui.info("booted @#{@site} on #{@node}")
-
      end

+
      def delete_disk(env)
        if [STRATEGY_DIRECT, STRATEGY_SNAPSHOT].include?(@image["backing"])
          @ui.error("Destroy not supported for the strategy #{@image["backing"]}")
@@ -178,10 +220,10 @@ module VagrantPlugins
       end

       if @image["pool"].nil?
-        disk = File.join(cwd(env), env[:machine].name.to_s)
+        disk = File.join(cwd(), env[:machine].name.to_s)
         exec("rm -f #{disk}")
       else
-        disk = File.join(@image["pool"], cwd(env), env[:machine].name.to_s)
+        disk = File.join(@image["pool"], cwd(), env[:machine].name.to_s)
         begin
           retryable(:on => VagrantPlugins::G5K::Errors::CommandError, :tries => 10, :sleep => 5) do
             exec("rbd rm #{disk} --conf #{@image["conf"]} --id #{@image["id"]}" )
@@ -199,11 +241,9 @@ module VagrantPlugins
        @driver[:session].close
      end

-
-
      def exec(cmd)
        @driver.exec(cmd)
-      end
+      end

      def upload(src, dst)
        @driver.upload(src, dst)
@@ -256,7 +296,7 @@ module VagrantPlugins

      def _rbd_clone_or_copy_image(env, clone = true)
        # destination in the same pool under the .vagrant ns
-       destination = File.join(@image["pool"], cwd(env), env[:machine].name.to_s)
+       destination = File.join(@image["pool"], cwd(), env[:machine].name.to_s)
        # Even if nothing bad will happen when the destination already exists, we should test it before
        exists = _check_rbd_local_storage(env)
        if exists == ""
@@ -278,7 +318,7 @@ module VagrantPlugins

      def _file_clone_or_copy_image(env, clone = true)
        @ui.info("Clone the file image")
-       file = File.join(cwd(env), env[:machine].name.to_s)
+       file = File.join(cwd(), env[:machine].name.to_s)
        exists = _check_file_local_storage(env)
        if exists == ""
          if clone
@@ -291,13 +331,100 @@ module VagrantPlugins
       end

       def _generate_net()
-        fwd_ports = @ports.map do |p|
+        net = ""
+        @logger.debug(@net)
+        if @net["type"] == "bridge"
+          # we reserve a subnet if necessary and pick one mac/ip from it
+          lockable(:lock => VagrantPlugins::G5K.subnet_lock) do
+            subnet_job_id = _find_subnet
+            if subnet_job_id.nil?
+              subnet_job_id = _create_subnet
+              _wait_for(subnet_job_id)
+              # we can't call this inside the launcher script
+              # let's put it in a file instead...
+              exec("g5k-subnets -j #{subnet_job_id} -im > #{_subnet_file}" )
+              # initialize subnet count
+              exec("echo 0 > #{_subnet_count}")
+            end
+            @subnet_id = subnet_job_id
+            net = _subnet_file
+          end
+        else
+          fwd_ports = @net["ports"].map do |p|
             "hostfwd=tcp::#{p}"
           end.join(',')
           net = "-net nic,model=virtio -net user,#{fwd_ports}"
-        @logger.info("Mapping ports")
+        end
+
+        @logger.debug("Generated net string: #{net}")
         return net
       end
+
+      def _subnet_file()
+        return File.join(cwd(), 'subnet')
+      end
+
+      def _subnet_count()
+        return File.join(cwd(), 'subnet-count')
+      end
+
+
+      def _find_subnet(vmid = nil)
+        begin
+          jobs = exec("oarstat -u --json")
+          jobs = JSON.load(jobs)
+          s = jobs.select{|k,v| v["name"] == "#{@project_id}-net" }.values.first
+          # we set @node to the ip in the vnet
+          # if there's a subnet and a vmid, read the mac/ip
+          # rebuild the ip associated with that vm
+          if not vmid.nil?
+            subnet = exec("cat #{_subnet_file}" )
+              .split("\n")
+              .map{|macip| macip.split("\t")}
+            # recalculate ip given to this VM
+            macip = subnet[vmid.to_i.modulo(1022)]
+            @node = macip[0]
+            @logger.debug("#{subnet.size} - #{vmid} - #{macip}")
+          end
+          return s["Job_Id"]
+        rescue Exception => e
+          @logger.debug(e)
+        end
+        nil
+      end
+
+      def _create_subnet()
+        cmd = []
+        cmd << "oarsub"
+        cmd << "--json"
+        cmd << "--name '#{@project_id}-net'"
+        cmd << "-l 'slash_22=1, walltime=#{@walltime}' 'sleep 84400'"
+        # getting the job_id for this subnet
+        cmd << "| grep 'job_id'"
+        cmd << "| cut -d':' -f2"
+
+        exec(cmd.join(" ")).gsub(/"/,"").strip
+      end
+
+      # Update the subnet use
+      # op is a string "+" or "-"
+      # if after the update the subnet use is 0
+      # the subnet in use is also deleted
+      def _update_subnet_use(op)
+        cmd = []
+        cmd << "c=$(cat #{_subnet_count});"
+        cmd << "echo $(($c #{op} 1)) > #{_subnet_count};"
+        cmd << "cat #{_subnet_count}"
+        count = exec(cmd.join(" "))
+        @logger.info("subnet_count = #{count}")
+        if count.to_i <= 0
+          @logger.info("deleting the associated subnet")
+          subnet_id = _find_subnet()
+          exec("oardel #{subnet_id}")
+        end
+
+      end
+
    end
  end
end
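To make the bookkeeping concrete: `g5k-subnets -j <id> -im` writes one `IP<TAB>MAC` line per address of the reserved /22 (1022 hosts, matching the modulo above) into the subnet file, and each VM deterministically picks the line at `job_id % 1022`. A minimal sketch with assumed values:

```ruby
# One "IP<TAB>MAC" pair per line, as produced by `g5k-subnets -im`;
# the "subnet" path and job id below are illustrative.
subnet = File.readlines("subnet").map { |line| line.chomp.split("\t") }

job_id = 1_234_567                 # OAR job id of the VM
ip, mac = subnet[job_id % 1022]    # same index the launcher script computes
```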
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# This script is borrowed from pmorrillon.
+# Thanks to him!
+
+#OAR -l slash_22=1+{virtual!='none'}/nodes=1,walltime=06:00:00
+#OAR --checkpoint 60
+#OAR --signal 12
+
+set -x
+
+SUBNET_FILE=$1
+shift
+
+# As we chose a stateless design, let's calculate the IP and MAC here,
+# assuming we got a slash_22
+ipnumber=$(($OAR_JOB_ID % 1022))
+IP_MAC=$(cat $SUBNET_FILE|head -n $((ipnumber + 1))|tail -n 1)
+IP_ADDR=$(echo $IP_MAC|awk '{print $1}')
+MAC_ADDR=$(echo $IP_MAC|awk '{print $2}')
+
+echo $(hostname)
+echo "VM IP information:"
+echo "SUBNET_FILE: $SUBNET_FILE"
+echo "OAR_JOB_ID: $OAR_JOB_ID"
+echo "ipnumber: $ipnumber"
+echo "IP_MAC: $IP_MAC"
+echo "IP address: $IP_ADDR"
+echo "MAC address: $MAC_ADDR"
+
+# create tap
+TAP=$(sudo create_tap)
+
+# Directory for qcow2 snapshots
+export TMPDIR=/tmp
+
+# Memory allocation
+KEEP_SYSTEM_MEM=1 # Gb
+TOTAL_MEM=$(cat /proc/meminfo | grep -e '^MemTotal:' | awk '{print $2}')
+VM_MEM=$(( ($TOTAL_MEM / 1024) - $KEEP_SYSTEM_MEM * 1024 ))
+
+# CPU
+SMP=$(nproc)
+
+# Clean shutdown of the VM at the end of the OAR job
+clean_shutdown() {
+  echo "Caught shutdown signal at $(date)"
+  echo "system_powerdown" | nc -U /tmp/vagrant-g5k.mon
+}
+
+trap clean_shutdown 12
+
+# Launch virtual machine
+#kvm -m $VM_MEM -smp $SMP -drive file=$IMAGE,if=virtio -snapshot -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm &
+kvm -m $VM_MEM -smp $SMP -fsdev local,security_model=none,id=fsdev0,path=$HOME -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare -nographic -monitor unix:/tmp/vagrant-g5k.mon,server,nowait -localtime -enable-kvm -net nic,model=virtio,macaddr=$MAC_ADDR -net tap,ifname=$TAP,script=no $@ &
+
+wait
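Note how this lines up with the Ruby side: in bridge mode `_generate_net` returns the subnet file path, and `launch_vm` joins `[net, drive]`, so the job command becomes `<launcher> <subnet_file> <kvm drive args>`; the script shifts off `$1` (the subnet file) and forwards the remaining arguments (`$@`) straight to `kvm`.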