vagrant-g5k 0.0.15 → 0.0.16

Files changed (35)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/README.md +19 -0
  4. data/Vagrantfile +18 -31
  5. data/Vagrantfile.multisite +37 -0
  6. data/lib/vagrant-g5k/errors.rb +8 -3
  7. data/lib/vagrant-g5k/util/g5k_utils.rb +21 -13
  8. data/lib/vagrant-g5k/version.rb +1 -1
  9. data/locales/en.yml +5 -1
  10. data/results/.gitignore +2 -0
  11. data/results/README.md +43 -0
  12. data/results/Vagrantfile +126 -0
  13. data/results/ansible.cfg +2 -0
  14. data/results/boilerplate.retry +1 -0
  15. data/results/boilerplate.yml +248 -0
  16. data/results/files/grafana/dashboard.json +4572 -0
  17. data/results/files/grafana/dedicated.json +5486 -0
  18. data/results/files/grafana/haproxy.json +2632 -0
  19. data/results/files/heka/config.json +47 -0
  20. data/results/files/heka/heka-globals.toml +2 -0
  21. data/results/files/heka/lua_decoders/haproxy_log.lua +162 -0
  22. data/results/files/heka/lua_decoders/os_keystone_apache_log.lua +78 -0
  23. data/results/files/heka/lua_decoders/os_mysql_log.lua +56 -0
  24. data/results/files/heka/lua_decoders/os_openstack_log.lua +146 -0
  25. data/results/files/heka/lua_decoders/os_rabbitmq_log.lua +79 -0
  26. data/results/files/kibana/all_objects.json +81 -0
  27. data/results/files/nginx.conf +34 -0
  28. data/results/templates/heka-elasticsearch.toml.j2 +18 -0
  29. data/results/templates/heka-haproxy.toml.j2 +10 -0
  30. data/results/templates/heka-keystone.toml.j2 +14 -0
  31. data/results/templates/heka-mariadb.toml.j2 +14 -0
  32. data/results/templates/heka-openstack.toml.j2 +15 -0
  33. data/results/templates/heka-rabbitmq.toml.j2 +21 -0
  34. data/results/test.rb +32 -0
  35. metadata +28 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 4bb98de93c858a960bbdbc3918b688c7c053b642
-  data.tar.gz: 4f05183a56b167b8bb535e91646e386c4fd58463
+  metadata.gz: d1df4a32a68cbad5242e82101c86660aec6edf0b
+  data.tar.gz: 2978fdaf2984face9272df254040893082497a7d
 SHA512:
-  metadata.gz: 4d1dd22b192c8eff2c6a77991ce555bf877b1c6970bd9f43721e960dc473a0f68459a5f699f2456285edf50d3b9a9ca8b5d654c38de130bacf9f11283bfda8dd
-  data.tar.gz: f326dc3f70a5b63ced19f3d8562ae8e8b8c72e786e2b498d1e9f6ca8f5e75bb78b7572c11af0496c70976248c37fafc83be867b977d62cba3777738fc73372f7
+  metadata.gz: 24b08525b133f8f58a185315f8553322b57c1cf16b544634dd3df56695d496a632e073fb5c790cd4a1c0a11a724d5ff9371e40bcb659d0dd3294f35fb47b795a
+  data.tar.gz: 1229bd98b6478fb89a7d68cb486d6c089b88682a2493558d87d9519bd47a3cdb7e955402ac9d345634d97d53c7b1196b4af95fa641ad471216b738b8db65ae50
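A `.gem` archive is a plain tar file containing `metadata.gz`, `data.tar.gz` and `checksums.yaml.gz`, so the checksums above can be verified by hand. A minimal sketch:

```
gem fetch vagrant-g5k --version 0.0.16
tar -xf vagrant-g5k-0.0.16.gem metadata.gz data.tar.gz
sha1sum metadata.gz data.tar.gz     # compare with the SHA1 entries above
sha512sum metadata.gz data.tar.gz   # compare with the SHA512 entries above
```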
data/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
+# 0.0.16
+
+* Improve stdout (add information on the physical node)
+* Stop polling if the job is terminated
+
 # 0.0.15
 
 * Allow parallel boot
data/README.md CHANGED
@@ -45,5 +45,24 @@ to back the disk image of the virtual machines :
  * `vagrant halt`
  * `vagrant provision`
  * `vagrant ssh`
+ * `vagrant ssh-config`
  * `vagrant status`
  * `vagrant up`
+
+## Use ceph as backing strategy
+
+Vagrant-g5k will look into `~/.ceph/config` on each frontend where VMs are started.
+See [1] for further information on how to configure ceph on Grid'5000.
+
+[1]: https://www.grid5000.fr/mediawiki/index.php/Ceph
+
+## Developing
+
+* clone the repository
+* use `$ bundle` to install all the dependencies (this may take some time)
+* then test your code against the provided (or modified) Vagrantfile using:
+```
+VAGRANT_LOG=debug VAGRANT_DEFAULT_PROVIDER=g5k bundle exec vagrant up
+```
data/Vagrantfile CHANGED
@@ -3,12 +3,12 @@
 #
 # Testing purpose only
 #Vagrant.require_plugin "vagrant-g5k"
-NB_G5K=1
-NB_IGRIDA=1
+
+SITES=['rennes']
 
 Vagrant.configure(2) do |config|
-  (0..NB_G5K-1).each do |i|
-    config.vm.define "vm-g5k-#{i}" do |my|
+  SITES.each do |site|
+    config.vm.define "vm-#{site}" do |my|
       my.vm.box = "dummy"
 
       my.ssh.username = "root"
@@ -16,37 +16,24 @@ Vagrant.configure(2) do |config|
 
       my.vm.provider "g5k" do |g5k|
         g5k.project_id = "vagrant-g5k"
-        g5k.site = "nancy"
+        g5k.site = "#{site}"
         g5k.gateway = "access.grid5000.fr"
-        g5k.image = {
-          "pool" => "msimonin_rbds",
-          "rbd" => "bases/alpine_docker",
-          "snapshot" => "parent",
-          "id" => "$USER",
-          "conf" => "$HOME/.ceph/config",
-          "backing" => "snapshot"
-        }
-        g5k.ports = ['2222-:22']
-      end #g5k
-    end #vm
-  end # each
-
-
-  (0..NB_IGRIDA-1).each do |i|
-    config.vm.define "vm-igrida-#{i}" do |my|
-      my.vm.box = "dummy"
-
-      my.vm.provider "g5k" do |g5k|
-        g5k.project_id = "vagrant-g5k"
-        g5k.site = "igrida-oar-frontend"
-        g5k.gateway = "transit.irisa.fr"
-        g5k.image = {
-          "path" => "/udd/msimonin/precise.qcow2",
+        # g5k.image = {
+        #   "pool" => "msimonin_rbds",
+        #   "rbd" => "bases/alpine_docker",
+        #   "snapshot" => "parent",
+        #   "id" => "$USER",
+        #   "conf" => "$HOME/.ceph/config",
+        #   "backing" => "snapshot"
+        # }
+        g5k.image = {
+          "path" => "/grid5000/virt-images/alpine_docker.qcow2",
           "backing" => "snapshot"
-        }
+        }
         g5k.ports = ['2222-:22']
+        g5k.oar = "virtual != 'none'"
       end #g5k
-    end # vm
+    end #vm
   end # each
 
 end
data/Vagrantfile.multisite ADDED
@@ -0,0 +1,37 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# Testing purpose only
+#Vagrant.require_plugin "vagrant-g5k"
+
+SITES=['rennes', 'nancy', 'nantes', 'lille', 'luxembourg', 'grenoble', 'lyon']
+
+Vagrant.configure(2) do |config|
+  SITES.each do |site|
+    config.vm.define "vm-#{site}" do |my|
+      my.vm.box = "dummy"
+
+      my.ssh.username = "root"
+      my.ssh.password = ""
+
+      my.vm.provider "g5k" do |g5k|
+        g5k.project_id = "vagrant-g5k"
+        g5k.site = "#{site}"
+        g5k.gateway = "access.grid5000.fr"
+        g5k.image = {
+          "pool" => "msimonin_rbds",
+          "rbd" => "bases/alpine_docker",
+          "snapshot" => "parent",
+          "id" => "$USER",
+          "conf" => "$HOME/.ceph/config",
+          "backing" => "snapshot"
+        }
+        g5k.ports = ['2222-:22']
+        g5k.oar = "virtual != 'none'"
+      end #g5k
+    end #vm
+  end # each
+
+end
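Each site gets its own machine definition (`vm-rennes`, `vm-nancy`, ...), so sites can be booted selectively or all at once (0.0.15 added parallel boot). A usage sketch, assuming this file is used as the active Vagrantfile:

```
VAGRANT_DEFAULT_PROVIDER=g5k vagrant up vm-rennes vm-lyon
vagrant ssh vm-rennes
```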
data/lib/vagrant-g5k/errors.rb CHANGED
@@ -8,15 +8,20 @@ module VagrantPlugins
     end
 
     class TimeoutOnJobSubmissionError < VagrantG5KError
-      error_key("tired of waiting")
+      error_key("tired_of_waiting")
     end
 
     class JobNotRunning < VagrantG5KError
-      error_key("tired of waiting")
+      error_key("tired_of_waiting")
     end
 
+    class JobError < VagrantG5KError
+      error_key("job_error")
+    end
+
+
     class CommandError < VagrantG5KError
-      error_key("remote command error")
+      error_key("remote_command_error")
     end
 
data/lib/vagrant-g5k/util/g5k_utils.rb CHANGED
@@ -18,7 +18,7 @@ module VagrantPlugins
     class Connection
       include Vagrant::Util::Retryable
 
-      attr_accessor :session
+      attr_accessor :driver
 
       attr_accessor :username
 
@@ -74,19 +74,24 @@ module VagrantPlugins
 
 
       def check_job(job_id)
-        oarstat = exec("oarstat --json")
-        oarstat = JSON.load(oarstat)
-        @logger.debug("Looking for the job id #{job_id} and username #{@username}")
-        r = oarstat.select!{ |k,v| k == job_id and v["owner"] == @username }.values.first
-        @logger.debug(r.inspect)
-        # update the assigned hostname
-        # this will be used to reach the vm
+        oarstat = exec("oarstat -j #{job_id} --json")
+        # the json is of the form
+        # { "job_id" : { description } }
+        r = JSON.load(oarstat)["#{job_id}"]
         if !r.nil?
          @node = r["assigned_network_address"].first
        end
        return r
      end
 
+      def process_errors(job_id)
+        job = check_job(job_id)
+        stderr_file = job["stderr_file"]
+        stderr = exec("cat #{stderr_file}")
+        @ui.error("#{stderr_file}: #{stderr}")
+        raise VagrantPlugins::G5K::Errors::JobError
+      end
+
       def delete_job(job_id)
         @ui.info("Deleting the associated job")
         exec("oardel -c -s 12 #{job_id}")
@@ -145,10 +150,13 @@ module VagrantPlugins
       job_id = exec("oarsub --json -t allow_classic_ssh -l \"#{@oar}nodes=1,walltime=#{@walltime}\" --name #{env[:machine].name} --checkpoint 60 --signal 12 \"#{launcher_remote_path} #{args}\" | grep \"job_id\"| cut -d':' -f2").gsub(/"/,"").strip
 
       begin
-        retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 2) do
-          @ui.info("Waiting for the job to be running")
+        retryable(:on => VagrantPlugins::G5K::Errors::JobNotRunning, :tries => 100, :sleep => 1) do
           job = check_job(job_id)
-          if job.nil? or job["state"] != "Running"
+          if !job.nil? and ["Error", "Terminated"].include?(job["state"])
+            process_errors(job_id)
+          end
+          if job.nil? or (!job.nil? and job["state"] != "Running")
+            @ui.info("Waiting for the job to be running")
             raise VagrantPlugins::G5K::Errors::JobNotRunning
           end
           # saving the id
@@ -159,7 +167,7 @@ module VagrantPlugins
         @ui.error("Tired of waiting")
         raise VagrantPlugins::G5K::Errors::JobNotRunning
       end
-      @ui.info("VM booted @#{@site} on #{@node}")
+      @ui.info("booted @#{@site} on #{@node}")
 
     end
 
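Two details of the submission line are worth noting: `--checkpoint 60 --signal 12` asks OAR to send signal 12 to the launcher shortly before the walltime expires, and `delete_job` above reuses the same signal via `oardel -c -s 12`, so the VM is shut down the same way in both cases. A sketch of the expanded command, assuming `@oar` carries an OAR resource filter such as `{virtual != 'none'}/` (the exact expansion is plugin-internal) and leaving `launcher_remote_path` as the placeholder it is:

```
oarsub --json -t allow_classic_ssh \
  -l "{virtual != 'none'}/nodes=1,walltime=03:00:00" \
  --name vm-rennes --checkpoint 60 --signal 12 \
  "<launcher_remote_path> <args>" | grep "job_id" | cut -d':' -f2
```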
@@ -229,7 +237,7 @@ module VagrantPlugins
         file = _rbd_clone_or_copy_image(env, clone = false)
       end
       # encapsulate the file to a qemu ready disk description
-      file = "rbd:#{file}:id=#{@image["id"]}:conf=#{@image["conf"]}"
+      file = "rbd:#{file}:id=#{@image["id"]}:conf=#{@image["conf"]}:rbd_cache=true,cache=writeback"
       @logger.debug("Generated drive string : #{file}")
       return file
     end
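With the ceph image settings that appear in the Vagrantfiles of this release, the generated drive string would look roughly as follows (illustrative; the `pool/...` part is produced by `_rbd_clone_or_copy_image`):

```
rbd:msimonin_rbds/<clone-or-copy>:id=msimonin:conf=/home/msimonin/.ceph/config:rbd_cache=true,cache=writeback
```

The appended `rbd_cache=true,cache=writeback` turns on writeback caching for the rbd-backed qemu drive.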
data/lib/vagrant-g5k/version.rb CHANGED
@@ -1,5 +1,5 @@
 module VagrantPlugins
   module G5K
-    VERSION = '0.0.15'
+    VERSION = '0.0.16'
   end
 end
data/locales/en.yml CHANGED
@@ -80,7 +80,11 @@ en:
       One or more of the needed AWS credentials are missing. No environment variables
       are set nor profile '%{profile}' exists at '%{location}'
 
-  errors:
+  errors:
+    job_error: Job error
+    tired_of_waiting: Tired of waiting
+    remote_command_error: Remote command error
+
     fog_error: |-
       There was an error talking to AWS. The error message is shown
       below:
data/results/.gitignore ADDED
@@ -0,0 +1,2 @@
+.vagrant
+*.retry
data/results/README.md ADDED
@@ -0,0 +1,43 @@
+# Post-Mortem analysis
+
+## What's inside the VMs?
+
+Each virtual machine (VM) offers a toolbox to analyse the data produced by an experiment run with kolla-g5k. The data itself is copied into the VM when the VM is created.
+
+You'll find:
+
+* Nginx, exposed on port 8000. It lets you browse Rally reports, confs and logs.
+* Grafana, exposed on port 3000. It gives access to all the metrics collected during the experiment.
+* Kibana, exposed on port 5601. It lets you explore the logs.
+
+## List of provided experiments
+
+* idle : *todo wiki url*
+* load-ded : *todo wiki url*
+* load-default : *todo wiki url*
+* [load-clust-rabbit](https://github.com/BeyondTheClouds/kolla-g5k/wiki/load-clust-rabbitmq)
+* concurrency : *todo wiki url*
+
+## Accessing the results
+
+Start a specific virtual machine:
+
+```
+vagrant up <experimentation>
+```
+
+Shut down the virtual machine:
+
+```
+vagrant halt <experimentation>
+```
+
+## Creating new results
+
+*todo*
+
+## Known limitations
+
+* The current implementation is tied to Grid'5000.
+* When creating a new set of results, indexing all the logs in elasticsearch takes some time.
+* From time to time a VM's root filesystem goes read-only. Running fsck on the root partition (`fsck /dev/vg0/lv_root`) and rebooting may solve the issue.
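Once a VM is up, the three services listed above are reachable through the port forwardings declared in the Vagrantfile below. A sketch, assuming the forwards land on the local machine:

```
vagrant up idle
# then browse:
#   http://localhost:8000   Nginx (Rally reports, confs, logs)
#   http://localhost:3000   Grafana
#   http://localhost:5601   Kibana
```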
data/results/Vagrantfile ADDED
@@ -0,0 +1,126 @@
+# coding: utf-8
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+
+# This Vagrantfile makes use of the plugin vagrant-g5k
+# https://github.com/msimonin/vagrant-g5k
+#
+# version 0.0.13
+
+# The list of experiments. There will be one VM per
+# experiment. You can access it through e.g. `vagrant ssh idle`.
+XPS =[
+  {
+    :name  => "idle",
+    :confs => [ "cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50" ]
+  },
+  {
+    # dedicated 1 node for each mariadb, haproxy, conductor, rabbitmq, memcached
+    # with rally benchmark
+    :name  => "load-ded",
+    :confs => [ "cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50-stopped"]
+  },
+  {
+    # default topology
+    # with rally benchmark
+    :name  => "load-default",
+    :confs => [ "cpt20-nfk05", "cpt20-nfk10", 'cpt20-nfk25']
+  },
+  {
+    :name  => "concurrency",
+    :confs => [ "ccy0001-0015-cpt20-nfk50",
+                "ccy0025-0045-cpt20-nfk50",
+                "ccy0100-1000-cpt20-nfk05" ]
+  },
+  {
+    # load test with a clustered rabbitmq
+    :name  => "load-clust-rabbit",
+    :confs => [ "cpt20-nfk50",
+                "cpt20-nfk50-tuned-report-sync-intervals",
+                "cpt20-nfk50-tuned-report-sync-intervals-handshake-timeout",
+                "cpt20-nfk50-cond10-tuned-handshake-timeout"]
+  }
+  # Add another experiment
+  # ,{ :name => "vanilla",
+  #    :confs => [ "cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50" ]}
+]
+
+Vagrant.configure(2) do |config|
+  # user to log with inside the vm
+  # config.ssh.username = "root"
+  # password to use to log inside the vm
+
+  config.vm.provider "g5k" do |g5k|
+    # The project id.
+    # It is used to generate a unique remote storage for images.
+    # It must be unique across all projects managed by vagrant.
+    g5k.project_id = "vagrant-g5k"
+
+    # user name used to connect to g5k
+    g5k.username = "msimonin"
+
+    # private key
+    # g5k.private_key = File.join(ENV['HOME'], ".ssh/id_rsa_discovery")
+
+    # site to use
+    g5k.site = "rennes"
+    g5k.gateway = "access.grid5000.fr"
+
+    # walltime to use
+    g5k.walltime = "03:00:00"
+
+    # image location
+    # g5k.image = {
+    #   "path"    => "$HOME/public/ubuntu1404.qcow2",
+    #   "backing" => "copy"
+    # }
+
+    # it could also be backed by ceph
+    g5k.image = {
+      "pool"    => "msimonin_rbds",
+      "rbd"     => "bases/bocutter-ubuntu1404_home_ceph",
+      "id"      => "msimonin",
+      "conf"    => "/home/msimonin/.ceph/config",
+      "backing" => "copy"
+    }
+
+    # ports to expose (at least ssh has to be forwarded)
+    g5k.ports = ['2222-:22','3000-:3000', '8000-:80', '5601-:5601']
+  end
+
+  XPS.each do |xp|
+    config.vm.define xp[:name] do |my|
+      # box isn't used
+      my.vm.box = "dummy"
+
+      # From `boilerplate.yml`: this playbook relies on an `xps` variable.
+      # The `xps` variable is a list that contains the names of all
+      # experiments. For instance, the list looks as follows for the
+      # idle experiment:
+      # - idle-cpt20-nfk05
+      # - idle-cpt20-nfk10
+      # - idle-cpt20-nfk25
+      # - idle-cpt20-nfk50
+      #
+      # This ruby expression computes the list and gives it to
+      # `ansible-playbook`. The construction of the list is based on the
+      # name of the experiment `xp[:name]` and the list of
+      # configurations tested `xp[:confs]`.
+      xps = {:xps => xp[:confs].map { |cfg| xp[:name] + "-" + cfg } }
+
+      # For the provisioning to run, a dedicated proxy command seems necessary
+      # in your ssh config (the provisioner does not seem to take the
+      # vagrant ssh-config into account):
+      #   Host *.grid5000.fr
+      #     User <login>
+      #     StrictHostKeyChecking no
+      #     ProxyCommand ssh <login>@194.254.60.4 "nc %h %p"
+      my.vm.provision :ansible do |ansible|
+        ansible.playbook   = "boilerplate.yml"
+        ansible.extra_vars = xps
+        ansible.verbose    = "-vvvv"
+      end
+    end
+  end
+end
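For the `idle` machine, the provisioning step therefore boils down to something like the following (illustrative; the inventory and connection settings are generated by vagrant):

```
ansible-playbook boilerplate.yml -vvvv \
  --extra-vars '{"xps": ["idle-cpt20-nfk05", "idle-cpt20-nfk10", "idle-cpt20-nfk25", "idle-cpt20-nfk50"]}'
```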