vagrant-g5k 0.0.16 → 0.0.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +30 -2
- data/Vagrantfile +49 -20
- data/lib/vagrant-g5k.rb +16 -0
- data/lib/vagrant-g5k/action.rb +52 -20
- data/lib/vagrant-g5k/action/connect_g5k.rb +3 -8
- data/lib/vagrant-g5k/action/create_local_working_dir.rb +1 -1
- data/lib/vagrant-g5k/action/get_state.rb +23 -0
- data/lib/vagrant-g5k/action/is_created.rb +6 -2
- data/lib/vagrant-g5k/action/{message_already_created.rb → message_already_running.rb} +1 -1
- data/lib/vagrant-g5k/action/message_not_running.rb +16 -0
- data/lib/vagrant-g5k/action/read_ssh_info.rb +16 -14
- data/lib/vagrant-g5k/action/read_state.rb +8 -0
- data/lib/vagrant-g5k/action/run_instance.rb +1 -0
- data/lib/vagrant-g5k/action/wait_instance.rb +18 -0
- data/lib/vagrant-g5k/config.rb +7 -3
- data/lib/vagrant-g5k/util/g5k_utils.rb +164 -37
- data/lib/vagrant-g5k/util/launch_vm_bridge.sh +58 -0
- data/lib/vagrant-g5k/version.rb +1 -1
- data/vagrant-g5k.gemspec +6 -6
- metadata +24 -70
- data/Vagrantfile.multisite +0 -37
- data/results/.gitignore +0 -2
- data/results/README.md +0 -43
- data/results/Vagrantfile +0 -126
- data/results/ansible.cfg +0 -2
- data/results/boilerplate.retry +0 -1
- data/results/boilerplate.yml +0 -248
- data/results/files/grafana/dashboard.json +0 -4572
- data/results/files/grafana/dedicated.json +0 -5486
- data/results/files/grafana/haproxy.json +0 -2632
- data/results/files/heka/config.json +0 -47
- data/results/files/heka/heka-globals.toml +0 -2
- data/results/files/heka/lua_decoders/haproxy_log.lua +0 -162
- data/results/files/heka/lua_decoders/os_keystone_apache_log.lua +0 -78
- data/results/files/heka/lua_decoders/os_mysql_log.lua +0 -56
- data/results/files/heka/lua_decoders/os_openstack_log.lua +0 -146
- data/results/files/heka/lua_decoders/os_rabbitmq_log.lua +0 -79
- data/results/files/kibana/all_objects.json +0 -81
- data/results/files/nginx.conf +0 -34
- data/results/templates/heka-elasticsearch.toml.j2 +0 -18
- data/results/templates/heka-haproxy.toml.j2 +0 -10
- data/results/templates/heka-keystone.toml.j2 +0 -14
- data/results/templates/heka-mariadb.toml.j2 +0 -14
- data/results/templates/heka-openstack.toml.j2 +0 -15
- data/results/templates/heka-rabbitmq.toml.j2 +0 -21
- data/results/test.rb +0 -32
data/lib/vagrant-g5k/version.rb
CHANGED
data/vagrant-g5k.gemspec
CHANGED
@@ -16,14 +16,14 @@ Gem::Specification.new do |s|
   s.rubyforge_project = "vagrant-g5k"
 
   s.add_runtime_dependency "iniparse", "~> 1.4", ">= 1.4.2"
-  s.add_runtime_dependency "net-ssh", "~> 3.0.2"
-  s.add_runtime_dependency "net-scp", "~> 1.1.2"
-  s.add_runtime_dependency "net-ssh-multi", "~> 1.2.1"
+  s.add_runtime_dependency "net-ssh", "~> 3.0", ">= 3.0.2"
+  s.add_runtime_dependency "net-scp", "~> 1.1", ">= 1.1.2"
+  s.add_runtime_dependency "net-ssh-multi", "~> 1.2", ">=1.2.1"
 
-  s.add_development_dependency "rake"
+  #s.add_development_dependency "rake"
   # rspec 3.4 to mock File
-  s.add_development_dependency "rspec", "~> 3.4"
-  s.add_development_dependency "rspec-its"
+  #s.add_development_dependency "rspec", "~> 3.4"
+  #s.add_development_dependency "rspec-its"
 
   # The following block of code determines the files that should be included
   # in the gem. It does this by reading all the files in the directory where
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: vagrant-g5k
 version: !ruby/object:Gem::Version
-  version: 0.0.16
+  version: 0.0.17
 platform: ruby
 authors:
 - Matthieu Simonin
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-
+date: 2016-11-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: iniparse
@@ -35,6 +35,9 @@ dependencies:
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.0'
+    - - ">="
       - !ruby/object:Gem::Version
         version: 3.0.2
   type: :runtime
@@ -42,6 +45,9 @@
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.0'
+    - - ">="
       - !ruby/object:Gem::Version
         version: 3.0.2
 - !ruby/object:Gem::Dependency
@@ -49,6 +55,9 @@
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.1'
+    - - ">="
       - !ruby/object:Gem::Version
         version: 1.1.2
   type: :runtime
@@ -56,6 +65,9 @@
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.1'
+    - - ">="
       - !ruby/object:Gem::Version
         version: 1.1.2
 - !ruby/object:Gem::Dependency
@@ -64,56 +76,20 @@
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 1.2.1
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 1.2.1
-- !ruby/object:Gem::Dependency
-  name: rake
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :development
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
+        version: '1.2'
     - - ">="
       - !ruby/object:Gem::Version
-        version: '0'
-- !ruby/object:Gem::Dependency
-  name: rspec
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: '3.4'
-  type: :development
+        version: 1.2.1
+  type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '3.4'
-- !ruby/object:Gem::Dependency
-  name: rspec-its
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :development
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
+        version: '1.2'
     - - ">="
       - !ruby/object:Gem::Version
-        version: '0'
+        version: 1.2.1
 description: Enables to boot a vm in the production environment of G5K.
 email: matthieu.simonin@inria.fr
 executables: []
@@ -127,7 +103,6 @@ files:
 - LICENSE
 - README.md
 - Vagrantfile
-- Vagrantfile.multisite
 - lib/vagrant-g5k.rb
 - lib/vagrant-g5k/.config.rb.swp
 - lib/vagrant-g5k/action.rb
@@ -136,12 +111,15 @@
 - lib/vagrant-g5k/action/create_local_working_dir.rb
 - lib/vagrant-g5k/action/delete_disk.rb
 - lib/vagrant-g5k/action/delete_job.rb
+- lib/vagrant-g5k/action/get_state.rb
 - lib/vagrant-g5k/action/is_created.rb
-- lib/vagrant-g5k/action/message_already_created.rb
+- lib/vagrant-g5k/action/message_already_running.rb
 - lib/vagrant-g5k/action/message_not_created.rb
+- lib/vagrant-g5k/action/message_not_running.rb
 - lib/vagrant-g5k/action/read_ssh_info.rb
 - lib/vagrant-g5k/action/read_state.rb
 - lib/vagrant-g5k/action/run_instance.rb
+- lib/vagrant-g5k/action/wait_instance.rb
 - lib/vagrant-g5k/command.rb
 - lib/vagrant-g5k/config.rb
 - lib/vagrant-g5k/driver.rb
@@ -149,34 +127,10 @@
 - lib/vagrant-g5k/plugin.rb
 - lib/vagrant-g5k/provider.rb
 - lib/vagrant-g5k/util/g5k_utils.rb
+- lib/vagrant-g5k/util/launch_vm_bridge.sh
 - lib/vagrant-g5k/util/launch_vm_fwd.sh
 - lib/vagrant-g5k/version.rb
 - locales/en.yml
-- results/.gitignore
-- results/README.md
-- results/Vagrantfile
-- results/ansible.cfg
-- results/boilerplate.retry
-- results/boilerplate.yml
-- results/files/grafana/dashboard.json
-- results/files/grafana/dedicated.json
-- results/files/grafana/haproxy.json
-- results/files/heka/config.json
-- results/files/heka/heka-globals.toml
-- results/files/heka/lua_decoders/haproxy_log.lua
-- results/files/heka/lua_decoders/os_keystone_apache_log.lua
-- results/files/heka/lua_decoders/os_mysql_log.lua
-- results/files/heka/lua_decoders/os_openstack_log.lua
-- results/files/heka/lua_decoders/os_rabbitmq_log.lua
-- results/files/kibana/all_objects.json
-- results/files/nginx.conf
-- results/templates/heka-elasticsearch.toml.j2
-- results/templates/heka-haproxy.toml.j2
-- results/templates/heka-keystone.toml.j2
-- results/templates/heka-mariadb.toml.j2
-- results/templates/heka-openstack.toml.j2
-- results/templates/heka-rabbitmq.toml.j2
-- results/test.rb
 - spec/vagrant-g5k/config_spec.rb
 - vagrant-g5k.gemspec
 homepage: https://github.com/msimonin/vagrant-g5k
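The files section above also records the new provider actions added in this release: get_state.rb, message_already_running.rb, message_not_running.rb and wait_instance.rb, plus the launch_vm_bridge.sh helper. Their bodies are not part of this diff; for orientation only, Vagrant provider actions are normally written as middlewares of the kind sketched below. This is a generic illustration of that pattern (class name and message text are hypothetical), not the actual code shipped in 0.0.17.

```ruby
module VagrantPlugins
  module G5K
    module Action
      # Generic Vagrant middleware shape: each action receives the next
      # middleware in the chain (app) plus the environment hash, does its
      # work in #call, and then delegates to the rest of the chain.
      class MessageNotRunning
        def initialize(app, env)
          @app = app
        end

        def call(env)
          # Hypothetical message; the real action's wording may differ.
          env[:ui].info("The VM is not running. Run `vagrant up` first.")
          @app.call(env)
        end
      end
    end
  end
end
```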
data/Vagrantfile.multisite
DELETED
@@ -1,37 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-# Testing purpose only
-#Vagrant.require_plugin "vagrant-g5k"
-
-SITES=['rennes', 'nancy', 'nantes', 'lille', 'luxembourg', 'grenoble', 'lyon']
-
-Vagrant.configure(2) do |config|
-  SITES.each do |site|
-    config.vm.define "vm-#{site}" do |my|
-      my.vm.box = "dummy"
-
-      my.ssh.username = "root"
-      my.ssh.password = ""
-
-      my.vm.provider "g5k" do |g5k|
-        g5k.project_id = "vagrant-g5k"
-        g5k.site = "#{site}"
-        g5k.gateway = "access.grid5000.fr"
-        g5k.image = {
-          "pool"     => "msimonin_rbds",
-          "rbd"      => "bases/alpine_docker",
-          "snapshot" => "parent",
-          "id"       => "$USER",
-          "conf"     => "$HOME/.ceph/config",
-          "backing"  => "snapshot"
-        }
-        g5k.ports = ['2222-:22']
-        g5k.oar = "virtual != 'none'"
-      end #g5k
-    end #vm
-  end # each
-
-end
-
-
data/results/.gitignore
DELETED
data/results/README.md
DELETED
@@ -1,43 +0,0 @@
-# Post-Mortem analysis
-
-## What's inside the VMs ?
-
-Each virtual machine (VM) offer a toolbox to analyse various datas from the experimentation made with kolla-g5k. The datas themselves are also located inside the VM during the creation of the VM.
-
-You'll find :
-
-* Nginx exposed on port 8000. It allows you to browse Rally reports, confs and logs.
-* Grafana exposed on port 3000. It gives access of all the metrics collected during the experimentation
-* Kibana exposed on port 5601. It let you explores the logs.
-
-
-## List of provided experimentation
-
-* idle : *todo wiki url*
-* load-ded : *todo wiki url*
-* load-default : *todo wiki url*
-* [load-clust-rabbit](https://github.com/BeyondTheClouds/kolla-g5k/wiki/load-clust-rabbitmq)
-* concurrency : *todo wiki url*
-
-## Accessing the results
-
-Start a specific virtual machine :
-
-```
-vagrant up <experimentation>
-```
-
-Shutdown the virtual machine
-```
-vagrant halt <experimentation>
-```
-
-## Creating new results
-
-*todo*
-
-## Known limitation
-
-* The current implementation is tight to Grid'5000.
-* When creating a new set of result, indexing all the logs in elasticsearch will take some time.
-* From time to time VMs root filesystem goes read-only. Manually fsck the root partition and reboot may solve the issue `fsck /dev/vg0/lv_root`.
data/results/Vagrantfile
DELETED
@@ -1,126 +0,0 @@
-# coding: utf-8
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-
-# This Vagrantfile makes use of the plugin vagrant-g5k
-# https://github.com/msimonin/vagrant-g5k
-#
-# version 0.0.13
-
-# The list of experimentation. There will be one VM per
-# experimentation. You can access it thought eg, `vagrant ssh idle`.
-XPS =[
-  {
-    :name => "idle",
-    :confs => [ "cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50" ]
-  },
-  {
-    # dedicated 1 node for each mariadb, haproxy, conductor, rabbitmq, memcached
-    # with rally benchmark
-    :name => "load-ded",
-    :confs => [ "cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50-stopped"]
-  },
-  {
-    # default topology
-    # with rally benchmark
-    :name => "load-default",
-    :confs => [ "cpt20-nfk05", "cpt20-nfk10", 'cpt20-nfk25']
-  },
-  {
-    :name => "concurrency",
-    :confs => [ "ccy0001-0015-cpt20-nfk50",
-                "ccy0025-0045-cpt20-nfk50",
-                "ccy0100-1000-cpt20-nfk05" ]
-  },
-  {
-    # load test with a clustered rabbitmq
-    :name => "load-clust-rabbit",
-    :confs => [ "cpt20-nfk50",
-                "cpt20-nfk50-tuned-report-sync-intervals",
-                "cpt20-nfk50-tuned-report-sync-intervals-handshake-timeout",
-                "cpt20-nfk50-cond10-tuned-handshake-timeout"]
-  }
-  # Add another experimentation
-  # ,{ :name => "vanilla",
-  #    :confs => [ "cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50" ]}
-]
-
-Vagrant.configure(2) do |config|
-  # user to log with inside the vm
-  # config.ssh.username = "root"
-  # password to use to log inside the vm
-
-  config.vm.provider "g5k" do |g5k|
-    # The project id.
-    # It is used to generate uniq remote storage for images
-    # It must be uniq accros all project managed by vagrant.
-    g5k.project_id = "vagrant-g5k"
-
-    # user name used to connect to g5k
-    g5k.username = "msimonin"
-
-    # private key
-    # g5k.private_key = File.join(ENV['HOME'], ".ssh/id_rsa_discovery")
-
-    # site to use
-    g5k.site = "rennes"
-    g5k.gateway = "access.grid5000.fr"
-
-    # walltime to use
-    g5k.walltime = "03:00:00"
-
-    # image location
-    # g5k.image = {
-    #   "path" => "$HOME/public/ubuntu1404.qcow2",
-    #   "backing" => "copy"
-    #}
-
-    # it could be backed by the ceph
-    g5k.image = {
-      "pool" => "msimonin_rbds",
-      "rbd" => "bases/bocutter-ubuntu1404_home_ceph",
-      "id" => "msimonin",
-      "conf" => "/home/msimonin/.ceph/config",
-      "backing" => "copy"
-    }
-
-    # ports to expose (at least ssh has to be forwarded)
-    g5k.ports = ['2222-:22','3000-:3000', '8000-:80', '5601-:5601']
-  end
-
-  XPS.each do |xp|
-    config.vm.define xp[:name] do |my|
-      # box isn't used
-      my.vm.box = "dummy"
-
-      # From `boilerplate.yml`: this playbook relies on an `xps` variable.
-      # The `xps` variable is a list that contains the name of all
-      # experimentation. For instance, this list is as following for the
-      # idle experimentation:
-      #  - idle-cpt20-nfk05
-      #  - idle-cpt20-nfk10
-      #  - idle-cpt20-nfk25
-      #  - idle-cpt20-nfk50
-      #
-      # This ruby method computes this list and gives it to
-      # `ansible-playbook`. The construction of the list is based on the
-      # name of the experimentation `xp[:name]` and the list of
-      # experimentation done `xp[:confs]`
-      xps = {:xps => xp[:confs].map { |cfg| xp[:name] + "-" + cfg } }
-
-      # For the provision to run a dedicated proxy command seems necessary
-      # in your ssh config (the provisionner seems to not take into account
-      # the ssh-config of vagrant)
-      # Host *.grid5000.fr
-      #   User <login>
-      #   StrictHostKeyChecking no
-      #   ProxyCommand ssh <login>@194.254.60.4 "nc %h %p"
-      my.vm.provision :ansible do |ansible|
-        ansible.playbook = "boilerplate.yml"
-        ansible.extra_vars = xps
-        ansible.verbose = "-vvvv"
-      end
-    end
-  end
-end
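The `xps` hash built in the deleted Vagrantfile above simply prefixes every configuration name with the experimentation name before handing it to ansible-playbook as extra vars. For the "idle" entry, for example, it evaluates to the list quoted in the playbook comments:

```ruby
xp = {
  :name  => "idle",
  :confs => ["cpt20-nfk05", "cpt20-nfk10", "cpt20-nfk25", "cpt20-nfk50"]
}

xps = { :xps => xp[:confs].map { |cfg| xp[:name] + "-" + cfg } }
# => {:xps=>["idle-cpt20-nfk05", "idle-cpt20-nfk10",
#            "idle-cpt20-nfk25", "idle-cpt20-nfk50"]}
```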
data/results/ansible.cfg
DELETED
data/results/boilerplate.retry
DELETED
@@ -1 +0,0 @@
-load-ded
data/results/boilerplate.yml
DELETED
@@ -1,248 +0,0 @@
----
-# Note: this playbook relies on `xps` variable. The `xps` variable is
-# a list that contains the name of all experimentation. For instance,
-# this list is as following for the idle experimentation.
-# - idle-cpt20-nfk05
-# - idle-cpt20-nfk10
-# - idle-cpt20-nfk25
-# - idle-cpt20-nfk50
-- hosts: all
-  sudo: True
-  tasks:
-    - name: The list of experimentation
-      debug: var=xps
-
-    - name: Making the results and archives directories
-      file: path=/results/archives state=directory
-
-    - name: Getting results from home and put them into /results/archives
-      command: "cp /home/{{ item }}.tar.gz /results/archives/"
-      args:
-        creates: "/results/archives/{{ item }}.tar.gz"
-      with_items: "{{ xps }}"
-
-    - name: Extracting experimentation results
-      command: "tar -C /results -xzf /results/archives/{{ item }}.tar.gz"
-      args:
-        creates: "/results/{{ item }}"
-      with_items: "{{ xps }}"
-
-    - name: Extracting everything
-      shell: "for i in $(ls /results/{{ item }}/*.tar.gz); do tar -C /results/{{ item }} -xzf $i; done"
-      with_items: "{{ xps }}"
-
-    # workaround the https://github.com/BeyondTheClouds/kolla-g5k/issues/44 for older results
-    # It will overwrite the logs from those in _data if this directory exists
-    - name: "Unnesting directory structure (workaround https://github.com/BeyondTheClouds/kolla-g5k/issues/44) "
-      shell: "cd /results/{{ item }}/tmp/kolla-logs; [ -d _data ] && cp -r _data/* . || echo 0"
-      with_items: "{{ xps }}"
-
-    - name: Extract haproxy logs from to one who holds it
-      shell: "cd /results/{{ item }}; for i in $(ls *.tar.gz); do tar -tvzf $i | grep haproxy.log | awk -v x=$i '$3 > 0 {print x}'; done | xargs -n 1 tar -xvzf"
-      with_items: "{{ xps }}"
-
-    - name: Fixing permissions
-      command: chmod 755 -R /results
-
-    - name: Starting influx container
-      docker:
-        name: "influx_{{ item.1 }}"
-        detach: yes
-        image: "tutum/influxdb:0.13"
-        state: started
-        restart_policy: always
-        ports:
-          - "{{ 8083 + item.0 * 100 | int }}:8083"
-          - "{{ 18086 + item.0 * 100 | int }}:8086"
-        volumes: "/results/{{ item.1 }}/influx-data:/data"
-      with_indexed_items: "{{ xps }}"
-
-    - name: Removing previous grafana container
-      docker:
-        image: "grafana/grafana:3.1.0"
-        name: "grafana"
-        state: absent
-
-    - name: Starting grafana container
-      docker:
-        detach: yes
-        image: "grafana/grafana:3.1.0"
-        name: "grafana"
-        ports: "3000:3000"
-        state: started
-        restart_policy: always
-
-
-    - name: Waiting for the grafana service to become available
-      wait_for:
-        # we use the bridge interface of docker
-        # instead of localhost
-        # api is accessible on loopback too quickly and isn't ready
-        host: "172.17.0.1"
-        port: 3000
-        state: started
-        delay: 2
-        timeout: 120
-
-    - name: Installing httplib2 library. Required by uri module
-      pip: name=httplib2
-
-    - name: Add the influx cadvisor data source
-      uri:
-        url: "http://localhost:3000/api/datasources"
-        user: admin
-        password: admin
-        force_basic_auth: yes
-        body_format: json
-        HEADER_Content-Type: application/json
-        method: POST
-        # we workaround this issue :
-        # https://github.com/ansible/ansible-modules-core/issues/265
-        # by adding an empty space at the beginning of the json ...
-        body: " { \"name\": \"cadvisor-{{ item.1 }}\", \"type\": \"influxdb\", \"url\": \"http://172.17.0.1:{{ 18086 + item.0 * 100 }}\", \"access\": \"proxy\", \"database\": \"cadvisor\", \"user\": \"root\", \"password\": \"root\", \"isDefault\": true }"
-        status_code: 200,500
-      with_indexed_items: "{{ xps }}"
-
-    - name: Add the influx collectd data source
-      uri:
-        url: "http://localhost:3000/api/datasources"
-        user: admin
-        password: admin
-        force_basic_auth: yes
-        body_format: json
-        HEADER_Content-Type: application/json
-        method: POST
-        # we workaround this issue :
-        # https://github.com/ansible/ansible-modules-core/issues/265
-        # by adding an empty space at the beginning of the json ...
-        body: " { \"name\": \"collectd-{{ item.1 }}\", \"type\": \"influxdb\", \"url\": \"http://172.17.0.1:{{ 18086 + item.0 * 100 }}\", \"access\": \"proxy\", \"database\": \"collectd\", \"user\": \"root\", \"password\": \"root\", \"isDefault\": true }"
-        return_content: yes
-        status_code: 200
-      with_indexed_items: "{{ xps }}"
-
-    - name: Coping the nginx configuration file
-      copy: src="files/nginx.conf" dest="/nginx.conf"
-
-    - name: Removing previous nginx server container
-      docker:
-        image: "nginx:alpine"
-        name: "nginx"
-        state: absent
-
-    - name: Starting the nginx server container
-      docker:
-        detach: yes
-        image: "nginx:alpine"
-        name: "nginx"
-        ports: "80:80"
-        restart_policy: always
-        volumes:
-          - "/results:/usr/share/nginx/html"
-          - "/nginx.conf:/etc/nginx/nginx.conf"
-        state: started
-
-    - name: Add elasticsearch
-      docker:
-        detach: yes
-        image: elasticsearch
-        name: elasticsearch
-        ports:
-          - "9200:9200"
-          - "9300:9300"
-
-    - name: Waiting for elasticsearch to become available
-      wait_for:
-        # we use the bridge interface of docker
-        # instead of localhost
-        # api is accessible on loopback too quickly and isn't ready
-        host: "172.17.0.1"
-        port: 9200
-        state: started
-        delay: 2
-        timeout: 120
-
-
-    # We use the Type field to differentiate different xps
-    # Do not analyse it will ease the analyse through kibana
-    #
-    - name: Check if heka index exists
-      uri :
-        url: "http://localhost:9200/heka"
-        method: GET
-        body_format: json
-        status_code: 200,404
-      register: index
-
-    - debug: var=index
-
-    - name: Deleting old index
-      uri :
-        url: "http://localhost:9200/heka"
-        method: DELETE
-        body_format: json
-      when: index.status == 200
-
-    - name: Create the heka index in elasticsearch
-      uri :
-        url: "http://localhost:9200/heka"
-        method: PUT
-
-    # Ansible 1.9.x doesn't seem to be able to load a json
-    # and use it in the body of the request properly
-    # e.g with ansible 2:
-    # body: "{{ lookup('file', 'files/mapping.json') | from_json | to_json}}"
-    # body_format: json
-    - name: Do not analyse some fields in elasticsearch (see mapping.json)
-      uri :
-        url: "http://localhost:9200/heka/_mapping/message"
-        method: PUT
-        body: " { \"properties\": {\"{{ item }}\": {\"type\":\"string\", \"index\": \"not_analyzed\"}}}"
-        body_format: json
-      with_items: ["Type", "request_id", "tenant_id", "user_id", "programname"]
-
-    - name: Add kibana
-      docker:
-        detach: yes
-        image: kibana
-        name: kibana
-        links:
-          - "elasticsearch:elasticsearch"
-        ports:
-          - "5601:5601"
-
-    - name: Create heka configuration directory
-      file: path=/etc/heka state=directory
-
-    - name: Copying heka config files
-      copy: src=files/heka/ dest=/etc/heka/{{ item }}
-      with_items: "{{ xps }}"
-
-    - name: Generating heka specific configuration
-      template: src=templates/heka-{{ item[0] }}.toml.j2 dest=/etc/heka/{{ item[1] }}/heka-{{ item[0] }}.toml
-      with_nested:
-        - ["elasticsearch", "openstack", "keystone", "mariadb", "rabbitmq"]
-        - "{{ xps }}"
-
-    - name: Add kolla/heka
-      docker:
-        command: kolla_start
-        detach: yes
-        image: kolla/centos-binary-heka:2.0.2
-        name: "heka-{{ item }}"
-        links:
-          - "elasticsearch:elasticsearch"
-        volumes:
-          - "/etc/heka/{{ item }}:/var/lib/kolla/config_files"
-          - "/etc/localtime:/etc/localtime:ro"
-          - "/results/{{ item }}/tmp/kolla-logs:/var/log/kolla"
-          - "heka-{{ item }}:/var/cache/hekad"
-          - "heka_socket-{{ item }}:/var/lib/kolla/heka"
-          # patch to support custom types
-          - "/etc/heka/{{ item }}/lua_decoders/os_openstack_log.lua:/usr/share/heka/lua_decoders/os_openstack_log.lua"
-          - "/etc/heka/{{ item }}/lua_decoders/os_keystone_apache_log.lua:/usr/share/heka/lua_decoders/os_keystone_apache_log.lua"
-          - "/etc/heka/{{ item }}/lua_decoders/os_mysql_log.lua:/usr/share/heka/lua_decoders/os_mysql_log.lua"
-          - "/etc/heka/{{ item }}/lua_decoders/os_rabbitmq_log.lua:/usr/share/heka/lua_decoders/os_rabbitmq_log.lua"
-        env:
-          SKIP_LOG_SETUP: true
-          KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
-      with_items: "{{ xps }}"