wakame-vdc-agents 10.12.0 → 11.06.0
- data/LICENSE +164 -201
- data/Rakefile +6 -11
- data/bin/hva +11 -1351
- data/bin/nsa +5 -9
- data/bin/sta +124 -71
- data/config/hva.conf.example +12 -0
- data/config/initializers/isono.rb +7 -23
- data/config/initializers/sequel.rb +11 -2
- data/lib/dcmgr.rb +70 -11
- data/lib/dcmgr/cli/base.rb +74 -0
- data/lib/dcmgr/cli/errors.rb +59 -0
- data/lib/dcmgr/cli/group.rb +101 -0
- data/lib/dcmgr/cli/host.rb +101 -0
- data/lib/dcmgr/cli/image.rb +108 -0
- data/lib/dcmgr/cli/keypair.rb +72 -0
- data/lib/dcmgr/cli/network.rb +198 -0
- data/lib/dcmgr/cli/quota.rb +28 -0
- data/lib/dcmgr/cli/spec.rb +82 -0
- data/lib/dcmgr/cli/storage.rb +88 -0
- data/lib/dcmgr/cli/tag.rb +81 -0
- data/lib/dcmgr/cli/vlan.rb +53 -0
- data/lib/dcmgr/drivers/hypervisor.rb +33 -0
- data/lib/dcmgr/drivers/iijgio_storage.rb +37 -0
- data/lib/dcmgr/drivers/kvm.rb +118 -0
- data/lib/dcmgr/drivers/lxc.rb +167 -0
- data/lib/dcmgr/drivers/s3_storage.rb +39 -0
- data/lib/dcmgr/drivers/snapshot_storage.rb +51 -0
- data/lib/dcmgr/endpoints/core_api.rb +188 -324
- data/lib/dcmgr/endpoints/core_api_mock.rb +52 -3
- data/lib/dcmgr/endpoints/errors.rb +73 -32
- data/lib/dcmgr/endpoints/metadata.rb +163 -16
- data/lib/dcmgr/helpers/cli_helper.rb +1 -1
- data/lib/dcmgr/helpers/nic_helper.rb +35 -0
- data/lib/dcmgr/logger.rb +5 -1
- data/lib/dcmgr/messaging_client.rb +117 -0
- data/lib/dcmgr/models/account.rb +27 -3
- data/lib/dcmgr/models/base_new.rb +21 -7
- data/lib/dcmgr/models/host_pool.rb +27 -7
- data/lib/dcmgr/models/image.rb +31 -3
- data/lib/dcmgr/models/instance.rb +72 -23
- data/lib/dcmgr/models/instance_nic.rb +12 -2
- data/lib/dcmgr/models/instance_spec.rb +16 -0
- data/lib/dcmgr/models/ip_lease.rb +37 -1
- data/lib/dcmgr/models/netfilter_group.rb +7 -7
- data/lib/dcmgr/models/network.rb +42 -3
- data/lib/dcmgr/models/quota.rb +25 -0
- data/lib/dcmgr/models/request_log.rb +26 -11
- data/lib/dcmgr/models/ssh_key_pair.rb +14 -1
- data/lib/dcmgr/models/storage_pool.rb +19 -72
- data/lib/dcmgr/models/tag.rb +5 -0
- data/lib/dcmgr/models/vlan_lease.rb +8 -0
- data/lib/dcmgr/models/volume.rb +26 -8
- data/lib/dcmgr/models/volume_snapshot.rb +37 -0
- data/lib/dcmgr/node_modules/hva_collector.rb +56 -36
- data/lib/dcmgr/node_modules/instance_ha.rb +1 -1
- data/lib/dcmgr/node_modules/instance_monitor.rb +70 -0
- data/lib/dcmgr/node_modules/service_netfilter.rb +914 -0
- data/lib/dcmgr/node_modules/sta_collector.rb +7 -30
- data/lib/dcmgr/rack/request_logger.rb +60 -0
- data/lib/dcmgr/rack/run_initializer.rb +42 -0
- data/lib/dcmgr/rpc/hva_handler.rb +388 -0
- data/lib/dcmgr/rubygems.rb +7 -0
- data/lib/dcmgr/storage_service.rb +98 -0
- data/lib/dcmgr/tags.rb +2 -2
- data/lib/dcmgr/version.rb +8 -0
- data/lib/ext/time.rb +8 -0
- data/lib/sinatra/respond_to.rb +3 -0
- data/lib/sinatra/sequel_transaction.rb +20 -5
- metadata +133 -100
- data/lib/dcmgr/models/physical_host.rb +0 -67
- data/lib/dcmgr/web/base.rb +0 -21
data/LICENSE
CHANGED
@@ -1,202 +1,165 @@
The previous 202-line Apache License, Version 2.0 text is removed (only its closing sections are visible in this diff: the end of the limitation-of-liability clause, section 9 "Accepting Warranty or Additional Liability", END OF TERMS AND CONDITIONS, and the APPENDIX with the "Copyright [yyyy] [name of copyright owner]" boilerplate notice referencing http://www.apache.org/licenses/LICENSE-2.0). It is replaced by the 165-line text of the GNU LESSER GENERAL PUBLIC LICENSE, Version 3, 29 June 2007, Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>, sections 0 "Additional Definitions" through 6 "Revised Versions of the GNU Lesser General Public License".
data/Rakefile
CHANGED
@@ -1,12 +1,9 @@
 # -*- coding: utf-8 -*-
 
-begin
-  require 'rubygems'
-  require 'bundler/setup'
-rescue LoadError
-end
 $:.unshift 'lib'
 
+require 'dcmgr/rubygems'
+
 require 'rake/clean'
 require 'dcmgr'
 
@@ -83,8 +80,7 @@ task :gem do
   s.bindir='bin'
   s.executables = %w(collector)
 
-  s.add_dependency "isono", "
-  s.add_dependency "eventmachine", "0.12.10"
+  s.add_dependency "isono", "0.2.3"
   s.add_dependency "log4r"
   s.add_dependency "extlib", '0.9.15'
   s.add_dependency "configuration"
@@ -92,9 +88,9 @@ task :gem do
   s.add_dependency "ruby-hmac"
   s.add_dependency "ipaddress", '0.7.0'
   s.add_dependency "rack", ">= 1.2.1"
-  s.add_dependency "sinatra", "1.
+  s.add_dependency "sinatra", "1.2.1"
   s.add_dependency "json", ">= 1.2.0"
-  s.add_dependency "sequel", "3.
+  s.add_dependency "sequel", "3.21.0"
   s.add_dependency "mysql", ">= 2.8.1"
 
   s.add_development_dependency 'bacon'
@@ -123,8 +119,7 @@ task :gem do
   s.bindir='bin'
   s.executables = %w(hva sta nsa)
 
-  s.add_dependency "isono", "
-  s.add_dependency "eventmachine", "0.12.10"
+  s.add_dependency "isono", "0.2.3"
   s.add_dependency "log4r"
   s.add_dependency "extlib", '0.9.15'
   s.add_dependency "configuration"
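Both this Rakefile and data/bin/hva below drop their inline Bundler bootstrap in favor of a single require 'dcmgr/rubygems', which points at the new seven-line lib/dcmgr/rubygems.rb added in this release (see the file summary at the top). The contents of that helper are not shown in this diff; the snippet below is only a hypothetical sketch, assuming it simply centralizes the bootstrap block removed above.

  # Hypothetical reconstruction of lib/dcmgr/rubygems.rb; the actual file is
  # not included in this diff and may differ.
  begin
    require 'rubygems'
    require 'bundler/setup'
  rescue LoadError
    # Fall back to plain RubyGems when Bundler is not installed.
  end

With such a helper in place, an entry point only needs require 'dcmgr/rubygems' before require 'dcmgr', which is what the updated Rakefile and bin/hva now do.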
data/bin/hva
CHANGED
@@ -1,1363 +1,21 @@
|
|
1
1
|
#!/usr/bin/env ruby
|
2
2
|
# -*- coding: utf-8 -*-
|
3
3
|
|
4
|
-
|
5
|
-
require 'rubygems'
|
6
|
-
require 'bundler'
|
7
|
-
Bundler.setup(:default)
|
8
|
-
rescue Exception
|
9
|
-
end
|
4
|
+
$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__)
|
10
5
|
|
11
|
-
require
|
6
|
+
require 'dcmgr/rubygems'
|
7
|
+
require 'dcmgr'
|
8
|
+
require 'isono'
|
12
9
|
|
13
10
|
include Isono::Runner::RpcServer
|
14
|
-
require 'fileutils'
|
15
|
-
require 'ipaddress'
|
16
|
-
|
17
|
-
class ServiceNetfilter < Isono::NodeModules::Base
|
18
|
-
include Dcmgr::Logger
|
19
|
-
|
20
|
-
initialize_hook do
|
21
|
-
@worker_thread = Isono::ThreadPool.new(1)
|
22
|
-
|
23
|
-
@worker_thread.pass {
|
24
|
-
myinstance.init_netfilter
|
25
|
-
}
|
26
|
-
|
27
|
-
event = Isono::NodeModules::EventChannel.new(node)
|
28
|
-
|
29
|
-
event.subscribe('hva/instance_started', '#') do |args|
|
30
|
-
@worker_thread.pass {
|
31
|
-
logger.info("refresh on instance_started: #{args.inspect}")
|
32
|
-
inst_id = args[0]
|
33
|
-
#logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
|
34
|
-
#myinstance.refresh_netfilter_by_friend_instance_id(inst_id, 'started')
|
35
|
-
myinstance.init_netfilter
|
36
|
-
}
|
37
|
-
end
|
38
|
-
|
39
|
-
event.subscribe('hva/instance_terminated', '#') do |args|
|
40
|
-
@worker_thread.pass {
|
41
|
-
logger.info("refresh on instance_terminated: #{args.inspect}")
|
42
|
-
inst_id = args[0]
|
43
|
-
#logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
|
44
|
-
#myinstance.refresh_netfilter_by_friend_instance_id(inst_id, 'terminated')
|
45
|
-
myinstance.init_netfilter
|
46
|
-
}
|
47
|
-
end
|
48
|
-
|
49
|
-
event.subscribe('hva/netfilter_updated', '#') do |args|
|
50
|
-
@worker_thread.pass {
|
51
|
-
logger.info("refresh on netfilter_updated: #{args.inspect}")
|
52
|
-
netfilter_group_id = args[0]
|
53
|
-
#myinstance.refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
|
54
|
-
myinstance.init_netfilter
|
55
|
-
}
|
56
|
-
end
|
57
|
-
end
|
58
|
-
|
59
|
-
def init_netfilter
|
60
|
-
begin
|
61
|
-
inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
|
62
|
-
|
63
|
-
viftable_map = {}
|
64
|
-
inst_maps = inst_maps.map { |inst_map|
|
65
|
-
viftable_map[ inst_map[:ips].first ] = inst_map[:instance_nics].first[:uuid]
|
66
|
-
|
67
|
-
# Does the hva have instance?
|
68
|
-
unless inst_map[:host_pool][:node_id] == node.node_id
|
69
|
-
logger.warn("no match for the instance: #{inst_map[:uuid]}")
|
70
|
-
next
|
71
|
-
end
|
72
|
-
# Does host have vif?
|
73
|
-
next unless valid_vif?(inst_map[:instance_nics].first[:uuid])
|
74
|
-
inst_maps
|
75
|
-
}.flatten.uniq.compact
|
76
|
-
|
77
|
-
init_iptables(inst_maps) if @node.manifest.config.enable_iptables
|
78
|
-
init_ebtables(inst_maps, viftable_map) if @node.manifest.config.enable_ebtables
|
79
|
-
logger.info("initialize netfilter")
|
80
|
-
rescue Exception => e
|
81
|
-
p e
|
82
|
-
end
|
83
|
-
end
|
84
|
-
|
85
|
-
# from event_subscriber
|
86
|
-
# def refresh_netfilter_by_friend_instance_id(inst_id, state = 'started')
|
87
|
-
# raise "UnknownInstanceID" if inst_id.nil?
|
88
|
-
#
|
89
|
-
# begin
|
90
|
-
# inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
|
91
|
-
# ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
|
92
|
-
#
|
93
|
-
# inst_maps = ng.map { |g|
|
94
|
-
# rpc.request('hva-collector', 'get_instances_of_netfilter_group', g[:id])
|
95
|
-
# }
|
96
|
-
#
|
97
|
-
# # my instance_id
|
98
|
-
# # when terminated?
|
99
|
-
# if state == 'terminated'
|
100
|
-
# unless inst_map.nil?
|
101
|
-
# refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
|
102
|
-
# refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
|
103
|
-
# end
|
104
|
-
# end
|
105
|
-
#
|
106
|
-
# # friend instance(s)
|
107
|
-
# if inst_maps.size > 0
|
108
|
-
# inst_maps.flatten.uniq.each { |inst_map|
|
109
|
-
# unless inst_map.nil?
|
110
|
-
# refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
|
111
|
-
# refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
|
112
|
-
# end
|
113
|
-
# }
|
114
|
-
# end
|
115
|
-
# rescue Exception => e
|
116
|
-
# p e
|
117
|
-
# end
|
118
|
-
# end
|
119
|
-
|
120
|
-
# from event_subscriber
|
121
|
-
# def refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
|
122
|
-
# raise "UnknownNetfilterGroupID" if netfilter_group_id.nil?
|
123
|
-
#
|
124
|
-
# begin
|
125
|
-
# inst_maps = rpc.request('hva-collector', 'get_instances_of_netfilter_group', netfilter_group_id)
|
126
|
-
# inst_maps.each { |inst_map|
|
127
|
-
# unless inst_map.nil?
|
128
|
-
# refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
|
129
|
-
# refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
|
130
|
-
# end
|
131
|
-
# }
|
132
|
-
# rescue Exception => e
|
133
|
-
# p e
|
134
|
-
# end
|
135
|
-
# end
|
136
|
-
|
137
|
-
def init_ebtables(inst_maps = [], viftable_map = {})
|
138
|
-
cmd = "ebtables --init-table"
|
139
|
-
puts cmd
|
140
|
-
system(cmd)
|
141
|
-
|
142
|
-
basic_cmds = []
|
143
|
-
group_cmds = []
|
144
|
-
final_cmds = []
|
145
|
-
|
146
|
-
inst_maps.each { |inst_map|
|
147
|
-
vif_map = {
|
148
|
-
:uuid => inst_map[:instance_nics].first[:uuid],
|
149
|
-
:mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
|
150
|
-
:ipv4 => inst_map[:ips].first,
|
151
|
-
}
|
152
|
-
|
153
|
-
basic_cmds << build_ebtables_basic_part(vif_map, inst_map)
|
154
|
-
group_cmds << build_ebtables_group_part(vif_map, inst_map, viftable_map)
|
155
|
-
final_cmds << build_ebtables_final_part(vif_map)
|
156
|
-
}
|
157
|
-
|
158
|
-
viftable_map.each { |k,v|
|
159
|
-
p "#{v} <-> #{k}"
|
160
|
-
}
|
161
|
-
|
162
|
-
logger.debug("basic_cmds ...")
|
163
|
-
basic_cmds.flatten.uniq.each { |cmd|
|
164
|
-
system(cmd)
|
165
|
-
}
|
166
|
-
|
167
|
-
logger.debug("group_cmds ...")
|
168
|
-
group_cmds.flatten.uniq.each { |cmd|
|
169
|
-
system(cmd)
|
170
|
-
}
|
171
|
-
|
172
|
-
logger.debug("final_cmds ...")
|
173
|
-
final_cmds.flatten.uniq.each { |cmd|
|
174
|
-
system(cmd)
|
175
|
-
}
|
176
|
-
end
|
177
|
-
|
178
|
-
def init_iptables(inst_maps = [])
|
179
|
-
[ 'nat', 'filter' ].each { |table|
|
180
|
-
[ 'F', 'Z', 'X' ].each { |xcmd|
|
181
|
-
cmd = "iptables -t #{table} -#{xcmd}"
|
182
|
-
puts cmd
|
183
|
-
system(cmd)
|
184
|
-
}
|
185
|
-
}
|
186
|
-
|
187
|
-
inst_maps.each { |inst_map|
|
188
|
-
refresh_iptables(inst_map, false)
|
189
|
-
}
|
190
|
-
end
|
191
|
-
|
192
|
-
def valid_vif?(vif)
|
193
|
-
cmd = "ifconfig #{vif} >/dev/null 2>&1"
|
194
|
-
system(cmd)
|
195
|
-
|
196
|
-
if $?.exitstatus == 0
|
197
|
-
true
|
198
|
-
else
|
199
|
-
logger.warn("#{vif}: error fetching interface information: Device not found")
|
200
|
-
false
|
201
|
-
end
|
202
|
-
end
|
203
|
-
|
204
|
-
# def refresh_ebtables(inst_map = {}, viftable_map = {})
|
205
|
-
# logger.debug("refresh_ebtables: #{inst_map[:uuid]} ...")
|
206
|
-
#
|
207
|
-
# vif_map = {
|
208
|
-
# :uuid => inst_map[:instance_nics].first[:uuid],
|
209
|
-
# :mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
|
210
|
-
# }
|
211
|
-
#
|
212
|
-
# # xtables commands
|
213
|
-
# basic_cmds = build_ebtables_basic_part(vif_map, inst_map)
|
214
|
-
# group_cmds = build_ebtables_group_part(vif_map, inst_map, viftable_map)
|
215
|
-
# final_cmds = build_ebtables_final_part(vif_map)
|
216
|
-
#
|
217
|
-
# logger.debug("refresh_ebtables: #{inst_map[:uuid]} done.")
|
218
|
-
# end
|
219
|
-
|
220
|
-
def build_ebtables_basic_part(vif_map, inst_map)
|
221
|
-
basic_cmds = []
|
222
|
-
hva_ipv4 = Isono::Util.default_gw_ipaddr
|
223
|
-
|
224
|
-
################################
|
225
|
-
## 0. chain name
|
226
|
-
################################
|
227
|
-
|
228
|
-
# support IP protocol
|
229
|
-
protocol_maps = {
|
230
|
-
'ip4' => 'ip4',
|
231
|
-
'arp' => 'arp',
|
232
|
-
#'ip6' => 'ip6',
|
233
|
-
#'rarp' => '0x8035',
|
234
|
-
}
|
235
|
-
|
236
|
-
# make chain names.
|
237
|
-
chains = []
|
238
|
-
chains << "s_#{vif_map[:uuid]}"
|
239
|
-
chains << "d_#{vif_map[:uuid]}"
|
240
|
-
chains << "s_#{vif_map[:uuid]}_d_hst"
|
241
|
-
chains << "d_#{vif_map[:uuid]}_s_hst"
|
242
|
-
protocol_maps.each { |k,v|
|
243
|
-
chains << "s_#{vif_map[:uuid]}_#{k}"
|
244
|
-
chains << "d_#{vif_map[:uuid]}_#{k}"
|
245
|
-
chains << "s_#{vif_map[:uuid]}_d_hst_#{k}"
|
246
|
-
chains << "d_#{vif_map[:uuid]}_s_hst_#{k}"
|
247
|
-
}
|
248
|
-
|
249
|
-
################################
|
250
|
-
## 1. basic part
|
251
|
-
################################
|
252
|
-
|
253
|
-
# create user defined chains.
|
254
|
-
[ 'N' ].each { |xcmd|
|
255
|
-
chains.each { |chain|
|
256
|
-
basic_cmds << "ebtables -#{xcmd} #{chain}"
|
257
|
-
}
|
258
|
-
}
|
259
|
-
|
260
|
-
# jumt to user defined chains
|
261
|
-
basic_cmds << "ebtables -A FORWARD -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
|
262
|
-
basic_cmds << "ebtables -A FORWARD -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
|
263
|
-
basic_cmds << "ebtables -A INPUT -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}_d_hst"
|
264
|
-
basic_cmds << "ebtables -A OUTPUT -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}_s_hst"
|
265
|
-
|
266
|
-
# IP protocol routing
|
267
|
-
protocol_maps.each { |k,v|
|
268
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -p #{v} -j s_#{vif_map[:uuid]}_#{k}"
|
269
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]} -p #{v} -j d_#{vif_map[:uuid]}_#{k}"
|
270
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -p #{v} -j s_#{vif_map[:uuid]}_d_hst_#{k}"
|
271
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst -p #{v} -j d_#{vif_map[:uuid]}_s_hst_#{k}"
|
272
|
-
}
|
273
|
-
|
274
|
-
# default drop
|
275
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]} --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}:' -j CONTINUE"
|
276
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst:' -j CONTINUE"
|
277
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -j DROP"
|
278
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -j DROP"
|
279
|
-
|
280
|
-
# anti spoof: mac
|
281
|
-
# guest -> *
|
282
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
283
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
|
284
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
|
285
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
|
286
|
-
|
287
|
-
# guest <- * (broadcast)
|
288
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
289
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_hst_arp:' -j CONTINUE"
|
290
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
|
291
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
|
292
|
-
|
293
|
-
# guest <- *
|
294
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
295
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
|
296
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
|
297
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
|
298
|
-
|
299
|
-
# anti spoof: ipv4
|
300
|
-
inst_map[:ips].each { |ipv4|
|
301
|
-
# guest -> *
|
302
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
303
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
|
304
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
|
305
|
-
basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
|
306
|
-
# guest <- *
|
307
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
308
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
|
309
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
|
310
|
-
basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
|
311
|
-
}
|
312
|
-
|
313
|
-
basic_cmds
|
314
|
-
end
|
315
|
-
|
316
|
-
|
317
|
-
def build_ebtables_group_part(vif_map, inst_map, viftable_map)
|
318
|
-
group_cmds = []
|
319
|
-
hva_ipv4 = Isono::Util.default_gw_ipaddr
|
320
|
-
|
321
|
-
################################
|
322
|
-
## 2. group part
|
323
|
-
################################
|
324
|
-
same_subnet_ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
|
325
|
-
|
326
|
-
# detect node joined network(s).
|
327
|
-
network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
|
328
|
-
raise "UnknownNetworkId" if network_map.nil?
|
329
|
-
joined_network = IPAddress("#{network_map[:ipv4_gw]}/#{network_map[:prefix]}")
|
330
|
-
[ network_map[:dns_server], network_map[:dhcp_server] ].each { |ipv4|
|
331
|
-
next unless joined_network.include? IPAddress(ipv4)
|
332
|
-
same_subnet_ipv4s << ipv4
|
333
|
-
}
|
334
|
-
|
335
|
-
# network resource node(s)
|
336
|
-
ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
|
337
|
-
rules = ng_maps.map { |ng_map|
|
338
|
-
ng_map[:rules].map { |rule| rule[:permission] }
|
339
|
-
}.flatten
|
340
|
-
build_rule(rules).each do |rule|
|
341
|
-
next unless joined_network.include? IPAddress(rule[:ip_source])
|
342
|
-
same_subnet_ipv4s << rule[:ip_source]
|
343
|
-
end
|
344
|
-
same_subnet_ipv4s << network_map[:ipv4_gw]
|
345
|
-
|
346
|
-
# guest node(s) in HyperVisor.
|
347
|
-
alive_inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
|
348
|
-
guest_ipv4s = alive_inst_maps.map { |alive_inst_map|
|
349
|
-
alive_inst_map[:ips]
|
350
|
-
}.flatten.uniq.compact
|
351
|
-
|
352
|
-
same_subnet_ipv4s.uniq.reverse_each do |ipv4|
|
353
|
-
next if vif_map[:ipv4] == ipv4
|
354
|
-
|
355
|
-
# get_macaddr_by_ipv4, ipv4
|
356
|
-
if ipv4 == hva_ipv4
|
357
|
-
#p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [host] ***-****** (#{ipv4})"
|
358
|
-
group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
|
359
|
-
group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
|
360
|
-
elsif guest_ipv4s.include?(ipv4)
|
361
|
-
#p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [guest] #{viftable_map[ipv4]}(#{ipv4})"
|
362
|
-
|
363
|
-
# guest->guest
|
364
|
-
group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
365
|
-
group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
|
366
|
-
# guest->host
|
367
|
-
group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
|
368
|
-
group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
|
369
|
-
|
370
|
-
unless viftable_map[ipv4].nil?
|
371
|
-
# guest->guest
|
372
|
-
group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Arv d_#{viftable_map[ipv4]}_arp:' -j CONTINUE"
|
373
|
-
group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
|
374
|
-
|
375
|
-
# guest->host
|
376
|
-
group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Arv s_#{viftable_map[ipv4]}_d_hst_arp:' -j CONTINUE"
|
377
|
-
group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
|
378
|
-
end
|
379
|
-
else
|
380
|
-
#p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [other] ***-******** (#{ipv4})"
|
381
|
-
group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw :d_#{vif_map[:uuid]}_arp' -j CONTINUE"
|
382
|
-
group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
|
383
|
-
end
|
384
|
-
end
|
385
|
-
|
386
|
-
group_cmds
|
387
|
-
end
|
388
|
-
|
389
|
-
|
390
|
-
def build_ebtables_final_part(vif_map)
|
391
|
-
final_cmds = []
|
392
|
-
|
393
|
-
################################
|
394
|
-
## 3. final part
|
395
|
-
################################
|
396
|
-
# deny,allow
|
397
|
-
final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --log-level warning --log-ip --log-arp --log-prefix 'D d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
|
398
|
-
final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
|
399
|
-
final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp -j DROP"
|
400
|
-
final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp -j DROP"
|
401
|
-
|
402
|
-
final_cmds
|
403
|
-
end
|
404
|
-
|
405
|
-
def refresh_iptables(inst_map = {}, with_flush = 1)
|
406
|
-
logger.debug("refresh_iptables: #{inst_map[:uuid]} ...")
|
407
|
-
|
408
|
-
# Does the hva have instance?
|
409
|
-
unless inst_map[:host_pool][:node_id] == node.node_id
|
410
|
-
logger.warn "no match for the instance: #{inst_map[:uuid]}"
|
411
|
-
return
|
412
|
-
end
|
413
|
-
|
414
|
-
network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
|
415
|
-
raise "UnknownNetworkId" if network_map.nil?
|
416
|
-
|
417
|
-
vif = inst_map[:instance_nics].first[:uuid]
|
418
|
-
vif_mac = inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':')
|
419
|
-
|
420
|
-
if with_flush
|
421
|
-
flush_iptables(inst_map)
|
422
|
-
end
|
423
|
-
|
424
|
-
# Does host have vif?
|
425
|
-
unless valid_vif?(vif)
|
426
|
-
return
|
427
|
-
end
|
428
|
-
|
429
|
-
|
430
|
-
|
431
|
-
|
432
|
-
# group node IPv4 addresses.
|
433
|
-
ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
|
434
|
-
|
435
|
-
ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
|
436
|
-
rules = ng_maps.map { |ng_map|
|
437
|
-
ng_map[:rules].map { |rule| rule[:permission] }
|
438
|
-
}.flatten
|
439
|
-
|
440
|
-
|
441
|
-
|
442
|
-
|
443
|
-
# xtables commands
|
444
|
-
cmds = []
|
445
|
-
|
446
|
-
# support IP protocol
|
447
|
-
protocol_maps = {
|
448
|
-
'tcp' => 'tcp',
|
449
|
-
'udp' => 'udp',
|
450
|
-
'icmp' => 'icmp',
|
451
|
-
}
|
452
|
-
|
453
|
-
# make chain names.
|
454
|
-
chains = []
|
455
|
-
protocol_maps.each { |k,v|
|
456
|
-
chains << "s_#{vif}_#{k}"
|
457
|
-
chains << "d_#{vif}_#{k}"
|
458
|
-
}
|
459
|
-
chains << "s_#{vif}"
|
460
|
-
chains << "d_#{vif}"
|
461
|
-
|
462
|
-
# metadata-server
|
463
|
-
[ 'A' ].each { |xcmd|
|
464
|
-
system("iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80")
|
465
|
-
}
|
466
|
-
|
467
|
-
# create user defined chains.
|
468
|
-
[ 'N' ].each { |xcmd|
|
469
|
-
chains.each { |chain|
|
470
|
-
cmds << "iptables -#{xcmd} #{chain}"
|
471
|
-
|
472
|
-
# logger & drop
|
473
|
-
cmds << "iptables -N #{chain}_drop"
|
474
|
-
cmds << "iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix 'D #{chain}:'"
|
475
|
-
cmds << "iptables -A #{chain}_drop -j DROP"
|
476
|
-
}
|
477
|
-
}
|
478
|
-
|
479
|
-
# group nodes
|
480
|
-
ipv4s << network_map[:ipv4_gw]
|
481
|
-
ipv4s.uniq.reverse_each { |addr|
|
482
|
-
cmds << "iptables -A d_#{vif} -s #{addr} -j ACCEPT"
|
483
|
-
}
|
484
|
-
|
485
|
-
# IP protocol routing
|
486
|
-
[ 's', 'd' ].each do |bound|
|
487
|
-
protocol_maps.each { |k,v|
|
488
|
-
cmds << "iptables -N #{bound}_#{vif}_#{k}"
|
489
|
-
|
490
|
-
case k
|
491
|
-
when 'tcp'
|
492
|
-
case bound
|
493
|
-
when 's'
|
494
|
-
cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
|
495
|
-
when 'd'
|
496
|
-
#cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
|
497
|
-
cmds << "iptables -A #{bound}_#{vif} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
|
498
|
-
cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
|
499
|
-
end
|
500
|
-
when 'udp'
|
501
|
-
case bound
|
502
|
-
when 's'
|
503
|
-
cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
|
504
|
-
when 'd'
|
505
|
-
#cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
|
506
|
-
cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
|
507
|
-
cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
|
508
|
-
end
|
509
|
-
when 'icmp'
|
510
|
-
case bound
|
511
|
-
when 's'
|
512
|
-
cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
|
513
|
-
when 'd'
|
514
|
-
#cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
|
515
|
-
cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
|
516
|
-
cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
|
517
|
-
end
|
518
|
-
end
|
519
|
-
}
|
520
|
-
end
|
521
|
-
|
522
|
-
cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}"
|
523
|
-
cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}"
|
524
|
-
|
525
|
-
##
|
526
|
-
## ACCEPT
|
527
|
-
##
|
528
|
-
# DHCP Server
|
529
|
-
cmds << "iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
|
530
|
-
cmds << "iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 68 -j ACCEPT"
|
531
|
-
|
532
|
-
#cmds << "iptables -A d_#{vif}_udp -p udp --sport 67 -j d_#{vif}_udp_drop"
|
533
|
-
# DNS Server
|
534
|
-
cmds << "iptables -A s_#{vif}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"
|
535
|
-
|
536
|
-
##
|
537
|
-
## DROP
|
538
|
-
##
|
539
|
-
protocol_maps.each { |k,v|
|
540
|
-
# DHCP
|
541
|
-
cmds << "iptables -A s_#{vif} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif}_#{k}_drop"
|
542
|
-
# DNS
|
543
|
-
cmds << "iptables -A s_#{vif} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif}_#{k}_drop"
|
544
|
-
}
|
545
|
-
|
546
|
-
# security group
|
547
|
-
build_rule(rules).each do |rule|
|
548
|
-
case rule[:ip_protocol]
|
549
|
-
when 'tcp', 'udp'
|
550
|
-
cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_dport]} -j ACCEPT"
|
551
|
-
when 'icmp'
|
552
|
-
# icmp
|
553
|
-
# This extension can be used if `--protocol icmp' is specified. It provides the following option:
|
554
|
-
# [!] --icmp-type {type[/code]|typename}
|
555
|
-
# This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command
|
556
|
-
# iptables -p icmp -h
|
557
|
-
if rule[:icmp_type] == -1 && rule[:icmp_code] == -1
|
558
|
-
cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
|
559
|
-
else
|
560
|
-
cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
|
561
|
-
end
|
562
|
-
end
|
563
|
-
end
|
564
|
-
|
565
|
-
# drop other routings
|
566
|
-
protocol_maps.each { |k,v|
|
567
|
-
cmds << "iptables -A d_#{vif}_#{k} -p #{k} -j d_#{vif}_#{k}_drop"
|
568
|
-
}
|
569
|
-
|
570
|
-
# IP protocol routing
|
571
|
-
[ 'd' ].each do |bound|
|
572
|
-
protocol_maps.each { |k,v|
|
573
|
-
cmds << "iptables -A #{bound}_#{vif}_#{k} -j #{bound}_#{vif}_#{k}_drop"
|
574
|
-
}
|
575
|
-
end
|
576
|
-
|
577
|
-
cmds.uniq! if cmds.size > 0
|
578
|
-
cmds.compact.each { |cmd|
|
579
|
-
#puts cmd
|
580
|
-
system(cmd)
|
581
|
-
}
|
582
|
-
|
583
|
-
logger.debug("refresh_iptables: #{inst_map[:uuid]} done.")
|
584
|
-
end
|
585
|
-
|
586
|
-
def flush_ebtables(inst_map = {})
|
587
|
-
logger.debug("flush_ebtables: #{inst_map[:uuid]} ...")
|
588
|
-
|
589
|
-
# Does the hva have instance?
|
590
|
-
unless inst_map[:host_pool][:node_id] == node.node_id
|
591
|
-
logger.warn "no match for the instance: #{inst_map[:uuid]}"
|
592
|
-
return
|
593
|
-
end
|
594
|
-
|
595
|
-
network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
|
596
|
-
raise "UnknownNetworkId" if network_map.nil?
|
597
|
-
|
598
|
-
vif = inst_map[:instance_nics].first[:vif]
|
599
|
-
|
600
|
-
# support IP protocol
|
601
|
-
protocol_maps = {
|
602
|
-
'ip4' => 'ip4',
|
603
|
-
'arp' => 'arp',
|
604
|
-
#'ip6' => 'ip6',
|
605
|
-
#'rarp' => '0x8035',
|
606
|
-
}
|
607
|
-
|
608
|
-
# make chain names.
|
609
|
-
chains = []
|
610
|
-
chains << "s_#{vif}"
|
611
|
-
chains << "d_#{vif}"
|
612
|
-
chains << "s_#{vif}_d_hst"
|
613
|
-
chains << "d_#{vif}_s_hst"
|
614
|
-
protocol_maps.each { |k,v|
|
615
|
-
chains << "s_#{vif}_#{k}"
|
616
|
-
chains << "d_#{vif}_#{k}"
|
617
|
-
chains << "s_#{vif}_d_hst_#{k}"
|
618
|
-
chains << "d_#{vif}_s_hst_#{k}"
|
619
|
-
}
|
620
|
-
|
621
|
-
# clear rules if exists.
|
622
|
-
system("ebtables -L s_#{vif} >/dev/null 2>&1")
|
623
|
-
if $?.exitstatus == 0
|
624
|
-
cmd = "ebtables -D FORWARD -i #{vif} -j s_#{vif}"
|
625
|
-
puts cmd
|
626
|
-
system(cmd)
|
627
|
-
end
|
628
|
-
|
629
|
-
system("ebtables -L d_#{vif} >/dev/null 2>&1")
|
630
|
-
if $?.exitstatus == 0
|
631
|
-
cmd = "ebtables -D FORWARD -o #{vif} -j d_#{vif}"
|
632
|
-
puts cmd
|
633
|
-
system(cmd)
|
634
|
-
end
|
635
|
-
|
636
|
-
system("ebtables -L s_#{vif}_d_hst >/dev/null 2>&1")
|
637
|
-
if $?.exitstatus == 0
|
638
|
-
cmd = "ebtables -D INPUT -i #{vif} -j s_#{vif}_d_hst"
|
639
|
-
puts cmd
|
640
|
-
system(cmd)
|
641
|
-
end
|
642
|
-
|
643
|
-
system("ebtables -L d_#{vif}_s_hst >/dev/null 2>&1")
|
644
|
-
if $?.exitstatus == 0
|
645
|
-
cmd = "ebtables -D OUTPUT -o #{vif} -j d_#{vif}_s_hst"
|
646
|
-
puts cmd
|
647
|
-
system(cmd)
|
648
|
-
end
|
649
|
-
|
650
|
-
[ 'F', 'Z', 'X' ].each { |xcmd|
|
651
|
-
chains.each { |chain|
|
652
|
-
system("ebtables -L #{chain} >/dev/null 2>&1")
|
653
|
-
if $?.exitstatus == 0
|
654
|
-
cmd = "ebtables -#{xcmd} #{chain}"
|
655
|
-
puts cmd
|
656
|
-
system(cmd)
|
657
|
-
end
|
658
|
-
}
|
659
|
-
}
|
660
|
-
|
661
|
-
logger.debug("flush_ebtables: #{inst_map[:uuid]} #{vif} done.")
|
662
|
-
end
|
663
|
-
|
664
|
-
def flush_iptables(inst_map = {})
|
665
|
-
logger.debug("flush_iptables: #{inst_map[:uuid]} ...")
|
666
|
-
|
667
|
-
# Does the hva have instance?
|
668
|
-
unless inst_map[:host_pool][:node_id] == node.node_id
|
669
|
-
logger.warn "no match for the instance: #{inst_map[:uuid]}"
|
670
|
-
return
|
671
|
-
end
|
672
|
-
|
673
|
-
network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
|
674
|
-
raise "UnknownNetworkId" if network_map.nil?
|
675
|
-
|
676
|
-
vif = inst_map[:instance_nics].first[:vif]
|
677
|
-
|
678
|
-
# support IP protocol
|
679
|
-
protocol_maps = {
|
680
|
-
'tcp' => 'tcp',
|
681
|
-
'udp' => 'udp',
|
682
|
-
'icmp' => 'icmp',
|
683
|
-
}
|
684
|
-
|
685
|
-
# make chain names.
|
686
|
-
chains = []
|
687
|
-
protocol_maps.each { |k,v|
|
688
|
-
chains << "s_#{vif}_#{k}"
|
689
|
-
chains << "d_#{vif}_#{k}"
|
690
|
-
chains << "s_#{vif}_#{k}_drop"
|
691
|
-
chains << "d_#{vif}_#{k}_drop"
|
692
|
-
}
|
693
|
-
chains << "s_#{vif}"
|
694
|
-
chains << "d_#{vif}"
|
695
|
-
chains << "s_#{vif}_drop"
|
696
|
-
chains << "d_#{vif}_drop"
|
697
|
-
|
698
|
-
# metadata-server
|
699
|
-
[ 'D' ].each { |xcmd|
|
700
|
-
system("iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80 >/dev/null 2>&1")
|
701
|
-
}
|
702
|
-
|
703
|
-
# clean rules if exists.
|
704
|
-
system("iptables -nL s_#{vif} >/dev/null 2>&1")
|
705
|
-
if $?.exitstatus == 0
|
706
|
-
system("iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}")
|
707
|
-
end
|
708
|
-
|
709
|
-
system("iptables -nL d_#{vif} >/dev/null 2>&1")
|
710
|
-
if $?.exitstatus == 0
|
711
|
-
system("iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}")
|
712
|
-
end
|
713
|
-
|
714
|
-
[ 'F', 'Z', 'X' ].each { |xcmd|
|
715
|
-
chains.each { |chain|
|
716
|
-
system("iptables -nL #{chain} >/dev/null 2>&1")
|
717
|
-
if $?.exitstatus == 0
|
718
|
-
system("iptables -#{xcmd} #{chain}")
|
719
|
-
end
|
720
|
-
}
|
721
|
-
}
|
722
|
-
|
723
|
-
logger.debug("flush_iptables: #{inst_map[:uuid]} #{vif} done.")
|
724
|
-
end
|
725
|
-
|
726
|
-
def build_rule(rules = [])
|
727
|
-
rule_maps = []
|
728
|
-
|
729
|
-
rules.each do |rule|
|
730
|
-
rule = rule.strip.gsub(/[\s\t]+/, '')
|
731
|
-
from_group = false
|
732
|
-
ipv4s = []
|
733
|
-
|
734
|
-
# ex.
|
735
|
-
# "tcp:22,22,ip4:0.0.0.0"
|
736
|
-
# "udp:53,53,ip4:0.0.0.0"
|
737
|
-
# "icmp:-1,-1,ip4:0.0.0.0"
|
738
|
-
|
739
|
-
# 1st phase
|
740
|
-
# ip_dport : tcp,udp? 1 - 16bit, icmp: -1
|
741
|
-
# id_port has been separeted in first phase.
|
742
|
-
from_pair, ip_dport, source_pair = rule.split(',')
|
743
|
-
|
744
|
-
# TODO: more strict validations
|
745
|
-
next if from_pair.nil?
|
746
|
-
next if ip_dport.nil?
|
747
|
-
next if source_pair.nil?
|
748
|
-
|
749
|
-
# 2nd phase
|
750
|
-
# ip_protocol : [ tcp | udp | icmp ]
|
751
|
-
# ip_sport : tcp,udp? 1 - 16bit, icmp: -1
|
752
|
-
ip_protocol, ip_sport = from_pair.split(':')
|
753
|
-
|
754
|
-
# protocol : [ ip4 | ip6 | #{account_id} ]
|
755
|
-
# ip_source : ip4? xxx.xxx.xxx.xxx./[0-32], ip6? (not yet supprted), #{netfilter_group_id}
|
756
|
-
protocol, ip_source = source_pair.split(':')
|
757
|
-
|
758
|
-
begin
|
759
|
-
s = StringScanner.new(protocol)
|
760
|
-
until s.eos?
|
761
|
-
case
|
762
|
-
when s.scan(/ip6/)
|
763
|
-
# TODO#FUTURE: support IPv6 address format
|
764
|
-
next
|
765
|
-
when s.scan(/ip4/)
|
766
|
-
# IPAddress does't support prefix '0'.
|
767
|
-
ip_addr, prefix = ip_source.split('/', 2)
|
768
|
-
if prefix.to_i == 0
|
769
|
-
ip_source = ip_addr
|
770
|
-
end
|
771
|
-
when s.scan(/a-\w{8}/)
|
772
|
-
from_group = true
|
773
|
-
inst_maps = rpc.request('hva-collector', 'get_instances_of_account_netfilter_group', protocol, ip_source)
|
774
|
-
inst_maps.each { |inst_map|
|
775
|
-
ipv4s << inst_map[:ips]
|
776
|
-
}
|
777
|
-
else
|
778
|
-
raise "unexpected protocol '#{s.peep(20)}'"
|
779
|
-
end
|
780
|
-
end
|
781
|
-
rescue Exception => e
|
782
|
-
p e
|
783
|
-
next
|
784
|
-
end
|
785
|
-
|
786
|
-
begin
|
787
|
-
if from_group == false
|
788
|
-
#p "from_group:(#{from_group}) ip_source -> #{ip_source}"
|
789
|
-
ip = IPAddress(ip_source)
|
790
|
-
ip_source = case ip.u32
|
791
|
-
when 0
|
792
|
-
"#{ip.address}/0"
|
793
|
-
else
|
794
|
-
"#{ip.address}/#{ip.prefix}"
|
795
|
-
end
|
796
|
-
else
|
797
|
-
ipv4s = ipv4s.flatten.uniq
|
798
|
-
end
|
799
|
-
rescue Exception => e
|
800
|
-
p e
|
801
|
-
next
|
802
|
-
end
|
803
|
-
|
804
|
-
case ip_protocol
|
805
|
-
when 'tcp', 'udp'
|
806
|
-
if from_group == false
|
807
|
-
rule_maps << {
|
808
|
-
:ip_protocol => ip_protocol,
|
809
|
-
:ip_sport => ip_sport.to_i,
|
810
|
-
:ip_dport => ip_dport.to_i,
|
811
|
-
:protocol => protocol,
|
812
|
-
:ip_source => ip_source,
|
813
|
-
}
|
814
|
-
else
|
815
|
-
ipv4s.each { |ip|
|
816
|
-
rule_maps << {
|
817
|
-
:ip_protocol => ip_protocol,
|
818
|
-
:ip_sport => ip_sport.to_i,
|
819
|
-
:ip_dport => ip_dport.to_i,
|
820
|
-
:protocol => 'ip4',
|
821
|
-
:ip_source => ip,
|
822
|
-
}
|
823
|
-
}
|
824
|
-
end
|
825
|
-
when 'icmp'
|
826
|
-
# via http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/
|
827
|
-
#
|
828
|
-
# For the ICMP protocol, the ICMP type and code must be specified.
|
829
|
-
# This must be specified in the format type:code where both are integers.
|
830
|
-
# Type, code, or both can be specified as -1, which is a wildcard.
|
831
|
-
|
832
|
-
icmp_type = ip_dport.to_i
|
833
|
-
icmp_code = ip_sport.to_i
|
834
|
-
|
835
|
-
# icmp_type
|
836
|
-
case icmp_type
|
837
|
-
when -1
|
838
|
-
when 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
|
839
|
-
else
|
840
|
-
next
|
841
|
-
end
|
842
|
-
|
843
|
-
# icmp_code
|
844
|
-
case icmp_code
|
845
|
-
when -1
|
846
|
-
when 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
|
847
|
-
# when icmp_type equals -1 icmp_code must equal -1.
|
848
|
-
next if icmp_type == -1
|
849
|
-
else
|
850
|
-
next
|
851
|
-
end
|
852
|
-
|
853
|
-
if from_group == false
|
854
|
-
rule_maps << {
|
855
|
-
:ip_protocol => ip_protocol,
|
856
|
-
:icmp_type => ip_dport.to_i, # ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
|
857
|
-
:icmp_code => ip_sport.to_i, # ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
|
858
|
-
:protocol => protocol,
|
859
|
-
:ip_source => ip_source,
|
860
|
-
}
|
861
|
-
else
|
862
|
-
ipv4s.each { |ip|
|
863
|
-
rule_maps << {
|
864
|
-
:ip_protocol => ip_protocol,
|
865
|
-
:icmp_type => ip_dport.to_i, # ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
|
866
|
-
:icmp_code => ip_sport.to_i, # ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
|
867
|
-
:protocol => 'ip4',
|
868
|
-
:ip_source => ip,
|
869
|
-
}
|
870
|
-
}
|
871
|
-
end
|
872
|
-
end
|
873
|
-
end
|
874
|
-
|
875
|
-
rule_maps
|
876
|
-
end
|
877
|
-
|
878
|
-
def rpc
|
879
|
-
@rpc ||= Isono::NodeModules::RpcChannel.new(@node)
|
880
|
-
end
|
881
|
-
|
882
|
-
def event
|
883
|
-
@event ||= Isono::NodeModules::EventChannel.new(@node)
|
884
|
-
end
|
885
|
-
|
886
|
-
end
|
887
|
-
|
888
|
-
require 'net/telnet'
|
889
|
-
|
890
|
-
module KvmHelper
|
891
|
-
# Establish telnet connection to KVM monitor console
|
892
|
-
def connect_monitor(port, &blk)
|
893
|
-
begin
|
894
|
-
telnet = ::Net::Telnet.new("Host" => "localhost",
|
895
|
-
"Port"=>port.to_s,
|
896
|
-
"Prompt" => /\n\(qemu\) \z/,
|
897
|
-
"Timeout" => 60,
|
898
|
-
"Waittime" => 0.2)
|
899
|
-
|
900
|
-
blk.call(telnet)
|
901
|
-
rescue => e
|
902
|
-
logger.error(e) if self.respond_to?(:logger)
|
903
|
-
raise e
|
904
|
-
ensure
|
905
|
-
telnet.close
|
906
|
-
end
|
907
|
-
end
|
908
|
-
end
|
909
|
-
|
910
|
-
class InstanceMonitor < Isono::NodeModules::Base
|
911
|
-
include KvmHelper
|
912
|
-
include Dcmgr::Logger
|
913
|
-
|
914
|
-
initialize_hook do
|
915
|
-
@thread_pool = Isono::ThreadPool.new(1)
|
916
|
-
@monitor = EventMachine::PeriodicTimer.new(5) {
|
917
|
-
@thread_pool.pass {
|
918
|
-
myinstance.check_instance
|
919
|
-
}
|
920
|
-
}
|
921
|
-
end
|
922
|
-
|
923
|
-
terminate_hook do
|
924
|
-
@monitor.cancel
|
925
|
-
@thread_pool.shutdown
|
926
|
-
end
|
927
|
-
|
928
|
-
def check_instance()
|
929
|
-
instlst = rpc.request('hva-collector', 'get_alive_instances', manifest.node_id)
|
930
|
-
instlst.find_all{|i| i[:state] == 'running' }.each { |i|
|
931
|
-
begin
|
932
|
-
check_kvm_process(i)
|
933
|
-
rescue Exception => e
|
934
|
-
if i[:status] == 'online'
|
935
|
-
logger.error("#{e.class}, #{e.message}")
|
936
|
-
|
937
|
-
rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:offline}) { |req|
|
938
|
-
req.oneshot = true
|
939
|
-
}
|
940
|
-
event.publish('hva/fault_instance', :args=>[i[:uuid]])
|
941
|
-
end
|
942
|
-
next
|
943
|
-
end
|
944
|
-
|
945
|
-
if i[:status] != 'online'
|
946
|
-
rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:online}) { |req|
|
947
|
-
req.oneshot = true
|
948
|
-
}
|
949
|
-
end
|
950
|
-
}
|
951
|
-
end
|
952
|
-
|
953
|
-
private
|
954
|
-
def check_kvm_process(i)
|
955
|
-
pid = File.read(File.expand_path("#{i[:uuid]}/kvm.pid", node.manifest.config.vm_data_dir)).to_i
|
956
|
-
unless File.exists?(File.expand_path(pid.to_s, '/proc'))
|
957
|
-
raise "Unable to find the pid of kvm process: #{pid}"
|
958
|
-
end
|
959
|
-
end
|
960
|
-
|
961
|
-
def rpc
|
962
|
-
@rpc ||= Isono::NodeModules::RpcChannel.new(@node)
|
963
|
-
end
|
964
|
-
|
965
|
-
def event
|
966
|
-
@event ||= Isono::NodeModules::EventChannel.new(@node)
|
967
|
-
end
|
968
|
-
end
|
-
-class KvmHandler < EndpointBuilder
-  include Dcmgr::Logger
-  include Dcmgr::Helpers::CliHelper
-  include KvmHelper
-
-  def find_nic(ifindex = 2)
-    ifindex_map = {}
-    Dir.glob("/sys/class/net/*/ifindex").each do |ifindex_path|
-      device_name = File.split(File.split(ifindex_path).first)[1]
-      ifindex_num = File.readlines(ifindex_path).first.strip
-      ifindex_map[ifindex_num] = device_name
-    end
-    #p ifindex_map
-    ifindex_map[ifindex.to_s]
-  end
-
-  def nic_state(if_name = 'eth0')
-    operstate_path = "/sys/class/net/#{if_name}/operstate"
-    if File.exists?(operstate_path)
-      File.readlines(operstate_path).first.strip
-    end
-  end
-
-  def run_kvm(os_devpath)
-    # run vm
-    cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
-    args=[@inst[:instance_spec][:memory_size],
-          @inst[:instance_spec][:cpu_cores],
-          @inst_id,
-          @inst[:runtime_config][:vnc_port],
-          os_devpath,
-          File.expand_path('kvm.pid', @inst_data_dir),
-          @inst[:runtime_config][:telnet_port]
-         ]
-    if vnic = @inst[:instance_nics].first
-      cmd += " -net nic,macaddr=%s -net tap,ifname=%s,script=,downscript="
-      args << vnic[:mac_addr].unpack('A2'*6).join(':')
-      args << vnic[:uuid]
-    end
-    sh(cmd, args)
-
-    unless vnic.nil?
-      network_map = rpc.request('hva-collector', 'get_network', @inst[:instance_nics].first[:network_id])
-
-      # physical interface
-      physical_if = find_nic(@node.manifest.config.hv_ifindex)
-      raise "UnknownPhysicalNIC" if physical_if.nil?
-
-      if network_map[:vlan_id] == 0
-        # bridge interface
-        p bridge_if = @node.manifest.config.bridge_novlan
-        unless FileTest.exist?("/sys/class/net/#{bridge_if}/ifindex")
-          sh("/usr/sbin/brctl addbr %s", [bridge_if])
-          sh("/usr/sbin/brctl addif %s %s", [bridge_if, physical_if])
-        end
-      else
-        # vlan interface
-        vlan_if = "#{physical_if}.#{network_map[:vlan_id]}"
-        unless FileTest.exist?("/sys/class/net/#{vlan_if}/ifindex")
-          sh("/sbin/vconfig add #{physical_if} #{network_map[:vlan_id]}")
-        end
-
-        # bridge interface
-        bridge_if = "#{@node.manifest.config.bridge_prefix}-#{physical_if}.#{network_map[:vlan_id]}"
-        unless FileTest.exist?("/sys/class/net/#{bridge_if}/ifindex")
-          sh("/usr/sbin/brctl addbr %s", [bridge_if])
-          sh("/usr/sbin/brctl addif %s %s", [bridge_if, vlan_if])
-        end
-      end
-
-
-      # interface up? down?
-      [ vlan_if, bridge_if ].each do |ifname|
-        if nic_state(ifname) == "down"
-          sh("/sbin/ifconfig #{ifname} 0.0.0.0 up")
-        end
-      end
-
-      sh("/sbin/ifconfig %s 0.0.0.0 up", [vnic[:uuid]])
-      sh("/usr/sbin/brctl addif %s %s", [bridge_if, vnic[:uuid]])
-    end
-  end
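
run_kvm assembles a single kvm(1) invocation from the instance spec and runtime config. A sketch of the command it produces, with made-up values for memory, cores, ports and paths:

cmd  = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize" \
       " -monitor telnet::%d,server,nowait"
args = [512, 1, "i-00000001", 1,
        "/var/lib/vm/i-00000001/i-00000001",      # illustrative image path
        "/var/lib/vm/i-00000001/kvm.pid", 14001]  # illustrative pidfile and monitor port
puts sprintf(cmd, *args)
# => kvm -m 512 -smp 1 -name vdc-i-00000001 -vnc :1 -drive file=... -pidfile ... -daemonize -monitor telnet::14001,server,nowait
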
-
-  def attach_volume_to_host
-    # check under until the dev file is created.
-    # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
-    linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
-                                                                  @vol[:transport_information][:iqn],
-                                                                  @vol[:transport_information][:lun]]
-
-    tryagain do
-      next true if File.exist?(linux_dev_path)
-
-      sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_pool][:ipaddr]])
-      sh("iscsiadm -m node -l -T '%s' --portal '%s'",
-         [@vol[:transport_information][:iqn], @vol[:storage_pool][:ipaddr]])
-      sleep 1
-    end
-
-    rpc.request('sta-collector', 'update_volume', {
-                  :volume_id=>@vol_id,
-                  :state=>:attaching,
-                  :host_device_name => linux_dev_path})
-  end
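
attach_volume_to_host logs in to the iSCSI target and then simply waits for udev to create the by-path node. How that path is composed, using the sample address, IQN and LUN from the comment above:

ipaddr = "192.168.1.21"
iqn    = "iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc"
lun    = 0
path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{ipaddr}:3260", iqn, lun]
puts path
# => /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
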
-
-  def detach_volume_from_host
-    # iscsi logout
-    sh("iscsiadm -m node -T '%s' --logout", [@vol[:transport_information][:iqn]])
-
-    rpc.request('sta-collector', 'update_volume', {
-                  :volume_id=>@vol_id,
-                  :state=>:available,
-                  :host_device_name=>nil,
-                  :instance_id=>nil,
-                })
-    event.publish('hva/volume_detached', :args=>[@inst_id, @vol_id])
-  end
-
-  def terminate_instance
-    kvm_pid=`pgrep -u root -f vdc-#{@inst_id}`
-    if $?.exitstatus == 0 && kvm_pid.to_s =~ /^\d+$/
-      sh("/bin/kill #{kvm_pid}")
-    else
-      logger.error("Can not find the KVM process. Skipping: kvm -name vdc-#{@inst_id}")
-    end
-  end
-
-  def update_instance_state(opts, ev)
-    raise "Can't update instance info without setting @inst_id" if @inst_id.nil?
-    rpc.request('hva-collector', 'update_instance', @inst_id, opts)
-    event.publish(ev, :args=>[@inst_id])
-  end
-
-  def update_volume_state(opts, ev)
-    raise "Can't update volume info without setting @vol_id" if @vol_id.nil?
-    rpc.request('sta-collector', 'update_volume', opts.merge(:volume_id=>@vol_id))
-    event.publish(ev, :args=>[@vol_id])
-  end
-
-  job :run_local_store, proc {
-    @inst_id = request.args[0]
-    logger.info("Booting #{@inst_id}")
-
-    @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-    raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
-
-    rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
-    # setup vm data folder
-    @inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
-    FileUtils.mkdir(@inst_data_dir) unless File.exists?(@inst_data_dir)
-    # copy image file
-    img_src = @inst[:image][:source]
-    case img_src[:type].to_sym
-    when :http
-      img_path = File.expand_path("#{@inst[:uuid]}", @inst_data_dir)
-      sh("curl --silent -o '#{img_path}' #{img_src[:uri]}")
-    else
-      raise "Unknown image source type: #{img_src[:type]}"
-    end
-
-    run_kvm(img_path)
-    update_instance_state({:state=>:running}, 'hva/instance_started')
-  }, proc {
-    update_instance_state({:state=>:terminated, :terminated_at=>Time.now},
-                          'hva/instance_terminated')
-  }
-
-  job :run_vol_store, proc {
-    @inst_id = request.args[0]
-    @vol_id = request.args[1]
-
-    @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-    @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
-    logger.info("Booting #{@inst_id}")
-    raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
-
-    rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
-
-    # setup vm data folder
-    @inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
-    FileUtils.mkdir(@inst_data_dir) unless File.exists?(@inst_data_dir)
-
-    # create volume from snapshot
-    jobreq.run("zfs-handle.#{@vol[:storage_pool][:node_id]}", "create_volume", @vol_id)
-
-    logger.debug("volume created on #{@vol[:storage_pool][:node_id]}: #{@vol_id}")
-    # reload volume info
-    @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
-
-    rpc.request('sta-collector', 'update_volume', {:volume_id=>@vol_id, :state=>:attaching})
-    logger.info("Attaching #{@vol_id} on #{@inst_id}")
-    # check under until the dev file is created.
-    # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
-    linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
-                                                                  @vol[:transport_information][:iqn],
-                                                                  @vol[:transport_information][:lun]]
-
-    # attach disk
-    attach_volume_to_host
-
-    # run vm
-    run_kvm(linux_dev_path)
-    update_instance_state({:state=>:running}, 'hva/instance_started')
-    update_volume_state({:state=>:attached}, 'hva/volume_attached')
-  }, proc {
-    update_instance_state({:state=>:terminated, :terminated_at=>Time.now},
-                          'hva/instance_terminated')
-  }
-
-  job :terminate do
-    @inst_id = request.args[0]
-
-    @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-    raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
-
-    begin
-      rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:shuttingdown})
-
-      terminate_instance
-
-      unless @inst[:volume].nil?
-        @inst[:volume].each { |volid, v|
-          @vol_id = volid
-          @vol = v
-          # force to continue detaching volumes during termination.
-          detach_volume_from_host rescue logger.error($!)
-        }
-      end
-
-      # cleanup vm data folder
-      FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
-    ensure
-      update_instance_state({:state=>:terminated,:terminated_at=>Time.now},
-                            'hva/instance_terminated')
-    end
-  end
-
-  # just do terminate instance and unmount volumes not to affect
-  # state management.
-  # called from HA at which the faluty instance get cleaned properly.
-  job :cleanup do
-    @inst_id = request.args[0]
-
-    @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-    raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
-
-    begin
-      terminate_instance
-
-      unless @inst[:volume].nil?
-        @inst[:volume].each { |volid, v|
-          @vol_id = volid
-          @vol = v
-          # force to continue detaching volumes during termination.
-          detach_volume_from_host rescue logger.error($!)
-        }
-      end
-    end
-
-  end
-
-  job :attach, proc {
-    @inst_id = request.args[0]
-    @vol_id = request.args[1]
-
-    @job = Dcmgr::Stm::VolumeContext.new(@vol_id)
-    @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-    @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
-    logger.info("Attaching #{@vol_id}")
-    @job.stm.state = @vol[:state].to_sym
-    raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'available'
-
-    @job.stm.on_attach
-    rpc.request('sta-collector', 'update_volume', {:volume_id=>@vol_id, :state=>:attaching})
-    # check under until the dev file is created.
-    # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
-    linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
-                                                                  @vol[:transport_information][:iqn],
-                                                                  @vol[:transport_information][:lun]]
-
-    # attach disk on host os
-    attach_volume_to_host
-
-    logger.info("Attaching #{@vol_id} on #{@inst_id}")
-    @job.stm.on_attach
-    @job.on_attach
-
-    # attach disk on guest os
-
-    # pci_devddr consists of three hex numbers with colon separator.
-    #  dom <= 0xffff && bus <= 0xff && val <= 0x1f
-    # see: qemu-0.12.5/hw/pci.c
-    # /*
-    #  * Parse [[<domain>:]<bus>:]<slot>, return -1 on error
-    #  */
-    # static int pci_parse_devaddr(const char *addr, int *domp, int *busp, unsigned *slotp)
-    pci_devaddr = nil
-
-    sddev = File.expand_path(File.readlink(linux_dev_path), '/dev/disk/by-path')
-    connect_monitor(@inst[:runtime_config][:telnet_port]) { |t|
-      # success message:
-      #   OK domain 0, bus 0, slot 4, function 0
-      # error message:
-      #   failed to add file=/dev/xxxx,if=virtio
-      c = t.cmd("pci_add auto storage file=#{sddev},if=scsi")
-      # Note: pci_parse_devaddr() called in "pci_add" uses strtoul()
-      # with base 16 so that the input is expected in hex. however
-      # at the result display, void pci_device_hot_add_print() uses
-      # %d for showing bus and slot addresses. use hex to preserve
-      # those values to keep consistent.
-      if c =~ /\nOK domain ([0-9a-fA-F]+), bus ([0-9a-fA-F]+), slot ([0-9a-fA-F]+), function/m
-        # numbers in OK result is decimal. convert them to hex.
-        pci_devaddr = [$1, $2, $3].map{|i| i.to_i.to_s(16) }
-      else
-        raise "Error in qemu console: #{c}"
-      end
-
-      # double check the pci address.
-      c = t.cmd("info pci")
-
-      # static void pci_info_device(PCIBus *bus, PCIDevice *d)
-      # called in "info pci" gets back PCI bus info with %d.
-      if c.split(/\n/).grep(/^\s+Bus\s+#{pci_devaddr[1].to_i(16)}, device\s+#{pci_devaddr[2].to_i(16)}, function/).empty?
-        raise "Could not find new disk device attached to qemu-kvm: #{pci_devaddr.join(':')}"
-      end
-    }
-
-    rpc.request('sta-collector', 'update_volume', @job.to_hash(:guest_device_name=>pci_devaddr.join(':')))
-    event.publish('hva/volume_attached', :args=>[@inst_id, @vol_id])
-    logger.info("Attached #{@vol_id} on #{@inst_id}")
-  }
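
The :attach job hot-plugs the disk through the monitor's pci_add command and recovers the PCI address from the "OK ..." reply. A self-contained sketch of that parsing, fed with the sample success line quoted in the comments (same regexp, minus the leading newline and /m flag):

reply = "OK domain 0, bus 0, slot 4, function 0"
if reply =~ /OK domain ([0-9a-fA-F]+), bus ([0-9a-fA-F]+), slot ([0-9a-fA-F]+), function/
  # the numbers in the reply are decimal; keep them as hex strings, as the handler does
  pci_devaddr = [$1, $2, $3].map { |i| i.to_i.to_s(16) }
  puts pci_devaddr.join(':')   # => "0:0:4"
else
  raise "Error in qemu console: #{reply}"
end
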
-
-  job :detach do
-    @inst_id = request.args[0]
-    @vol_id = request.args[1]
-
-    @job = Dcmgr::Stm::VolumeContext.new(@vol_id)
-    @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-    @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
-    logger.info("Detaching #{@vol_id} on #{@inst_id}")
-    @job.stm.state = @vol[:state].to_sym
-    raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'attached'
-
-    @job.stm.on_detach
-    rpc.request('sta-collector', 'update_volume', @job.to_hash)
-    # detach disk on guest os
-    pci_devaddr = @vol[:guest_device_name]
-
-    connect_monitor(@inst[:runtime_config][:telnet_port]) { |t|
-      t.cmd("pci_del #{pci_devaddr}")
-      #
-      #  Bus  0, device   4, function 0:
-      #    SCSI controller: PCI device 1af4:1001
-      #      IRQ 0.
-      #      BAR0: I/O at 0x1000 [0x103f].
-      #      BAR1: 32 bit memory at 0x08000000 [0x08000fff].
-      #      id ""
-      c = t.cmd("info pci")
-      pci_devaddr = pci_devaddr.split(':')
-      unless c.split(/\n/).grep(/\s+Bus\s+#{pci_devaddr[1].to_i(16)}, device\s+#{pci_devaddr[2].to_i(16)}, function/).empty?
-        raise "Detached disk device still be attached in qemu-kvm: #{pci_devaddr.join(':')}"
-      end
-    }
-
-    detach_volume_from_host
-
-    @job.stm.on_detach
-    @job.on_detach
-  end
-
-  def rpc
-    @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
-  end
-
-  def jobreq
-    @jobreq ||= Isono::NodeModules::JobChannel.new(@node)
-  end
-
-  def event
-    @event ||= Isono::NodeModules::EventChannel.new(@node)
-  end
-end
-
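
Both :attach and :detach double-check the guest PCI bus by grepping "info pci" output for the bus/device pair. A sketch of that check against the sample monitor output quoted in the :detach comments, for a device address of 0:0:4:

info_pci = <<OUTPUT
  Bus  0, device   4, function 0:
    SCSI controller: PCI device 1af4:1001
      IRQ 0.
OUTPUT
_, bus, slot = "0:0:4".split(':')
found = ! info_pci.split(/\n/).grep(/^\s+Bus\s+#{bus.to_i(16)}, device\s+#{slot.to_i(16)}, function/).empty?
puts found   # => true
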
 
 manifest = DEFAULT_MANIFEST.dup
 manifest.instance_eval do
   node_name 'hva'
   node_instance_id "#{Isono::Util.default_gw_ipaddr}"
   load_module Isono::NodeModules::NodeHeartbeat
-  load_module ServiceNetfilter
-  load_module InstanceMonitor
+  load_module Dcmgr::NodeModules::ServiceNetfilter
+  load_module Dcmgr::NodeModules::InstanceMonitor
 
   config do |c|
     c.vm_data_dir = '/var/lib/vm'
@@ -1366,12 +24,14 @@ manifest.instance_eval do
     c.hv_ifindex = 2      # ex. /sys/class/net/eth0/ifindex => 2
     c.bridge_prefix = 'br'
     c.bridge_novlan = 'br0'
+    c.verbose_netfilter = false
+    c.packet_drop_log = false
+    c.debug_iptables = false
   end
 
-
-  load_config
+  load_config File.expand_path('config/hva.conf', app_root)
 end
 
 start(manifest) do
-  endpoint "kvm-handle.#{@node.node_id}",
+  endpoint "kvm-handle.#{@node.node_id}", Dcmgr::Rpc::HvaHandler
 end