wakame-vdc-agents 10.11.0
- data/LICENSE +202 -0
- data/NOTICE +1 -0
- data/Rakefile +142 -0
- data/bin/hva +972 -0
- data/bin/nsa +147 -0
- data/bin/sta +182 -0
- data/config/hva.conf.example +10 -0
- data/config/initializers/isono.rb +43 -0
- data/config/initializers/passenger.rb +6 -0
- data/config/initializers/sequel.rb +21 -0
- data/config/nsa.conf.example +9 -0
- data/config/path_resolver.rb +12 -0
- data/lib/dcmgr.rb +115 -0
- data/lib/dcmgr/endpoints/core_api.rb +1004 -0
- data/lib/dcmgr/endpoints/core_api_mock.rb +816 -0
- data/lib/dcmgr/endpoints/errors.rb +55 -0
- data/lib/dcmgr/endpoints/metadata.rb +129 -0
- data/lib/dcmgr/logger.rb +44 -0
- data/lib/dcmgr/models/account.rb +104 -0
- data/lib/dcmgr/models/account_resource.rb +16 -0
- data/lib/dcmgr/models/base.rb +69 -0
- data/lib/dcmgr/models/base_new.rb +371 -0
- data/lib/dcmgr/models/frontend_system.rb +38 -0
- data/lib/dcmgr/models/host_pool.rb +102 -0
- data/lib/dcmgr/models/image.rb +46 -0
- data/lib/dcmgr/models/instance.rb +255 -0
- data/lib/dcmgr/models/instance_netfilter_group.rb +16 -0
- data/lib/dcmgr/models/instance_nic.rb +68 -0
- data/lib/dcmgr/models/instance_spec.rb +21 -0
- data/lib/dcmgr/models/ip_lease.rb +42 -0
- data/lib/dcmgr/models/netfilter_group.rb +88 -0
- data/lib/dcmgr/models/netfilter_rule.rb +21 -0
- data/lib/dcmgr/models/network.rb +32 -0
- data/lib/dcmgr/models/physical_host.rb +67 -0
- data/lib/dcmgr/models/request_log.rb +25 -0
- data/lib/dcmgr/models/ssh_key_pair.rb +55 -0
- data/lib/dcmgr/models/storage_pool.rb +134 -0
- data/lib/dcmgr/models/tag.rb +126 -0
- data/lib/dcmgr/models/tag_mapping.rb +28 -0
- data/lib/dcmgr/models/volume.rb +130 -0
- data/lib/dcmgr/models/volume_snapshot.rb +47 -0
- data/lib/dcmgr/node_modules/hva_collector.rb +134 -0
- data/lib/dcmgr/node_modules/sta_collector.rb +72 -0
- data/lib/dcmgr/scheduler.rb +12 -0
- data/lib/dcmgr/scheduler/find_last.rb +16 -0
- data/lib/dcmgr/scheduler/find_random.rb +16 -0
- data/lib/dcmgr/stm/instance.rb +25 -0
- data/lib/dcmgr/stm/snapshot_context.rb +33 -0
- data/lib/dcmgr/stm/volume_context.rb +65 -0
- data/lib/dcmgr/web/base.rb +21 -0
- data/lib/sinatra/accept_media_types.rb +128 -0
- data/lib/sinatra/lazy_auth.rb +56 -0
- data/lib/sinatra/rabbit.rb +278 -0
- data/lib/sinatra/respond_to.rb +272 -0
- data/lib/sinatra/sequel_transaction.rb +27 -0
- data/lib/sinatra/static_assets.rb +83 -0
- data/lib/sinatra/url_for.rb +44 -0
- metadata +270 -0
data/LICENSE
ADDED
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
data/NOTICE
ADDED
@@ -0,0 +1 @@
Copyright (c) 2009-2010 axsh Ltd.
data/Rakefile
ADDED
@@ -0,0 +1,142 @@
# -*- coding: utf-8 -*-

begin
  require 'rubygems'
  require 'bundler/setup'
rescue LoadError
end
$:.unshift 'lib'

require 'rake/clean'
require 'dcmgr'

task :environment do
  Dcmgr.configure 'dcmgr.conf'
  Dcmgr.run_initializers
end

namespace :db do
  desc 'Create all database tables'
  task :init => [ :environment ] do
    ::Kernel.load(File.expand_path('../conf/initializers/sequel.rb', __FILE__))
    require 'isono'

    Dcmgr::Models::CREATE_TABLE_CLASSES.each { |c|
      Dcmgr::Models.const_get(c).create_table!
    }
    Isono::Models::NodeState.create_table!
    Isono::Models::JobState.create_table!

    Dcmgr::Models::CREATE_TABLE_CLASSES.each { |c|
      Dcmgr::Models.const_get(c).install_data
    }
  end

  desc 'Drop all database tables'
  task :drop => [ :environment ] do
    require 'sequel'
    require 'isono'

    Dcmgr::Models::CREATE_TABLE_CLASSES.each { |c|
      Dcmgr::Models.const_get(c).drop_table
    }
    Isono::Models::NodeState.drop_table
    Isono::Models::JobState.drop_table
  end
end

desc 'run bundle command to install vendored gems.'
task :bundle do
  sh <<_ENDCMD
mkdir .bundle
cat <<END_ > .bundle/config
---
BUNDLE_DISABLE_SHARED_GEMS: "1"
BUNDLE_PATH: vendor/bundle
END_
_ENDCMD
  sh "bundle install"
end

desc 'build gem packages'
task :gem do
  require 'rubygems'
  require 'rake/gempackagetask'

  spec = Gem::Specification.new do |s|
    s.platform = Gem::Platform::RUBY
    s.version = Dcmgr::VERSION
    s.authors = ['axsh Ltd.']
    s.email = ['dev@axsh.net']
    s.homepage = 'http://wakame.jp/'
    s.name = 'wakame-vdc-dcmgr'
    s.summary = "Datacenter management toolkit for IaaS Cloud: datacenter manager and support modules"
    s.description = ''
    s.require_path = 'lib'
    s.required_ruby_version = '>= 1.8.7'

    s.files = Dir['config/**/*.rb', 'lib/**/*.rb', 'web/api/public/**/*.*',
                  'web/metadata/public/**/*.*'] +
      %w(Rakefile LICENSE NOTICE
         web/api/config.ru web/metadata/config.ru config/dcmgr.conf.example)

    s.bindir='bin'
    s.executables = %w(collector)

    s.add_dependency "isono", ">= 0.1.0", "< 0.2"
    s.add_dependency "eventmachine", "0.12.10"
    s.add_dependency "log4r"
    s.add_dependency "extlib", '0.9.15'
    s.add_dependency "configuration"
    s.add_dependency "statemachine", '1.1.1'
    s.add_dependency "ruby-hmac"
    s.add_dependency "ipaddress", '0.7.0'
    s.add_dependency "rack", ">= 1.2.1"
    s.add_dependency "sinatra", "1.0"
    s.add_dependency "json", ">= 1.2.0"
    s.add_dependency "sequel", "3.16.0"
    s.add_dependency "mysql", ">= 2.8.1"

    s.add_development_dependency 'bacon'
    s.add_development_dependency 'rake'
  end

  File.open("#{spec.name}.gemspec", 'w'){|f| f.write(spec.to_ruby) }
  sh "gem build #{spec.name}.gemspec"

  spec = Gem::Specification.new do |s|
    s.platform = Gem::Platform::RUBY
    s.version = Dcmgr::VERSION
    s.authors = ['axsh Ltd.']
    s.email = ['dev@axsh.net']
    s.homepage = 'http://wakame.jp/'
    s.name = 'wakame-vdc-agents'
    s.summary = "Datacenter management toolkit for IaaS Cloud: agent modules"
    s.description = ''
    s.require_path = 'lib'
    s.required_ruby_version = '>= 1.8.7'

    s.files = Dir['config/**/*.rb', 'lib/**/*.rb'] +
      %w(Rakefile LICENSE NOTICE
         config/hva.conf.example config/nsa.conf.example)

    s.bindir='bin'
    s.executables = %w(hva sta nsa)

    s.add_dependency "isono", ">= 0.1.0", "< 0.2"
    s.add_dependency "eventmachine", "0.12.10"
    s.add_dependency "log4r"
    s.add_dependency "extlib", '0.9.15'
    s.add_dependency "configuration"
    s.add_dependency "statemachine", '1.1.1'
    s.add_dependency "ruby-hmac"
    s.add_dependency "ipaddress", '0.7.0'
    s.add_dependency "open4"

    s.add_development_dependency 'bacon'
    s.add_development_dependency 'rake'
  end

  File.open("#{spec.name}.gemspec", 'w'){|f| f.write(spec.to_ruby) }
  sh "gem build #{spec.name}.gemspec"
end
data/bin/hva
ADDED
@@ -0,0 +1,972 @@
#!/usr/bin/env ruby
# -*- coding: utf-8 -*-

begin
  require 'rubygems'
  require 'bundler'
  Bundler.setup(:default)
rescue Exception
end

require File.expand_path('../../config/path_resolver', __FILE__)

include Isono::Runner::RpcServer
require 'fileutils'

class ServiceNetfilter < Isono::NodeModules::Base
  include Dcmgr::Logger

  initialize_hook do
    @worker_thread = Isono::ThreadPool.new(1)

    @worker_thread.pass {
      myinstance.init_netfilter
    }

    event = Isono::NodeModules::EventChannel.new(node)

    event.subscribe('hva/instance_started', '#') do |args|
      @worker_thread.pass {
        logger.info("refresh on instance_started: #{args.inspect}")
        inst_id = args[0]
        logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
        myinstance.refresh_netfilter_by_friend_instance_id(inst_id)
      }
    end

    event.subscribe('hva/instance_terminated', '#') do |args|
      @worker_thread.pass {
        logger.info("refresh on instance_terminated: #{args.inspect}")
        inst_id = args[0]
        logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
        myinstance.refresh_netfilter_by_friend_instance_id(inst_id)
      }
    end

    event.subscribe('hva/netfilter_updated', '#') do |args|
      @worker_thread.pass {
        logger.info("refresh on netfilter_updated: #{args.inspect}")
        netfilter_group_id = args[0]
        myinstance.refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
      }
    end
  end

  def init_netfilter
    begin
      inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)

      init_ebtables(inst_maps) if @node.manifest.config.enable_ebtables
      init_iptables(inst_maps) if @node.manifest.config.enable_iptables
      logger.info("initialize netfilter")
    rescue Exception => e
      p e
    end
  end

  # from event_subscriber
  def refresh_netfilter_by_friend_instance_id(inst_id)
    raise "UnknownInstanceID" if inst_id.nil?

    begin
      inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
      ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])

      inst_maps = ng.map { |g|
        rpc.request('hva-collector', 'get_instances_of_netfilter_group', g[:id])
      }

      if inst_maps.size > 0
        inst_maps.flatten.uniq.each { |inst_map|
          unless inst_map.nil?
            refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
            refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
          end
        }
      end
    rescue Exception => e
      p e
    end
  end

  # from event_subscriber
  def refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
    raise "UnknownNetfilterGroupID" if netfilter_group_id.nil?

    begin
      inst_maps = rpc.request('hva-collector', 'get_instances_of_netfilter_group', netfilter_group_id)
      inst_maps.each { |inst_map|
        unless inst_map.nil?
          refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
          refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
        end
      }
    rescue Exception => e
      p e
    end
  end

  def init_ebtables(inst_maps = [])
    cmd = "sudo ebtables --init-table"
    puts cmd
    system(cmd)

    inst_maps.each { |inst_map|
      refresh_ebtables(inst_map)
    }
  end

  def init_iptables(inst_maps = [])
    [ 'nat', 'filter' ].each { |table|
      [ 'F', 'Z', 'X' ].each { |xcmd|
        cmd = "sudo iptables -t #{table} -#{xcmd}"
        puts cmd
        system(cmd)
      }
    }

    inst_maps.each { |inst_map|
      refresh_iptables(inst_map)
    }
  end

  def valid_vif?(vif)
    cmd = "ifconfig #{vif} >/dev/null 2>&1"
    system(cmd)

    if $?.exitstatus == 0
      true
    else
      logger.warn("#{vif}: error fetching interface information: Device not found")
      false
    end
  end

  def refresh_ebtables(inst_map = {})
    logger.debug("refresh_ebtables: #{inst_map[:uuid]} ...")

    # Does the hva have the instance?
    unless inst_map[:host_pool][:node_id] == node.node_id
      logger.warn("no match for the instance: #{inst_map[:uuid]}")
      return
    end

    network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
    raise "UnknownNetworkId" if network_map.nil?

    vif = inst_map[:instance_nics].first[:vif]
    vif_mac = inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':')

    flush_ebtables(inst_map)

    # Does host have vif?
    unless valid_vif?(vif)
      return
    end

    # group node IPv4 addresses.
    ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])

    # xtables commands
    cmds = []

    # support IP protocol
    protocol_maps = {
      'ip4' => 'ip4',
      'arp' => 'arp',
      #ip6'  => 'ip6',
      #rarp' => '0x8035',
    }

    # make chain names.
    chains = []
    chains << "s_#{vif}"
    chains << "d_#{vif}"
    chains << "s_#{vif}_d_host"
    protocol_maps.each { |k,v|
      chains << "s_#{vif}_#{k}"
      chains << "d_#{vif}_#{k}"
      chains << "s_#{vif}_d_host_#{k}"
    }

    # create user defined chains.
    [ 'N' ].each { |xcmd|
      chains.each { |chain|
        cmds << "sudo ebtables -#{xcmd} #{chain}"
      }
    }

    # jump to user defined chains
    cmds << "sudo ebtables -A FORWARD -i #{vif} -j s_#{vif}"
    cmds << "sudo ebtables -A FORWARD -o #{vif} -j d_#{vif}"
    cmds << "sudo ebtables -A INPUT -i #{vif} -j s_#{vif}_d_host"

    # IP protocol routing
    protocol_maps.each { |k,v|
      cmds << "sudo ebtables -A s_#{vif} -p #{v} -j s_#{vif}_#{k}"
      cmds << "sudo ebtables -A d_#{vif} -p #{v} -j d_#{vif}_#{k}"
      cmds << "sudo ebtables -A s_#{vif}_d_host -p #{v} -j s_#{vif}_d_host_#{k}"
    }

    # default drop
    cmds << "sudo ebtables -A s_#{vif} --log-level warning --log-ip --log-arp --log-prefix 's_#{vif} DROP:' -j CONTINUE"
    cmds << "sudo ebtables -A s_#{vif}_d_host --log-level warning --log-ip --log-arp --log-prefix 's_#{vif}_d_host DROP:' -j CONTINUE"
    cmds << "sudo ebtables -A s_#{vif} -j DROP"
    cmds << "sudo ebtables -A s_#{vif}_d_host -j DROP"

    # anti spoof
    #cmds << "sudo ebtables -A s_#{vif}_arp --protocol arp --arp-mac-src ! #{vif_mac} -j DROP"
    #cmds << "sudo ebtables -A d_#{vif}_arp --protocol arp --arp-mac-dst ! #{vif_mac} -j DROP"

    # group nodes.
    ipv4s << network_map[:ipv4_gw]
    ipv4s << network_map[:dns_server]
    ipv4s << network_map[:dhcp_server]
    ipv4s.uniq.each do |ipv4|
      cmds << "sudo ebtables -A d_#{vif}_arp --protocol arp --arp-ip-src #{ipv4} -j ACCEPT"
    end

    # deny,allow
    cmds << "sudo ebtables -A d_#{vif}_arp --log-level warning --log-ip --log-arp --log-prefix 's_#{vif}_arp DROP:' -j CONTINUE"
    cmds << "sudo ebtables -A s_#{vif}_d_host_arp --log-level warning --log-ip --log-arp --log-prefix 's_#{vif}_d_host_arp DROP:' -j CONTINUE"
    cmds << "sudo ebtables -A d_#{vif}_arp -j DROP"
    cmds << "sudo ebtables -A s_#{vif}_d_host_arp -j DROP"

    cmds.uniq! if cmds.size > 0
    cmds.compact.each { |cmd|
      puts cmd
      system(cmd)
    }

    logger.debug("refresh_ebtables: #{inst_map[:uuid]} done.")
  end

  def refresh_iptables(inst_map = {})
    logger.debug("refresh_iptables: #{inst_map[:uuid]} ...")

    # Does the hva have the instance?
    unless inst_map[:host_pool][:node_id] == node.node_id
      logger.warn "no match for the instance: #{inst_map[:uuid]}"
      return
    end

    network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
    raise "UnknownNetworkId" if network_map.nil?

    vif = inst_map[:instance_nics].first[:vif]
    vif_mac = inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':')

    flush_iptables(inst_map)

    # Does host have vif?
    unless valid_vif?(vif)
      return
    end

    # group node IPv4 addresses.
    ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])

    ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
    rules = ng.map { |g|
      g[:rules].map { |rule| rule[:permission] }
    }
    rules.flatten! if rules.size > 0

    # xtables commands
    cmds = []

    # support IP protocol
    protocol_maps = {
      'tcp'  => 'tcp',
      'udp'  => 'udp',
      'icmp' => 'icmp',
    }

    # make chain names.
    chains = []
    protocol_maps.each { |k,v|
      chains << "s_#{vif}_#{k}"
      chains << "d_#{vif}_#{k}"
    }
    chains << "s_#{vif}"
    chains << "d_#{vif}"

    # metadata-server
    [ 'A' ].each { |xcmd|
      system("sudo iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80")
    }

    # create user defined chains.
    [ 'N' ].each { |xcmd|
      chains.each { |chain|
        cmds << "sudo iptables -#{xcmd} #{chain}"

        # logger & drop
        cmds << "sudo iptables -N #{chain}_drop"
        cmds << "sudo iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix '#{chain} DROP:'"
        cmds << "sudo iptables -A #{chain}_drop -j DROP"
      }
    }

    # group nodes
    ipv4s << network_map[:ipv4_gw]
    ipv4s.each { |addr|
      cmds << "sudo iptables -A d_#{vif} -s #{addr} -j ACCEPT"
    }

    # IP protocol routing
    [ 's', 'd' ].each do |bound|
      protocol_maps.each { |k,v|
        cmds << "sudo iptables -N #{bound}_#{vif}_#{k}"

        case k
        when 'tcp'
          case bound
          when 's'
            cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
          when 'd'
            #cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
            cmds << "sudo iptables -A #{bound}_#{vif} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
            cmds << "sudo iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
          end
        when 'udp'
          case bound
          when 's'
            cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
          when 'd'
            #cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
            cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
            cmds << "sudo iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
          end
        when 'icmp'
          case bound
          when 's'
            cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
          when 'd'
            #cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
            cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
            cmds << "sudo iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
          end
        end
      }
    end

    cmds << "sudo iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}"
    cmds << "sudo iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}"

    ##
    ## ACCEPT
    ##
    # DHCP Server
    cmds << "sudo iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
    #cmds << "sudo iptables -A d_#{vif}_udp -p udp --sport 67 -j d_#{vif}_udp_drop"
    # DNS Server
    cmds << "sudo iptables -A s_#{vif}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"

    ##
    ## DROP
    ##
    protocol_maps.each { |k,v|
      # DHCP
      cmds << "sudo iptables -A s_#{vif} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif}_#{k}_drop"
      # DNS
      cmds << "sudo iptables -A s_#{vif} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif}_#{k}_drop"
    }

    # security group
    # rules
    build_rule(rules).each do |rule|
      case rule[:ip_protocol]
      when 'tcp', 'udp'
        cmds << "sudo iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_dport]} -j ACCEPT"
      when 'icmp'
        # ToDo: implement
        # - icmp_type : -1...
        # - icmp_code : -1...
        # cmds << "sudo iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
        cmds << "sudo iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
      end
    end

    # drop other routings
    protocol_maps.each { |k,v|
      cmds << "sudo iptables -A d_#{vif}_#{k} -p #{k} -j d_#{vif}_#{k}_drop"
    }

    # IP protocol routing
    [ 'd' ].each do |bound|
      protocol_maps.each { |k,v|
        cmds << "sudo iptables -A #{bound}_#{vif}_#{k} -j #{bound}_#{vif}_#{k}_drop"
      }
    end

    cmds.uniq! if cmds.size > 0
    cmds.compact.each { |cmd|
      puts cmd
      system(cmd)
    }

    logger.debug("refresh_iptables: #{inst_map[:uuid]} done.")
  end

  def flush_ebtables(inst_map = {})
    logger.debug("flush_ebtables: #{inst_map[:uuid]} ...")

    # Does the hva have the instance?
    unless inst_map[:host_pool][:node_id] == node.node_id
      logger.warn "no match for the instance: #{inst_map[:uuid]}"
      return
    end

    network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
    raise "UnknownNetworkId" if network_map.nil?

    vif = inst_map[:instance_nics].first[:vif]

    # support IP protocol
    protocol_maps = {
      'ip4' => 'ip4',
      'arp' => 'arp',
      #ip6'  => 'ip6',
      #rarp' => '0x8035',
    }

    # make chain names.
    chains = []
    chains << "s_#{vif}"
    chains << "d_#{vif}"
    chains << "s_#{vif}_d_host"
    protocol_maps.each { |k,v|
      chains << "s_#{vif}_#{k}"
      chains << "d_#{vif}_#{k}"
      chains << "s_#{vif}_d_host_#{k}"
    }

    # clear rules if exists.
    system("sudo ebtables -L s_#{vif} >/dev/null 2>&1")
    if $?.exitstatus == 0
      cmd = "sudo ebtables -D FORWARD -i #{vif} -j s_#{vif}"
      puts cmd
      system(cmd)
    end

    system("sudo ebtables -L d_#{vif} >/dev/null 2>&1")
    if $?.exitstatus == 0
      cmd = "sudo ebtables -D FORWARD -o #{vif} -j d_#{vif}"
      puts cmd
      system(cmd)
    end

    system("sudo ebtables -L s_#{vif}_d_host >/dev/null 2>&1")
    if $?.exitstatus == 0
      cmd = "sudo ebtables -D INPUT -i #{vif} -j s_#{vif}_d_host"
      puts cmd
      system(cmd)
    end

    [ 'F', 'Z', 'X' ].each { |xcmd|
      chains.each { |chain|
        system("sudo ebtables -L #{chain} >/dev/null 2>&1")
        if $?.exitstatus == 0
          cmd = "sudo ebtables -#{xcmd} #{chain}"
          puts cmd
          system(cmd)
        end
      }
    }

    logger.debug("flush_ebtables: #{inst_map[:uuid]} #{vif} done.")
  end

  def flush_iptables(inst_map = {})
    logger.debug("flush_iptables: #{inst_map[:uuid]} ...")

    # Does the hva have the instance?
    unless inst_map[:host_pool][:node_id] == node.node_id
      logger.warn "no match for the instance: #{inst_map[:uuid]}"
      return
    end

    network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
    raise "UnknownNetworkId" if network_map.nil?

    vif = inst_map[:instance_nics].first[:vif]

    # support IP protocol
    protocol_maps = {
      'tcp'  => 'tcp',
      'udp'  => 'udp',
      'icmp' => 'icmp',
    }

    # make chain names.
    chains = []
    protocol_maps.each { |k,v|
      chains << "s_#{vif}_#{k}"
      chains << "d_#{vif}_#{k}"
      chains << "s_#{vif}_#{k}_drop"
      chains << "d_#{vif}_#{k}_drop"
    }
    chains << "s_#{vif}"
    chains << "d_#{vif}"
    chains << "s_#{vif}_drop"
    chains << "d_#{vif}_drop"

    # metadata-server
    [ 'D' ].each { |xcmd|
      system("sudo iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80 >/dev/null 2>&1")
    }

    # clean rules if exists.
    system("sudo iptables -nL s_#{vif} >/dev/null 2>&1")
    if $?.exitstatus == 0
      system("sudo iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}")
    end

    system("sudo iptables -nL d_#{vif} >/dev/null 2>&1")
    if $?.exitstatus == 0
      system("sudo iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}")
    end

    [ 'F', 'Z', 'X' ].each { |xcmd|
      chains.each { |chain|
        system("sudo iptables -nL #{chain} >/dev/null 2>&1")
        if $?.exitstatus == 0
          system("sudo iptables -#{xcmd} #{chain}")
        end
      }
    }

    logger.debug("flush_iptables: #{inst_map[:uuid]} #{vif} done.")
  end

  def build_rule(rules = [])
    require 'ipaddress'

    rule_maps = []

    rules.each do |rule|
      # ex.
      # "tcp:22,22,ip4:0.0.0.0"
      # "udp:53,53,ip4:0.0.0.0"
      # "icmp:-1,-1,ip4:0.0.0.0"

      # 1st phase
      # ip_dport : tcp,udp? 1 - 16bit, icmp: -1
      # ip_dport has been separated in the first phase.
      from_pair, ip_dport, source_pair = rule.split(',')

      # 2nd phase
      # ip_protocol : [ tcp | udp | icmp ]
      # ip_sport : tcp,udp? 1 - 16bit, icmp: -1
      ip_protocol, ip_sport = from_pair.split(':')

      # protocol : [ ip4 | ip6 ]
      # ip_source : ip4? xxx.xxx.xxx.xxx/[0-32], ip6?: not yet supported.
      protocol, ip_source = source_pair.split(':')

      # validate
      next unless protocol == 'ip4'
      # next unless IPAddress.valid?(ip_source)

      # IPAddress doesn't support prefix '0'.
      ip_addr, prefix = ip_source.split('/', 2)
      if prefix.to_i == 0
        ip_source = ip_addr
      end

      begin
        ip = IPAddress(ip_source)
        ip_source = case ip.u32
                    when 0
                      "#{ip.address}/0"
                    else
                      "#{ip.address}/#{ip.prefix}"
                    end
      rescue Exception => e
        p e
        next
      end

      case ip_protocol
      when 'tcp', 'udp'
        rule_maps << {
          :ip_protocol => ip_protocol,
          :ip_sport    => ip_sport.to_i,
          :ip_dport    => ip_dport.to_i,
          :protocol    => protocol,
          :ip_source   => ip_source,
        }
      when 'icmp'
        # via http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/
        #
        # For the ICMP protocol, the ICMP type and code must be specified.
        # This must be specified in the format type:code where both are integers.
        # Type, code, or both can be specified as -1, which is a wildcard.

        rule_maps << {
          :ip_protocol => ip_protocol,
          :icmp_type   => -1, # ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
          :icmp_code   => -1, # ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
          :protocol    => protocol,
          :ip_source   => ip_source,
        }
      end
    end

    rule_maps
  end

  def rpc
    @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
  end

  def event
    @event ||= Isono::NodeModules::EventChannel.new(@node)
  end

end

|
+
require 'shellwords'
|
632
|
+
raise "Shellword is old version." unless Shellwords.respond_to?(:shellescape)
|
633
|
+
require 'open4'
|
634
|
+
|
635
|
+
module CliHelper
|
636
|
+
class TimeoutError < RuntimeError; end
|
637
|
+
|
638
|
+
def tryagain(opts={:timeout=>60, :retry=>3}, &blk)
|
639
|
+
timedout = false
|
640
|
+
curthread = Thread.current
|
641
|
+
|
642
|
+
timersig = EventMachine.add_timer(opts[:timeout]) {
|
643
|
+
timedout = true
|
644
|
+
if curthread
|
645
|
+
curthread.raise(TimeoutError.new("timeout"))
|
646
|
+
curthread.pass
|
647
|
+
end
|
648
|
+
}
|
649
|
+
|
650
|
+
begin
|
651
|
+
count = 0
|
652
|
+
begin
|
653
|
+
break if blk.call
|
654
|
+
end while !timedout && ((count += 1) < opts[:retry])
|
655
|
+
rescue TimeoutError => e
|
656
|
+
raise e
|
657
|
+
ensure
|
658
|
+
curthread = nil
|
659
|
+
EventMachine.cancel_timer(timersig) rescue nil
|
660
|
+
end
|
661
|
+
end
|
662
|
+
|
663
|
+
class CommandError < StandardError
|
664
|
+
attr_reader :stderr, :stdout
|
665
|
+
def initialize(msg, stdout, stderr)
|
666
|
+
super(msg)
|
667
|
+
@stdout = stdout
|
668
|
+
@stderr = stderr
|
669
|
+
end
|
670
|
+
end
|
671
|
+
|
672
|
+
def sh(cmd, args=[], opts={})
|
673
|
+
opts = opts.merge({:expect_exitcode=>0})
|
674
|
+
cmd = sprintf(cmd, *args.map {|a| Shellwords.shellescape(a.to_s) })
|
675
|
+
|
676
|
+
outbuf = errbuf = ''
|
677
|
+
blk = proc {|pid, stdin, stdout, stderr|
|
678
|
+
stdin.close
|
679
|
+
outbuf = stdout
|
680
|
+
errbuf = stderr
|
681
|
+
}
|
682
|
+
stat = Open4::popen4(cmd, &blk)
|
683
|
+
if self.respond_to? :logger
|
684
|
+
logger.debug("Exec command (pid=#{stat.pid}): #{cmd}")
|
685
|
+
logger.debug("STDOUT:\n#{outbuf}\nSTDERR:\n#{errbuf}")
|
686
|
+
end
|
687
|
+
if stat.exitstatus != opts[:expect_exitcode]
|
688
|
+
raise CommandError, "Unexpected exit code=#{stat.extstatus} (expected=#{opts{:expect_exitcode}})",
|
689
|
+
outbuf, errbuf
|
690
|
+
end
|
691
|
+
true
|
692
|
+
end
|
693
|
+
end
|
694
|
+
|
695
|
+
require 'net/telnet'
|
696
|
+
|
697
|
+
module KvmHelper
|
698
|
+
# Establish telnet connection to KVM monitor console
|
699
|
+
def connect_monitor(port, &blk)
|
700
|
+
begin
|
701
|
+
telnet = ::Net::Telnet.new("Host" => "localhost",
|
702
|
+
"Port"=>port.to_s,
|
703
|
+
"Prompt" => /\n\(qemu\) /,
|
704
|
+
"Timeout" => 60,
|
705
|
+
"Waittime" => 0.2)
|
706
|
+
|
707
|
+
blk.call(telnet)
|
708
|
+
rescue => e
|
709
|
+
logger.error(e) if self.respond_to?(:logger)
|
710
|
+
ensure
|
711
|
+
telnet.close
|
712
|
+
end
|
713
|
+
end
|
714
|
+
end
|
715
|
+
|
class KvmHandler < EndpointBuilder
  include Dcmgr::Logger
  include CliHelper
  include KvmHelper

  job :run_local_store do
    #hva = rpc.delegate('hva-collector')
    inst_id = request.args[0]
    logger.info("Booting #{inst_id}")
    #inst = hva.get_instance(inst_id)

    inst = rpc.request('hva-collector', 'get_instance', inst_id)
    raise "Invalid instance state: #{inst[:state]}" unless inst[:state].to_s == 'init'

    # setup vm data folder
    inst_data_dir = File.expand_path("#{inst_id}", @node.manifest.config.vm_data_dir)
    FileUtils.mkdir(inst_data_dir)
    # copy image file
    img_src = inst[:image][:source]
    case img_src[:type].to_sym
    when :http
      img_path = File.expand_path("#{inst_id}/#{inst[:uuid]}", @node.manifest.config.vm_data_dir)
      sh("curl --silent -o '#{img_path}' #{img_src[:uri]}")
    else
      raise "Unknown image source type: #{img_src[:type]}"
    end

    # boot virtual machine
    cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
    args = [
      inst[:instance_spec][:memory_size],
      inst[:instance_spec][:cpu_cores],
      inst_id,
      inst[:runtime_config][:vnc_port],
      img_path,
      File.expand_path('kvm.pid', inst_data_dir),
      inst[:runtime_config][:telnet_port]
    ]
    sh(cmd, args)

    rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:running})
    event.publish('hva/instance_started', :args=>[inst_id])
  end

  job :run_vol_store do
    inst_id = request.args[0]
    vol_id = request.args[1]

    inst = rpc.request('hva-collector', 'get_instance', inst_id)
    vol = rpc.request('sta-collector', 'get_volume', vol_id)
    logger.info("Booting #{inst_id}")
    raise "Invalid instance state: #{inst[:state]}" unless inst[:state].to_s == 'init'

    # setup vm data folder
    inst_data_dir = File.expand_path("#{inst_id}", @node.manifest.config.vm_data_dir)
    FileUtils.mkdir(inst_data_dir)

    # create volume from snapshot
    jobreq.run("zfs-handle.#{vol[:storage_pool][:node_id]}", "create_volume", vol_id)

    logger.debug("volume created on #{vol[:storage_pool][:node_id]}: #{vol_id}")
    # reload volume info
    vol = rpc.request('sta-collector', 'get_volume', vol_id)

    # wait until the device file below is created.
    # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
    linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{vol[:storage_pool][:ipaddr]}:3260",
                                                                  vol[:transport_information][:iqn],
                                                                  vol[:transport_information][:lun]]

    # attach disk
    tryagain do
      sh("iscsiadm -m discovery -t sendtargets -p #{vol[:storage_pool][:ipaddr]}")
      sh("iscsiadm -m node -l -T '#{vol[:transport_information][:iqn]}' --portal '#{vol[:storage_pool][:ipaddr]}:3260'")
      sleep 1
      File.exist?(linux_dev_path)
    end

    # run vm
    cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
    args=[inst[:instance_spec][:memory_size],
          inst[:instance_spec][:cpu_cores],
          inst_id,
          inst[:runtime_config][:vnc_port],
          linux_dev_path,
          File.expand_path('kvm.pid', inst_data_dir),
          inst[:runtime_config][:telnet_port]
         ]
    if vnic = inst[:instance_nics].first
      cmd += " -net nic,macaddr=%s -net tap,ifname=%s"
      args << vnic[:mac_addr].unpack('A2'*6).join(':')
      args << vnic[:vif]
    end
    sh(cmd, args)

    rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:running})
    event.publish('hva/instance_started', :args=>[inst_id])
  end

  job :terminate do
    inst_id = request.args[0]

    inst = rpc.request('hva-collector', 'get_instance', inst_id)
    raise "Invalid instance state: #{inst[:state]}" unless inst[:state].to_s == 'running'

    rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:shuttingdown})

    kvm_pid=`pgrep -u root -f vdc-#{inst_id}`
    unless $?.exitstatus == 0 && kvm_pid.to_s =~ /^\d+$/
      raise "No such VM process: kvm -name vdc-#{inst_id}"
    end

    sh("/bin/kill #{kvm_pid}")

    unless inst[:volume].nil?
      inst[:volume].each { |volid, v|
        sh("iscsiadm -m node -T '#{v[:transport_information][:iqn]}' --logout")
      }
    end

    # cleanup vm data folder
    FileUtils.rm_r(File.expand_path("#{inst_id}", @node.manifest.config.vm_data_dir))

    rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:terminated})
    event.publish('hva/instance_terminated', :args=>[inst_id])
  end

  job :attach do
    inst_id = request.args[0]
    vol_id = request.args[1]

    job = Dcmgr::Stm::VolumeContext.new(vol_id)
    inst = rpc.request('hva-collector', 'get_instance', inst_id)
    vol = rpc.request('sta-collector', 'get_volume', vol_id)
    logger.info("Attaching #{vol_id}")
    job.stm.state = vol[:state].to_sym
    raise "Invalid volume state: #{vol[:state]}" unless vol[:state].to_s == 'available'

    job.stm.on_attach
    # wait until the device file below is created.
    # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
    linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{vol[:storage_pool][:ipaddr]}:3260",
                                                                  vol[:transport_information][:iqn],
                                                                  vol[:transport_information][:lun]]

    # attach disk on host os
    tryagain do
      sh("iscsiadm -m discovery -t sendtargets -p #{vol[:storage_pool][:ipaddr]}")
      sh("iscsiadm -m node -l -T '#{vol[:transport_information][:iqn]}' --portal '#{vol[:storage_pool][:ipaddr]}:3260'")
      sleep 1
      File.exist?(linux_dev_path)
    end

    rpc.request('sta-collector', 'update_volume', job.to_hash(:host_device_name => linux_dev_path))
    logger.info("Attaching #{vol_id} on #{inst_id}")
    job.stm.on_attach
    job.on_attach

    # attach disk on guest os
    require 'net/telnet'
    slot_number = nil
    pci = nil

    slink = `ls -la #{linux_dev_path}`.scan(/.+\s..\/..\/([a-z]+)/)
    raise "volume has not been attached on the host os" if slink.nil?

    begin
      telnet = ::Net::Telnet.new("Host" => "localhost", "Port"=>"#{inst[:runtime_config][:telnet_port]}", "Prompt" => /\n\(qemu\) /, "Timeout" => 60, "Waittime" => 0.2)
      telnet.cmd({"String" => "pci_add auto storage file=/dev/#{slink},if=scsi", "Match" => /.+slot\s[0-9]+.+/}){|c|
        pci_add = c.scan(/.+slot\s([0-9]+).+/)
        slot_number = pci_add unless pci_add.empty?
      }
      telnet.cmd("info pci"){|c|
        pci = c.scan(/^(.+[a-zA-z]+.+[0-9],.+device.+#{slot_number},.+:)/)
      }
    rescue => e
      logger.error(e)
    ensure
      telnet.close
    end
    raise "volume has not been attached" if pci.nil?
    rpc.request('sta-collector', 'update_volume', job.to_hash(:guest_device_name=>slot_number))
    logger.info("Attached #{vol_id} on #{inst_id}")
  end

  job :detach do
    inst_id = request.args[0]
    vol_id = request.args[1]

    job = Dcmgr::Stm::VolumeContext.new(vol_id)
    inst = rpc.request('hva-collector', 'get_instance', inst_id)
    vol = rpc.request('sta-collector', 'get_volume', vol_id)
    logger.info("Detaching #{vol_id} on #{inst_id}")
    job.stm.state = vol[:state].to_sym
    raise "Invalid volume state: #{vol[:state]}" unless vol[:state].to_s == 'attached'

    job.stm.on_detach
    # detach disk on guest os
    require 'net/telnet'
    pci = nil

    begin
      telnet = ::Net::Telnet.new("Host" => "localhost", "Port"=>"#{inst[:runtime_config][:telnet_port]}", "Prompt" => /\n\(qemu\) /, "Timeout" => 60, "Waittime" => 0.2)
      telnet.cmd("pci_del #{vol[:guest_device_name]}")
      telnet.cmd("info pci"){|c|
        pci = c.scan(/^(.+[a-zA-z]+.+[0-9],.+device.+#{vol[:guest_device_name]},.+:)/)
      }
    rescue => e
      logger.error(e)
    ensure
      telnet.close
    end
    raise "volume has not been detached" unless pci.empty?
    rpc.request('sta-collector', 'update_volume', job.to_hash)

    # iscsi logout
    job.stm.on_detach
    job.on_detach
    logger.info("iscsi logout #{vol_id}: #{vol[:transport_information][:iqn]}")
    initiator = `sudo iscsiadm -m node -T '#{vol[:transport_information][:iqn]}' --logout`
    rpc.request('sta-collector', 'update_volume', job.to_hash)
  end

  def rpc
    @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
  end

  def jobreq
    @jobreq ||= Isono::NodeModules::JobChannel.new(@node)
  end

  def event
    @event ||= Isono::NodeModules::EventChannel.new(@node)
  end
end

|
+
manifest = DEFAULT_MANIFEST.dup
|
954
|
+
manifest.instance_eval do
|
955
|
+
node_name 'hva'
|
956
|
+
node_instance_id "#{Isono::Util.default_gw_ipaddr}"
|
957
|
+
load_module Isono::NodeModules::NodeHeartbeat
|
958
|
+
load_module ServiceNetfilter
|
959
|
+
|
960
|
+
config do |c|
|
961
|
+
c.vm_data_dir = '/var/lib/vm'
|
962
|
+
c.enable_ebtables = true
|
963
|
+
c.enable_iptables = true
|
964
|
+
end
|
965
|
+
|
966
|
+
config_path File.expand_path('config/hva.conf', app_root)
|
967
|
+
load_config
|
968
|
+
end
|
969
|
+
|
970
|
+
start(manifest) do
|
971
|
+
endpoint "kvm-handle.#{@node.node_id}", KvmHandler
|
972
|
+
end
|