wakame-vdc-agents 10.11.0 → 10.12.0

data/Rakefile CHANGED
@@ -11,14 +11,14 @@ require 'rake/clean'
11
11
  require 'dcmgr'
12
12
 
13
13
  task :environment do
14
- Dcmgr.configure 'dcmgr.conf'
14
+ Dcmgr.configure 'config/dcmgr.conf'
15
15
  Dcmgr.run_initializers
16
16
  end
17
17
 
18
18
  namespace :db do
19
19
  desc 'Create all database tables'
20
20
  task :init => [ :environment ] do
21
- ::Kernel.load(File.expand_path('../conf/initializers/sequel.rb', __FILE__))
21
+ ::Kernel.load(File.expand_path('../config/initializers/sequel.rb', __FILE__))
22
22
  require 'isono'
23
23
 
24
24
  Dcmgr::Models::CREATE_TABLE_CLASSES.each { |c|
data/bin/hva CHANGED
@@ -5,13 +5,14 @@ begin
5
5
  require 'rubygems'
6
6
  require 'bundler'
7
7
  Bundler.setup(:default)
8
- rescue Exception
8
+ rescue Exception
9
9
  end
10
10
 
11
11
  require File.expand_path('../../config/path_resolver', __FILE__)
12
12
 
13
13
  include Isono::Runner::RpcServer
14
14
  require 'fileutils'
15
+ require 'ipaddress'
15
16
 
16
17
  class ServiceNetfilter < Isono::NodeModules::Base
17
18
  include Dcmgr::Logger
@@ -29,8 +30,9 @@ class ServiceNetfilter < Isono::NodeModules::Base
29
30
  @worker_thread.pass {
30
31
  logger.info("refresh on instance_started: #{args.inspect}")
31
32
  inst_id = args[0]
32
- logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
33
- myinstance.refresh_netfilter_by_friend_instance_id(inst_id)
33
+ #logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
34
+ #myinstance.refresh_netfilter_by_friend_instance_id(inst_id, 'started')
35
+ myinstance.init_netfilter
34
36
  }
35
37
  end
36
38
 
@@ -38,8 +40,9 @@ class ServiceNetfilter < Isono::NodeModules::Base
38
40
  @worker_thread.pass {
39
41
  logger.info("refresh on instance_terminated: #{args.inspect}")
40
42
  inst_id = args[0]
41
- logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
42
- myinstance.refresh_netfilter_by_friend_instance_id(inst_id)
43
+ #logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
44
+ #myinstance.refresh_netfilter_by_friend_instance_id(inst_id, 'terminated')
45
+ myinstance.init_netfilter
43
46
  }
44
47
  end
45
48
 
@@ -47,7 +50,8 @@ class ServiceNetfilter < Isono::NodeModules::Base
47
50
  @worker_thread.pass {
48
51
  logger.info("refresh on netfilter_updated: #{args.inspect}")
49
52
  netfilter_group_id = args[0]
50
- myinstance.refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
53
+ #myinstance.refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
54
+ myinstance.init_netfilter
51
55
  }
52
56
  end
53
57
  end
@@ -56,8 +60,22 @@ class ServiceNetfilter < Isono::NodeModules::Base
56
60
  begin
57
61
  inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
58
62
 
59
- init_ebtables(inst_maps) if @node.manifest.config.enable_ebtables
63
+ viftable_map = {}
64
+ inst_maps = inst_maps.map { |inst_map|
65
+ viftable_map[ inst_map[:ips].first ] = inst_map[:instance_nics].first[:uuid]
66
+
67
+ # Does the hva have instance?
68
+ unless inst_map[:host_pool][:node_id] == node.node_id
69
+ logger.warn("no match for the instance: #{inst_map[:uuid]}")
70
+ next
71
+ end
72
+ # Does host have vif?
73
+ next unless valid_vif?(inst_map[:instance_nics].first[:uuid])
74
+ inst_maps
75
+ }.flatten.uniq.compact
76
+
60
77
  init_iptables(inst_maps) if @node.manifest.config.enable_iptables
78
+ init_ebtables(inst_maps, viftable_map) if @node.manifest.config.enable_ebtables
61
79
  logger.info("initialize netfilter")
62
80
  rescue Exception => e
63
81
  p e
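
In the hunk above, init_netfilter now builds viftable_map, a reverse lookup from each instance's first IPv4 address to its vif uuid, while filtering out instances hosted on a different HVA node (the valid_vif? check is left out here). A minimal sketch of that idea with invented data; the uuids, addresses, and node ids below are illustrative only, not taken from a live collector:

    inst_maps = [
      { uuid: 'i-demo1', ips: ['10.0.0.10'],
        instance_nics: [{ uuid: 'vif-demo1' }],
        host_pool: { node_id: 'hva.node1' } },
      { uuid: 'i-demo2', ips: ['10.0.0.11'],
        instance_nics: [{ uuid: 'vif-demo2' }],
        host_pool: { node_id: 'hva.node2' } },   # hosted elsewhere; dropped below
    ]

    viftable_map = {}
    inst_maps = inst_maps.select { |m|
      viftable_map[m[:ips].first] = m[:instance_nics].first[:uuid]
      m[:host_pool][:node_id] == 'hva.node1'     # keep only instances on this node
    }
    p viftable_map                     # => {"10.0.0.10"=>"vif-demo1", "10.0.0.11"=>"vif-demo2"}
    p inst_maps.map { |m| m[:uuid] }   # => ["i-demo1"]
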
@@ -65,68 +83,109 @@ class ServiceNetfilter < Isono::NodeModules::Base
65
83
  end
66
84
 
67
85
  # from event_subscriber
68
- def refresh_netfilter_by_friend_instance_id(inst_id)
69
- raise "UnknownInstanceID" if inst_id.nil?
86
+ # def refresh_netfilter_by_friend_instance_id(inst_id, state = 'started')
87
+ # raise "UnknownInstanceID" if inst_id.nil?
88
+ #
89
+ # begin
90
+ # inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
91
+ # ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
92
+ #
93
+ # inst_maps = ng.map { |g|
94
+ # rpc.request('hva-collector', 'get_instances_of_netfilter_group', g[:id])
95
+ # }
96
+ #
97
+ # # my instance_id
98
+ # # when terminated?
99
+ # if state == 'terminated'
100
+ # unless inst_map.nil?
101
+ # refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
102
+ # refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
103
+ # end
104
+ # end
105
+ #
106
+ # # friend instance(s)
107
+ # if inst_maps.size > 0
108
+ # inst_maps.flatten.uniq.each { |inst_map|
109
+ # unless inst_map.nil?
110
+ # refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
111
+ # refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
112
+ # end
113
+ # }
114
+ # end
115
+ # rescue Exception => e
116
+ # p e
117
+ # end
118
+ # end
70
119
 
71
- begin
72
- inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
73
- ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
120
+ # from event_subscriber
121
+ # def refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
122
+ # raise "UnknownNetfilterGroupID" if netfilter_group_id.nil?
123
+ #
124
+ # begin
125
+ # inst_maps = rpc.request('hva-collector', 'get_instances_of_netfilter_group', netfilter_group_id)
126
+ # inst_maps.each { |inst_map|
127
+ # unless inst_map.nil?
128
+ # refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
129
+ # refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
130
+ # end
131
+ # }
132
+ # rescue Exception => e
133
+ # p e
134
+ # end
135
+ # end
136
+
137
+ def init_ebtables(inst_maps = [], viftable_map = {})
138
+ cmd = "ebtables --init-table"
139
+ puts cmd
140
+ system(cmd)
141
+
142
+ basic_cmds = []
143
+ group_cmds = []
144
+ final_cmds = []
74
145
 
75
- inst_maps = ng.map { |g|
76
- rpc.request('hva-collector', 'get_instances_of_netfilter_group', g[:id])
146
+ inst_maps.each { |inst_map|
147
+ vif_map = {
148
+ :uuid => inst_map[:instance_nics].first[:uuid],
149
+ :mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
150
+ :ipv4 => inst_map[:ips].first,
77
151
  }
78
152
 
79
- if inst_maps.size > 0
80
- inst_maps.flatten.uniq.each { |inst_map|
81
- unless inst_map.nil?
82
- refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
83
- refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
84
- end
85
- }
86
- end
87
- rescue Exception => e
88
- p e
89
- end
90
- end
153
+ basic_cmds << build_ebtables_basic_part(vif_map, inst_map)
154
+ group_cmds << build_ebtables_group_part(vif_map, inst_map, viftable_map)
155
+ final_cmds << build_ebtables_final_part(vif_map)
156
+ }
91
157
 
92
- # from event_subscriber
93
- def refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
94
- raise "UnknownNetfilterGroupID" if netfilter_group_id.nil?
158
+ viftable_map.each { |k,v|
159
+ p "#{v} <-> #{k}"
160
+ }
95
161
 
96
- begin
97
- inst_maps = rpc.request('hva-collector', 'get_instances_of_netfilter_group', netfilter_group_id)
98
- inst_maps.each { |inst_map|
99
- unless inst_map.nil?
100
- refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
101
- refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
102
- end
103
- }
104
- rescue Exception => e
105
- p e
106
- end
107
- end
162
+ logger.debug("basic_cmds ...")
163
+ basic_cmds.flatten.uniq.each { |cmd|
164
+ system(cmd)
165
+ }
108
166
 
109
- def init_ebtables(inst_maps = [])
110
- cmd = "sudo ebtables --init-table"
111
- puts cmd
112
- system(cmd)
167
+ logger.debug("group_cmds ...")
168
+ group_cmds.flatten.uniq.each { |cmd|
169
+ system(cmd)
170
+ }
113
171
 
114
- inst_maps.each { |inst_map|
115
- refresh_ebtables(inst_map)
172
+ logger.debug("final_cmds ...")
173
+ final_cmds.flatten.uniq.each { |cmd|
174
+ system(cmd)
116
175
  }
117
176
  end
118
177
 
119
178
  def init_iptables(inst_maps = [])
120
179
  [ 'nat', 'filter' ].each { |table|
121
180
  [ 'F', 'Z', 'X' ].each { |xcmd|
122
- cmd = "sudo iptables -t #{table} -#{xcmd}"
181
+ cmd = "iptables -t #{table} -#{xcmd}"
123
182
  puts cmd
124
183
  system(cmd)
125
184
  }
126
185
  }
127
186
 
128
187
  inst_maps.each { |inst_map|
129
- refresh_iptables(inst_map)
188
+ refresh_iptables(inst_map, false)
130
189
  }
131
190
  end
132
191
 
@@ -142,106 +201,208 @@ class ServiceNetfilter < Isono::NodeModules::Base
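
The hunk below retires refresh_ebtables in favour of three builders, build_ebtables_basic_part, build_ebtables_group_part and build_ebtables_final_part, which only assemble command strings; init_ebtables then runs the basic, group and final lists in that order. Chain names are now derived from the vif uuid rather than the vif device name. A rough sketch of the naming scheme, using an invented vif uuid and MAC address:

    vif_map = { uuid: 'vif-demo1', mac: '52:54:00:12:34:56' }

    chains = []
    chains << "s_#{vif_map[:uuid]}"         # traffic sourced from the guest
    chains << "d_#{vif_map[:uuid]}"         # traffic destined to the guest
    chains << "s_#{vif_map[:uuid]}_d_hst"   # guest -> host
    chains << "d_#{vif_map[:uuid]}_s_hst"   # host  -> guest
    %w(ip4 arp).each { |proto|
      chains << "s_#{vif_map[:uuid]}_#{proto}"
      chains << "d_#{vif_map[:uuid]}_#{proto}"
    }
    puts chains
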
142
201
  end
143
202
  end
144
203
 
145
- def refresh_ebtables(inst_map = {})
146
- logger.debug("refresh_ebtables: #{inst_map[:uuid]} ...")
147
-
148
- # Does the hva have instance?
149
- unless inst_map[:host_pool][:node_id] == node.node_id
150
- logger.warn("no match for the instance: #{inst_map[:uuid]}")
151
- return
152
- end
153
-
154
- network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
155
- raise "UnknownNetworkId" if network_map.nil?
156
-
157
- vif = inst_map[:instance_nics].first[:vif]
158
- vif_mac = inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':')
159
-
160
- flush_ebtables(inst_map)
161
-
162
- # Does host have vif?
163
- unless valid_vif?(vif)
164
- return
165
- end
166
-
167
- # group node IPv4 addresses.
168
- ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
169
-
170
- # xtables commands
171
- cmds = []
204
+ # def refresh_ebtables(inst_map = {}, viftable_map = {})
205
+ # logger.debug("refresh_ebtables: #{inst_map[:uuid]} ...")
206
+ #
207
+ # vif_map = {
208
+ # :uuid => inst_map[:instance_nics].first[:uuid],
209
+ # :mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
210
+ # }
211
+ #
212
+ # # xtables commands
213
+ # basic_cmds = build_ebtables_basic_part(vif_map, inst_map)
214
+ # group_cmds = build_ebtables_group_part(vif_map, inst_map, viftable_map)
215
+ # final_cmds = build_ebtables_final_part(vif_map)
216
+ #
217
+ # logger.debug("refresh_ebtables: #{inst_map[:uuid]} done.")
218
+ # end
219
+
220
+ def build_ebtables_basic_part(vif_map, inst_map)
221
+ basic_cmds = []
222
+ hva_ipv4 = Isono::Util.default_gw_ipaddr
223
+
224
+ ################################
225
+ ## 0. chain name
226
+ ################################
172
227
 
173
228
  # support IP protocol
174
229
  protocol_maps = {
175
230
  'ip4' => 'ip4',
176
231
  'arp' => 'arp',
177
- #ip6' => 'ip6',
178
- #rarp' => '0x8035',
232
+ #'ip6' => 'ip6',
233
+ #'rarp' => '0x8035',
179
234
  }
180
235
 
181
236
  # make chain names.
182
237
  chains = []
183
- chains << "s_#{vif}"
184
- chains << "d_#{vif}"
185
- chains << "s_#{vif}_d_host"
238
+ chains << "s_#{vif_map[:uuid]}"
239
+ chains << "d_#{vif_map[:uuid]}"
240
+ chains << "s_#{vif_map[:uuid]}_d_hst"
241
+ chains << "d_#{vif_map[:uuid]}_s_hst"
186
242
  protocol_maps.each { |k,v|
187
- chains << "s_#{vif}_#{k}"
188
- chains << "d_#{vif}_#{k}"
189
- chains << "s_#{vif}_d_host_#{k}"
243
+ chains << "s_#{vif_map[:uuid]}_#{k}"
244
+ chains << "d_#{vif_map[:uuid]}_#{k}"
245
+ chains << "s_#{vif_map[:uuid]}_d_hst_#{k}"
246
+ chains << "d_#{vif_map[:uuid]}_s_hst_#{k}"
190
247
  }
191
248
 
249
+ ################################
250
+ ## 1. basic part
251
+ ################################
252
+
192
253
  # create user defined chains.
193
254
  [ 'N' ].each { |xcmd|
194
255
  chains.each { |chain|
195
- cmds << "sudo ebtables -#{xcmd} #{chain}"
256
+ basic_cmds << "ebtables -#{xcmd} #{chain}"
196
257
  }
197
258
  }
198
259
 
199
260
  # jump to user defined chains
200
- cmds << "sudo ebtables -A FORWARD -i #{vif} -j s_#{vif}"
201
- cmds << "sudo ebtables -A FORWARD -o #{vif} -j d_#{vif}"
202
- cmds << "sudo ebtables -A INPUT -i #{vif} -j s_#{vif}_d_host"
261
+ basic_cmds << "ebtables -A FORWARD -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
262
+ basic_cmds << "ebtables -A FORWARD -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
263
+ basic_cmds << "ebtables -A INPUT -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}_d_hst"
264
+ basic_cmds << "ebtables -A OUTPUT -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}_s_hst"
203
265
 
204
266
  # IP protocol routing
205
267
  protocol_maps.each { |k,v|
206
- cmds << "sudo ebtables -A s_#{vif} -p #{v} -j s_#{vif}_#{k}"
207
- cmds << "sudo ebtables -A d_#{vif} -p #{v} -j d_#{vif}_#{k}"
208
- cmds << "sudo ebtables -A s_#{vif}_d_host -p #{v} -j s_#{vif}_d_host_#{k}"
268
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -p #{v} -j s_#{vif_map[:uuid]}_#{k}"
269
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]} -p #{v} -j d_#{vif_map[:uuid]}_#{k}"
270
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -p #{v} -j s_#{vif_map[:uuid]}_d_hst_#{k}"
271
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst -p #{v} -j d_#{vif_map[:uuid]}_s_hst_#{k}"
209
272
  }
210
273
 
211
274
  # default drop
212
- cmds << "sudo ebtables -A s_#{vif} --log-level warning --log-ip --log-arp --log-prefix 's_#{vif} DROP:' -j CONTINUE"
213
- cmds << "sudo ebtables -A s_#{vif}_d_host --log-level warning --log-ip --log-arp --log-prefix 's_#{vif}_d_host DROP:' -j CONTINUE"
214
- cmds << "sudo ebtables -A s_#{vif} -j DROP"
215
- cmds << "sudo ebtables -A s_#{vif}_d_host -j DROP"
275
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]} --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}:' -j CONTINUE"
276
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst:' -j CONTINUE"
277
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -j DROP"
278
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -j DROP"
279
+
280
+ # anti spoof: mac
281
+ # guest -> *
282
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
283
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
284
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
285
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
286
+
287
+ # guest <- * (broadcast)
288
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
289
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_hst_arp:' -j CONTINUE"
290
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
291
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
292
+
293
+ # guest <- *
294
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
295
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
296
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
297
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
298
+
299
+ # anti spoof: ipv4
300
+ inst_map[:ips].each { |ipv4|
301
+ # guest -> *
302
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
303
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
304
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
305
+ basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
306
+ # guest <- *
307
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
308
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
309
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
310
+ basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
311
+ }
312
+
313
+ basic_cmds
314
+ end
216
315
 
217
- # anti spoof
218
- #cmds << "sudo ebtables -A s_#{vif}_arp --protocol arp --arp-mac-src ! #{vif_mac} -j DROP"
219
- #cmds << "sudo ebtables -A d_#{vif}_arp --protocol arp --arp-mac-dst ! #{vif_mac} -j DROP"
220
316
 
221
- # group nodes.
222
- ipv4s << network_map[:ipv4_gw]
223
- ipv4s << network_map[:dns_server]
224
- ipv4s << network_map[:dhcp_server]
225
- ipv4s.uniq.each do |ipv4|
226
- cmds << "sudo ebtables -A d_#{vif}_arp --protocol arp --arp-ip-src #{ipv4} -j ACCEPT"
227
- end
317
+ def build_ebtables_group_part(vif_map, inst_map, viftable_map)
318
+ group_cmds = []
319
+ hva_ipv4 = Isono::Util.default_gw_ipaddr
228
320
 
229
- # deny,allow
230
- cmds << "sudo ebtables -A d_#{vif}_arp --log-level warning --log-ip --log-arp --log-prefix 's_#{vif}_arp DROP:' -j CONTINUE"
231
- cmds << "sudo ebtables -A s_#{vif}_d_host_arp --log-level warning --log-ip --log-arp --log-prefix 's_#{vif}_d_host_arp DROP:' -j CONTINUE"
232
- cmds << "sudo ebtables -A d_#{vif}_arp -j DROP"
233
- cmds << "sudo ebtables -A s_#{vif}_d_host_arp -j DROP"
321
+ ################################
322
+ ## 2. group part
323
+ ################################
324
+ same_subnet_ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
234
325
 
235
- cmds.uniq! if cmds.size > 0
236
- cmds.compact.each { |cmd|
237
- puts cmd
238
- system(cmd)
326
+ # detect node joined network(s).
327
+ network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
328
+ raise "UnknownNetworkId" if network_map.nil?
329
+ joined_network = IPAddress("#{network_map[:ipv4_gw]}/#{network_map[:prefix]}")
330
+ [ network_map[:dns_server], network_map[:dhcp_server] ].each { |ipv4|
331
+ next unless joined_network.include? IPAddress(ipv4)
332
+ same_subnet_ipv4s << ipv4
239
333
  }
240
334
 
241
- logger.debug("refresh_ebtables: #{inst_map[:uuid]} done.")
335
+ # network resource node(s)
336
+ ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
337
+ rules = ng_maps.map { |ng_map|
338
+ ng_map[:rules].map { |rule| rule[:permission] }
339
+ }.flatten
340
+ build_rule(rules).each do |rule|
341
+ next unless joined_network.include? IPAddress(rule[:ip_source])
342
+ same_subnet_ipv4s << rule[:ip_source]
343
+ end
344
+ same_subnet_ipv4s << network_map[:ipv4_gw]
345
+
346
+ # guest node(s) in HyperVisor.
347
+ alive_inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
348
+ guest_ipv4s = alive_inst_maps.map { |alive_inst_map|
349
+ alive_inst_map[:ips]
350
+ }.flatten.uniq.compact
351
+
352
+ same_subnet_ipv4s.uniq.reverse_each do |ipv4|
353
+ next if vif_map[:ipv4] == ipv4
354
+
355
+ # get_macaddr_by_ipv4, ipv4
356
+ if ipv4 == hva_ipv4
357
+ #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [host] ***-****** (#{ipv4})"
358
+ group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
359
+ group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
360
+ elsif guest_ipv4s.include?(ipv4)
361
+ #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [guest] #{viftable_map[ipv4]}(#{ipv4})"
362
+
363
+ # guest->guest
364
+ group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
365
+ group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
366
+ # guest->host
367
+ group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
368
+ group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
369
+
370
+ unless viftable_map[ipv4].nil?
371
+ # guest->guest
372
+ group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Arv d_#{viftable_map[ipv4]}_arp:' -j CONTINUE"
373
+ group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
374
+
375
+ # guest->host
376
+ group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Arv s_#{viftable_map[ipv4]}_d_hst_arp:' -j CONTINUE"
377
+ group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
378
+ end
379
+ else
380
+ #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [other] ***-******** (#{ipv4})"
381
+ group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw :d_#{vif_map[:uuid]}_arp' -j CONTINUE"
382
+ group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
383
+ end
384
+ end
385
+
386
+ group_cmds
242
387
  end
243
388
 
244
- def refresh_iptables(inst_map = {})
389
+
390
+ def build_ebtables_final_part(vif_map)
391
+ final_cmds = []
392
+
393
+ ################################
394
+ ## 3. final part
395
+ ################################
396
+ # deny,allow
397
+ final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --log-level warning --log-ip --log-arp --log-prefix 'D d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
398
+ final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
399
+ final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp -j DROP"
400
+ final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp -j DROP"
401
+
402
+ final_cmds
403
+ end
404
+
405
+ def refresh_iptables(inst_map = {}, with_flush = 1)
245
406
  logger.debug("refresh_iptables: #{inst_map[:uuid]} ...")
246
407
 
247
408
  # Does the hva have instance?
@@ -250,27 +411,34 @@ class ServiceNetfilter < Isono::NodeModules::Base
250
411
  return
251
412
  end
252
413
 
253
- network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
414
+ network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
254
415
  raise "UnknownNetworkId" if network_map.nil?
255
416
 
256
- vif = inst_map[:instance_nics].first[:vif]
417
+ vif = inst_map[:instance_nics].first[:uuid]
257
418
  vif_mac = inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':')
258
419
 
259
- flush_iptables(inst_map)
420
+ if with_flush
421
+ flush_iptables(inst_map)
422
+ end
260
423
 
261
424
  # Does host have vif?
262
425
  unless valid_vif?(vif)
263
426
  return
264
427
  end
265
428
 
429
+
430
+
431
+
266
432
  # group node IPv4 addresses.
267
433
  ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
268
434
 
269
- ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
270
- rules = ng.map { |g|
271
- g[:rules].map { |rule| rule[:permission] }
272
- }
273
- rules.flatten! if rules.size > 0
435
+ ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
436
+ rules = ng_maps.map { |ng_map|
437
+ ng_map[:rules].map { |rule| rule[:permission] }
438
+ }.flatten
439
+
440
+
441
+
274
442
 
275
443
  # xtables commands
276
444
  cmds = []
@@ -293,116 +461,122 @@ class ServiceNetfilter < Isono::NodeModules::Base
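
The hunk below drops the sudo prefix from the per-vif iptables rules while keeping the same overall ordering: accept RELATED/ESTABLISHED traffic early, route new traffic into per-protocol chains, accept the explicit DHCP/DNS and security-group rules, and fall through to a logging drop chain. A simplified sketch of that ordering for inbound TCP on a hypothetical vif (command strings only, nothing is executed):

    vif = 'vif-demo1'
    cmds = []
    cmds << "iptables -N d_#{vif}_tcp"
    cmds << "iptables -A d_#{vif} -m state --state RELATED,ESTABLISHED -p tcp -j ACCEPT"
    cmds << "iptables -A d_#{vif} -p tcp -j d_#{vif}_tcp"
    cmds << "iptables -A d_#{vif}_tcp -p tcp -s 10.0.0.0/24 --dport 22 -j ACCEPT"  # an example security group rule
    cmds << "iptables -A d_#{vif}_tcp -j d_#{vif}_tcp_drop"                        # default: log & drop
    puts cmds
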
293
461
 
294
462
  # metadata-server
295
463
  [ 'A' ].each { |xcmd|
296
- system("sudo iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80")
464
+ system("iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80")
297
465
  }
298
466
 
299
467
  # create user defined chains.
300
468
  [ 'N' ].each { |xcmd|
301
469
  chains.each { |chain|
302
- cmds << "sudo iptables -#{xcmd} #{chain}"
470
+ cmds << "iptables -#{xcmd} #{chain}"
303
471
 
304
472
  # logger & drop
305
- cmds << "sudo iptables -N #{chain}_drop"
306
- cmds << "sudo iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix '#{chain} DROP:'"
307
- cmds << "sudo iptables -A #{chain}_drop -j DROP"
473
+ cmds << "iptables -N #{chain}_drop"
474
+ cmds << "iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix 'D #{chain}:'"
475
+ cmds << "iptables -A #{chain}_drop -j DROP"
308
476
  }
309
477
  }
310
478
 
311
479
  # group nodes
312
480
  ipv4s << network_map[:ipv4_gw]
313
- ipv4s.each { |addr|
314
- cmds << "sudo iptables -A d_#{vif} -s #{addr} -j ACCEPT"
481
+ ipv4s.uniq.reverse_each { |addr|
482
+ cmds << "iptables -A d_#{vif} -s #{addr} -j ACCEPT"
315
483
  }
316
484
 
317
485
  # IP protocol routing
318
486
  [ 's', 'd' ].each do |bound|
319
487
  protocol_maps.each { |k,v|
320
- cmds << "sudo iptables -N #{bound}_#{vif}_#{k}"
488
+ cmds << "iptables -N #{bound}_#{vif}_#{k}"
321
489
 
322
490
  case k
323
491
  when 'tcp'
324
492
  case bound
325
493
  when 's'
326
- cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
494
+ cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
327
495
  when 'd'
328
- #cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
329
- cmds << "sudo iptables -A #{bound}_#{vif} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
330
- cmds << "sudo iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
496
+ #cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
497
+ cmds << "iptables -A #{bound}_#{vif} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
498
+ cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
331
499
  end
332
500
  when 'udp'
333
501
  case bound
334
502
  when 's'
335
- cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
503
+ cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
336
504
  when 'd'
337
- #cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
338
- cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
339
- cmds << "sudo iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
505
+ #cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
506
+ cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
507
+ cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
340
508
  end
341
509
  when 'icmp'
342
510
  case bound
343
511
  when 's'
344
- cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
512
+ cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
345
513
  when 'd'
346
- #cmds << "sudo iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
347
- cmds << "sudo iptables -A #{bound}_#{vif} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
348
- cmds << "sudo iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
514
+ #cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
515
+ cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
516
+ cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
349
517
  end
350
518
  end
351
519
  }
352
520
  end
353
521
 
354
- cmds << "sudo iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}"
355
- cmds << "sudo iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}"
522
+ cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}"
523
+ cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}"
356
524
 
357
525
  ##
358
526
  ## ACCEPT
359
527
  ##
360
528
  # DHCP Server
361
- cmds << "sudo iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
362
- #cmds << "sudo iptables -A d_#{vif}_udp -p udp --sport 67 -j d_#{vif}_udp_drop"
529
+ cmds << "iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
530
+ cmds << "iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 68 -j ACCEPT"
531
+
532
+ #cmds << "iptables -A d_#{vif}_udp -p udp --sport 67 -j d_#{vif}_udp_drop"
363
533
  # DNS Server
364
- cmds << "sudo iptables -A s_#{vif}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"
534
+ cmds << "iptables -A s_#{vif}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"
365
535
 
366
536
  ##
367
537
  ## DROP
368
538
  ##
369
539
  protocol_maps.each { |k,v|
370
540
  # DHCP
371
- cmds << "sudo iptables -A s_#{vif} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif}_#{k}_drop"
541
+ cmds << "iptables -A s_#{vif} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif}_#{k}_drop"
372
542
  # DNS
373
- cmds << "sudo iptables -A s_#{vif} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif}_#{k}_drop"
543
+ cmds << "iptables -A s_#{vif} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif}_#{k}_drop"
374
544
  }
375
545
 
376
546
  # security group
377
- # rules
378
547
  build_rule(rules).each do |rule|
379
548
  case rule[:ip_protocol]
380
549
  when 'tcp', 'udp'
381
- cmds << "sudo iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_dport]} -j ACCEPT"
550
+ cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_dport]} -j ACCEPT"
382
551
  when 'icmp'
383
- # ToDo: implement
384
- # - icmp_type : -1...
385
- # - icmp_code : -1...
386
- # cmds << "sudo iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
387
- cmds << "sudo iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
552
+ # icmp
553
+ # This extension can be used if `--protocol icmp' is specified. It provides the following option:
554
+ # [!] --icmp-type {type[/code]|typename}
555
+ # This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command
556
+ # iptables -p icmp -h
557
+ if rule[:icmp_type] == -1 && rule[:icmp_code] == -1
558
+ cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
559
+ else
560
+ cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
561
+ end
388
562
  end
389
563
  end
390
564
 
391
565
  # drop other routings
392
566
  protocol_maps.each { |k,v|
393
- cmds << "sudo iptables -A d_#{vif}_#{k} -p #{k} -j d_#{vif}_#{k}_drop"
567
+ cmds << "iptables -A d_#{vif}_#{k} -p #{k} -j d_#{vif}_#{k}_drop"
394
568
  }
395
569
 
396
570
  # IP protocol routing
397
571
  [ 'd' ].each do |bound|
398
572
  protocol_maps.each { |k,v|
399
- cmds << "sudo iptables -A #{bound}_#{vif}_#{k} -j #{bound}_#{vif}_#{k}_drop"
573
+ cmds << "iptables -A #{bound}_#{vif}_#{k} -j #{bound}_#{vif}_#{k}_drop"
400
574
  }
401
575
  end
402
576
 
403
577
  cmds.uniq! if cmds.size > 0
404
578
  cmds.compact.each { |cmd|
405
- puts cmd
579
+ #puts cmd
406
580
  system(cmd)
407
581
  }
408
582
 
@@ -427,48 +601,57 @@ class ServiceNetfilter < Isono::NodeModules::Base
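
The hunk below adds the two *_s_hst chains to the flush path and, as before, only removes a chain after probing it, i.e. listing it with ebtables -L and checking the exit status. A sketch of that probe-then-delete pattern (needs ebtables and root to do anything useful; otherwise the probe simply fails and nothing is removed):

    def chain_exists?(chain)
      system("ebtables -L #{chain} >/dev/null 2>&1")
      $?.exitstatus == 0
    end

    chain = 's_vif-demo1'                 # invented chain name
    [ 'F', 'X' ].each { |xcmd|            # flush, then delete
      system("ebtables -#{xcmd} #{chain}") if chain_exists?(chain)
    }
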
427
601
  protocol_maps = {
428
602
  'ip4' => 'ip4',
429
603
  'arp' => 'arp',
430
- #ip6' => 'ip6',
431
- #rarp' => '0x8035',
604
+ #'ip6' => 'ip6',
605
+ #'rarp' => '0x8035',
432
606
  }
433
607
 
434
608
  # make chain names.
435
609
  chains = []
436
610
  chains << "s_#{vif}"
437
611
  chains << "d_#{vif}"
438
- chains << "s_#{vif}_d_host"
612
+ chains << "s_#{vif}_d_hst"
613
+ chains << "d_#{vif}_s_hst"
439
614
  protocol_maps.each { |k,v|
440
615
  chains << "s_#{vif}_#{k}"
441
616
  chains << "d_#{vif}_#{k}"
442
- chains << "s_#{vif}_d_host_#{k}"
617
+ chains << "s_#{vif}_d_hst_#{k}"
618
+ chains << "d_#{vif}_s_hst_#{k}"
443
619
  }
444
620
 
445
621
  # clear rules if exists.
446
- system("sudo ebtables -L s_#{vif} >/dev/null 2>&1")
622
+ system("ebtables -L s_#{vif} >/dev/null 2>&1")
623
+ if $?.exitstatus == 0
624
+ cmd = "ebtables -D FORWARD -i #{vif} -j s_#{vif}"
625
+ puts cmd
626
+ system(cmd)
627
+ end
628
+
629
+ system("ebtables -L d_#{vif} >/dev/null 2>&1")
447
630
  if $?.exitstatus == 0
448
- cmd = "sudo ebtables -D FORWARD -i #{vif} -j s_#{vif}"
631
+ cmd = "ebtables -D FORWARD -o #{vif} -j d_#{vif}"
449
632
  puts cmd
450
633
  system(cmd)
451
634
  end
452
635
 
453
- system("sudo ebtables -L d_#{vif} >/dev/null 2>&1")
636
+ system("ebtables -L s_#{vif}_d_hst >/dev/null 2>&1")
454
637
  if $?.exitstatus == 0
455
- cmd = "sudo ebtables -D FORWARD -o #{vif} -j d_#{vif}"
638
+ cmd = "ebtables -D INPUT -i #{vif} -j s_#{vif}_d_hst"
456
639
  puts cmd
457
640
  system(cmd)
458
641
  end
459
642
 
460
- system("sudo ebtables -L s_#{vif}_d_host >/dev/null 2>&1")
643
+ system("ebtables -L d_#{vif}_s_hst >/dev/null 2>&1")
461
644
  if $?.exitstatus == 0
462
- cmd = "sudo ebtables -D INPUT -i #{vif} -j s_#{vif}_d_host"
645
+ cmd = "ebtables -D OUTPUT -o #{vif} -j d_#{vif}_s_hst"
463
646
  puts cmd
464
647
  system(cmd)
465
648
  end
466
649
 
467
650
  [ 'F', 'Z', 'X' ].each { |xcmd|
468
651
  chains.each { |chain|
469
- system("sudo ebtables -L #{chain} >/dev/null 2>&1")
652
+ system("ebtables -L #{chain} >/dev/null 2>&1")
470
653
  if $?.exitstatus == 0
471
- cmd = "sudo ebtables -#{xcmd} #{chain}"
654
+ cmd = "ebtables -#{xcmd} #{chain}"
472
655
  puts cmd
473
656
  system(cmd)
474
657
  end
@@ -514,25 +697,25 @@ class ServiceNetfilter < Isono::NodeModules::Base
514
697
 
515
698
  # metadata-server
516
699
  [ 'D' ].each { |xcmd|
517
- system("sudo iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80 >/dev/null 2>&1")
700
+ system("iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80 >/dev/null 2>&1")
518
701
  }
519
702
 
520
703
  # clean rules if exists.
521
- system("sudo iptables -nL s_#{vif} >/dev/null 2>&1")
704
+ system("iptables -nL s_#{vif} >/dev/null 2>&1")
522
705
  if $?.exitstatus == 0
523
- system("sudo iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}")
706
+ system("iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}")
524
707
  end
525
708
 
526
- system("sudo iptables -nL d_#{vif} >/dev/null 2>&1")
709
+ system("iptables -nL d_#{vif} >/dev/null 2>&1")
527
710
  if $?.exitstatus == 0
528
- system("sudo iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}")
711
+ system("iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}")
529
712
  end
530
713
 
531
714
  [ 'F', 'Z', 'X' ].each { |xcmd|
532
715
  chains.each { |chain|
533
- system("sudo iptables -nL #{chain} >/dev/null 2>&1")
716
+ system("iptables -nL #{chain} >/dev/null 2>&1")
534
717
  if $?.exitstatus == 0
535
- system("sudo iptables -#{xcmd} #{chain}")
718
+ system("iptables -#{xcmd} #{chain}")
536
719
  end
537
720
  }
538
721
  }
@@ -541,11 +724,13 @@ class ServiceNetfilter < Isono::NodeModules::Base
541
724
  end
542
725
 
543
726
  def build_rule(rules = [])
544
- require 'ipaddress'
545
-
546
727
  rule_maps = []
547
728
 
548
729
  rules.each do |rule|
730
+ rule = rule.strip.gsub(/[\s\t]+/, '')
731
+ from_group = false
732
+ ipv4s = []
733
+
549
734
  # ex.
550
735
  # "tcp:22,22,ip4:0.0.0.0"
551
736
  # "udp:53,53,ip4:0.0.0.0"
@@ -556,34 +741,61 @@ class ServiceNetfilter < Isono::NodeModules::Base
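
The hunk below extends build_rule: the source half of a rule is now scanned with StringScanner, and besides ip4 CIDRs it accepts an account id of the form a-xxxxxxxx, in which case the referenced netfilter group is expanded into one rule per member address. The first parsing phase for a plain ip4 rule, following the "tcp:22,22,ip4:0.0.0.0" format quoted in the comments, looks roughly like this:

    rule = 'tcp:22,22,ip4:10.0.0.0/8'.strip.gsub(/[\s\t]+/, '')

    from_pair, ip_dport, source_pair = rule.split(',')
    ip_protocol, ip_sport = from_pair.split(':')
    protocol, ip_source = source_pair.split(':')

    rule_map = {
      :ip_protocol => ip_protocol,   # "tcp"
      :ip_sport    => ip_sport.to_i, # 22
      :ip_dport    => ip_dport.to_i, # 22
      :protocol    => protocol,      # "ip4"
      :ip_source   => ip_source,     # "10.0.0.0/8"
    }
    p rule_map
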
556
741
  # id_port has been separated in the first phase.
557
742
  from_pair, ip_dport, source_pair = rule.split(',')
558
743
 
744
+ # TODO: more strict validations
745
+ next if from_pair.nil?
746
+ next if ip_dport.nil?
747
+ next if source_pair.nil?
748
+
559
749
  # 2nd phase
560
750
  # ip_protocol : [ tcp | udp | icmp ]
561
751
  # ip_sport : tcp,udp? 1 - 16bit, icmp: -1
562
752
  ip_protocol, ip_sport = from_pair.split(':')
563
753
 
564
- # protocol : [ ip4 | ip6 ]
565
- # ip_source : ip4? xxx.xxx.xxx.xxx./[0-32], ip6?: not yet supprted.
754
+ # protocol : [ ip4 | ip6 | #{account_id} ]
755
+ # ip_source : ip4? xxx.xxx.xxx.xxx/[0-32], ip6? (not yet supported), #{netfilter_group_id}
566
756
  protocol, ip_source = source_pair.split(':')
567
757
 
568
- # validate
569
- next unless protocol == 'ip4'
570
- # next unless IPAddress.valid?(ip_source)
571
-
572
- # IPAddress does't support prefix '0'.
573
- ip_addr, prefix = ip_source.split('/', 2)
574
- if prefix.to_i == 0
575
- ip_source = ip_addr
758
+ begin
759
+ s = StringScanner.new(protocol)
760
+ until s.eos?
761
+ case
762
+ when s.scan(/ip6/)
763
+ # TODO#FUTURE: support IPv6 address format
764
+ next
765
+ when s.scan(/ip4/)
766
+ # IPAddress doesn't support prefix '0'.
767
+ ip_addr, prefix = ip_source.split('/', 2)
768
+ if prefix.to_i == 0
769
+ ip_source = ip_addr
770
+ end
771
+ when s.scan(/a-\w{8}/)
772
+ from_group = true
773
+ inst_maps = rpc.request('hva-collector', 'get_instances_of_account_netfilter_group', protocol, ip_source)
774
+ inst_maps.each { |inst_map|
775
+ ipv4s << inst_map[:ips]
776
+ }
777
+ else
778
+ raise "unexpected protocol '#{s.peep(20)}'"
779
+ end
780
+ end
781
+ rescue Exception => e
782
+ p e
783
+ next
576
784
  end
577
785
 
578
786
  begin
579
- ip = IPAddress(ip_source)
580
- ip_source = case ip.u32
581
- when 0
582
- "#{ip.address}/0"
583
- else
584
- "#{ip.address}/#{ip.prefix}"
585
- end
586
-
787
+ if from_group == false
788
+ #p "from_group:(#{from_group}) ip_source -> #{ip_source}"
789
+ ip = IPAddress(ip_source)
790
+ ip_source = case ip.u32
791
+ when 0
792
+ "#{ip.address}/0"
793
+ else
794
+ "#{ip.address}/#{ip.prefix}"
795
+ end
796
+ else
797
+ ipv4s = ipv4s.flatten.uniq
798
+ end
587
799
  rescue Exception => e
588
800
  p e
589
801
  next
@@ -591,13 +803,25 @@ class ServiceNetfilter < Isono::NodeModules::Base
591
803
 
592
804
  case ip_protocol
593
805
  when 'tcp', 'udp'
594
- rule_maps << {
595
- :ip_protocol => ip_protocol,
596
- :ip_sport => ip_sport.to_i,
597
- :ip_dport => ip_dport.to_i,
598
- :protocol => protocol,
599
- :ip_source => ip_source,
600
- }
806
+ if from_group == false
807
+ rule_maps << {
808
+ :ip_protocol => ip_protocol,
809
+ :ip_sport => ip_sport.to_i,
810
+ :ip_dport => ip_dport.to_i,
811
+ :protocol => protocol,
812
+ :ip_source => ip_source,
813
+ }
814
+ else
815
+ ipv4s.each { |ip|
816
+ rule_maps << {
817
+ :ip_protocol => ip_protocol,
818
+ :ip_sport => ip_sport.to_i,
819
+ :ip_dport => ip_dport.to_i,
820
+ :protocol => 'ip4',
821
+ :ip_source => ip,
822
+ }
823
+ }
824
+ end
601
825
  when 'icmp'
602
826
  # via http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/
603
827
  #
@@ -605,13 +829,46 @@ class ServiceNetfilter < Isono::NodeModules::Base
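
The hunk below makes ICMP rules carry the type and code taken from the rule string (EC2-style, where -1 is a wildcard) and validates both against fixed whitelists before a rule map is emitted; a concrete code is only accepted together with a concrete type. The same check in isolation (the whitelists are copied from the case statements in the diff):

    VALID_ICMP_TYPES = [-1, 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18]
    VALID_ICMP_CODES = (-1..15).to_a

    def valid_icmp?(icmp_type, icmp_code)
      return false unless VALID_ICMP_TYPES.include?(icmp_type)
      return false unless VALID_ICMP_CODES.include?(icmp_code)
      return false if icmp_type == -1 && icmp_code != -1   # wildcard type needs wildcard code
      true
    end

    p valid_icmp?(-1, -1)   # => true  (all ICMP)
    p valid_icmp?(8, 0)     # => true  (echo request)
    p valid_icmp?(8, 99)    # => false
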
605
829
  # This must be specified in the format type:code where both are integers.
606
830
  # Type, code, or both can be specified as -1, which is a wildcard.
607
831
 
608
- rule_maps << {
609
- :ip_protocol => ip_protocol,
610
- :icmp_type => -1, # ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
611
- :icmp_code => -1, # ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
612
- :protocol => protocol,
613
- :ip_source => ip_source,
614
- }
832
+ icmp_type = ip_dport.to_i
833
+ icmp_code = ip_sport.to_i
834
+
835
+ # icmp_type
836
+ case icmp_type
837
+ when -1
838
+ when 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
839
+ else
840
+ next
841
+ end
842
+
843
+ # icmp_code
844
+ case icmp_code
845
+ when -1
846
+ when 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
847
+ # when icmp_type equals -1 icmp_code must equal -1.
848
+ next if icmp_type == -1
849
+ else
850
+ next
851
+ end
852
+
853
+ if from_group == false
854
+ rule_maps << {
855
+ :ip_protocol => ip_protocol,
856
+ :icmp_type => ip_dport.to_i, # ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
857
+ :icmp_code => ip_sport.to_i, # ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
858
+ :protocol => protocol,
859
+ :ip_source => ip_source,
860
+ }
861
+ else
862
+ ipv4s.each { |ip|
863
+ rule_maps << {
864
+ :ip_protocol => ip_protocol,
865
+ :icmp_type => ip_dport.to_i, # ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
866
+ :icmp_code => ip_sport.to_i, # ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
867
+ :protocol => 'ip4',
868
+ :ip_source => ip,
869
+ }
870
+ }
871
+ end
615
872
  end
616
873
  end
617
874
 
@@ -628,70 +885,6 @@ class ServiceNetfilter < Isono::NodeModules::Base
628
885
 
629
886
  end
630
887
 
631
- require 'shellwords'
632
- raise "Shellword is old version." unless Shellwords.respond_to?(:shellescape)
633
- require 'open4'
634
-
635
- module CliHelper
636
- class TimeoutError < RuntimeError; end
637
-
638
- def tryagain(opts={:timeout=>60, :retry=>3}, &blk)
639
- timedout = false
640
- curthread = Thread.current
641
-
642
- timersig = EventMachine.add_timer(opts[:timeout]) {
643
- timedout = true
644
- if curthread
645
- curthread.raise(TimeoutError.new("timeout"))
646
- curthread.pass
647
- end
648
- }
649
-
650
- begin
651
- count = 0
652
- begin
653
- break if blk.call
654
- end while !timedout && ((count += 1) < opts[:retry])
655
- rescue TimeoutError => e
656
- raise e
657
- ensure
658
- curthread = nil
659
- EventMachine.cancel_timer(timersig) rescue nil
660
- end
661
- end
662
-
663
- class CommandError < StandardError
664
- attr_reader :stderr, :stdout
665
- def initialize(msg, stdout, stderr)
666
- super(msg)
667
- @stdout = stdout
668
- @stderr = stderr
669
- end
670
- end
671
-
672
- def sh(cmd, args=[], opts={})
673
- opts = opts.merge({:expect_exitcode=>0})
674
- cmd = sprintf(cmd, *args.map {|a| Shellwords.shellescape(a.to_s) })
675
-
676
- outbuf = errbuf = ''
677
- blk = proc {|pid, stdin, stdout, stderr|
678
- stdin.close
679
- outbuf = stdout
680
- errbuf = stderr
681
- }
682
- stat = Open4::popen4(cmd, &blk)
683
- if self.respond_to? :logger
684
- logger.debug("Exec command (pid=#{stat.pid}): #{cmd}")
685
- logger.debug("STDOUT:\n#{outbuf}\nSTDERR:\n#{errbuf}")
686
- end
687
- if stat.exitstatus != opts[:expect_exitcode]
688
- raise CommandError, "Unexpected exit code=#{stat.extstatus} (expected=#{opts{:expect_exitcode}})",
689
- outbuf, errbuf
690
- end
691
- true
692
- end
693
- end
694
-
695
888
  require 'net/telnet'
696
889
 
697
890
  module KvmHelper
@@ -700,240 +893,448 @@ module KvmHelper
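
The hunk below adds an InstanceMonitor that polls every few seconds, reads each running instance's kvm.pid under vm_data_dir, and marks the instance offline when /proc/<pid> has disappeared; KvmHandler is also reorganised around helpers such as run_kvm and attach_volume_to_host. The liveness test, reduced to a self-contained sketch (vm_data_dir and the instance uuid are placeholders):

    def kvm_alive?(vm_data_dir, inst_uuid)
      pid_file = File.expand_path("#{inst_uuid}/kvm.pid", vm_data_dir)
      return false unless File.exist?(pid_file)
      pid = File.read(pid_file).to_i
      File.exist?("/proc/#{pid}")            # is the kvm process still around?
    end

    p kvm_alive?('/var/lib/vdc', 'i-demo1')  # => false unless such a VM exists
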
700
893
  begin
701
894
  telnet = ::Net::Telnet.new("Host" => "localhost",
702
895
  "Port"=>port.to_s,
703
- "Prompt" => /\n\(qemu\) /,
896
+ "Prompt" => /\n\(qemu\) \z/,
704
897
  "Timeout" => 60,
705
898
  "Waittime" => 0.2)
706
899
 
707
900
  blk.call(telnet)
708
901
  rescue => e
709
902
  logger.error(e) if self.respond_to?(:logger)
903
+ raise e
710
904
  ensure
711
905
  telnet.close
712
906
  end
713
907
  end
714
908
  end
715
909
 
910
+ class InstanceMonitor < Isono::NodeModules::Base
911
+ include KvmHelper
912
+ include Dcmgr::Logger
913
+
914
+ initialize_hook do
915
+ @thread_pool = Isono::ThreadPool.new(1)
916
+ @monitor = EventMachine::PeriodicTimer.new(5) {
917
+ @thread_pool.pass {
918
+ myinstance.check_instance
919
+ }
920
+ }
921
+ end
922
+
923
+ terminate_hook do
924
+ @monitor.cancel
925
+ @thread_pool.shutdown
926
+ end
927
+
928
+ def check_instance()
929
+ instlst = rpc.request('hva-collector', 'get_alive_instances', manifest.node_id)
930
+ instlst.find_all{|i| i[:state] == 'running' }.each { |i|
931
+ begin
932
+ check_kvm_process(i)
933
+ rescue Exception => e
934
+ if i[:status] == 'online'
935
+ logger.error("#{e.class}, #{e.message}")
936
+
937
+ rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:offline}) { |req|
938
+ req.oneshot = true
939
+ }
940
+ event.publish('hva/fault_instance', :args=>[i[:uuid]])
941
+ end
942
+ next
943
+ end
944
+
945
+ if i[:status] != 'online'
946
+ rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:online}) { |req|
947
+ req.oneshot = true
948
+ }
949
+ end
950
+ }
951
+ end
952
+
953
+ private
954
+ def check_kvm_process(i)
955
+ pid = File.read(File.expand_path("#{i[:uuid]}/kvm.pid", node.manifest.config.vm_data_dir)).to_i
956
+ unless File.exists?(File.expand_path(pid.to_s, '/proc'))
957
+ raise "Unable to find the pid of kvm process: #{pid}"
958
+ end
959
+ end
960
+
961
+ def rpc
962
+ @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
963
+ end
964
+
965
+ def event
966
+ @event ||= Isono::NodeModules::EventChannel.new(@node)
967
+ end
968
+ end
969
+
716
970
  class KvmHandler < EndpointBuilder
717
971
  include Dcmgr::Logger
718
- include CliHelper
972
+ include Dcmgr::Helpers::CliHelper
719
973
  include KvmHelper
720
974
 
721
- job :run_local_store do
722
- #hva = rpc.delegate('hva-collector')
723
- inst_id = request.args[0]
724
- logger.info("Booting #{inst_id}")
725
- #inst = hva.get_instance(inst_id)
975
+ def find_nic(ifindex = 2)
976
+ ifindex_map = {}
977
+ Dir.glob("/sys/class/net/*/ifindex").each do |ifindex_path|
978
+ device_name = File.split(File.split(ifindex_path).first)[1]
979
+ ifindex_num = File.readlines(ifindex_path).first.strip
980
+ ifindex_map[ifindex_num] = device_name
981
+ end
982
+ #p ifindex_map
983
+ ifindex_map[ifindex.to_s]
984
+ end
985
+
986
+ def nic_state(if_name = 'eth0')
987
+ operstate_path = "/sys/class/net/#{if_name}/operstate"
988
+ if File.exists?(operstate_path)
989
+ File.readlines(operstate_path).first.strip
990
+ end
991
+ end
992
+
993
+ def run_kvm(os_devpath)
994
+ # run vm
995
+ cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
996
+ args=[@inst[:instance_spec][:memory_size],
997
+ @inst[:instance_spec][:cpu_cores],
998
+ @inst_id,
999
+ @inst[:runtime_config][:vnc_port],
1000
+ os_devpath,
1001
+ File.expand_path('kvm.pid', @inst_data_dir),
1002
+ @inst[:runtime_config][:telnet_port]
1003
+ ]
1004
+ if vnic = @inst[:instance_nics].first
1005
+ cmd += " -net nic,macaddr=%s -net tap,ifname=%s,script=,downscript="
1006
+ args << vnic[:mac_addr].unpack('A2'*6).join(':')
1007
+ args << vnic[:uuid]
1008
+ end
1009
+ sh(cmd, args)
1010
+
1011
+ unless vnic.nil?
1012
+ network_map = rpc.request('hva-collector', 'get_network', @inst[:instance_nics].first[:network_id])
1013
+
1014
+ # physical interface
1015
+ physical_if = find_nic(@node.manifest.config.hv_ifindex)
1016
+ raise "UnknownPhysicalNIC" if physical_if.nil?
1017
+
1018
+ if network_map[:vlan_id] == 0
1019
+ # bridge interface
1020
+ p bridge_if = @node.manifest.config.bridge_novlan
1021
+ unless FileTest.exist?("/sys/class/net/#{bridge_if}/ifindex")
1022
+ sh("/usr/sbin/brctl addbr %s", [bridge_if])
1023
+ sh("/usr/sbin/brctl addif %s %s", [bridge_if, physical_if])
1024
+ end
1025
+ else
1026
+ # vlan interface
1027
+ vlan_if = "#{physical_if}.#{network_map[:vlan_id]}"
1028
+ unless FileTest.exist?("/sys/class/net/#{vlan_if}/ifindex")
1029
+ sh("/sbin/vconfig add #{physical_if} #{network_map[:vlan_id]}")
1030
+ end
1031
+
1032
+ # bridge interface
1033
+ bridge_if = "#{@node.manifest.config.bridge_prefix}-#{physical_if}.#{network_map[:vlan_id]}"
1034
+ unless FileTest.exist?("/sys/class/net/#{bridge_if}/ifindex")
1035
+ sh("/usr/sbin/brctl addbr %s", [bridge_if])
1036
+ sh("/usr/sbin/brctl addif %s %s", [bridge_if, vlan_if])
1037
+ end
1038
+ end
1039
+
1040
+
1041
+ # interface up? down?
1042
+ [ vlan_if, bridge_if ].each do |ifname|
1043
+ if nic_state(ifname) == "down"
1044
+ sh("/sbin/ifconfig #{ifname} 0.0.0.0 up")
1045
+ end
1046
+ end
1047
+
1048
+ sh("/sbin/ifconfig %s 0.0.0.0 up", [vnic[:uuid]])
1049
+ sh("/usr/sbin/brctl addif %s %s", [bridge_if, vnic[:uuid]])
1050
+ end
1051
+ end
1052
+
1053
+ def attach_volume_to_host
1054
+ # check under until the dev file is created.
1055
+ # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
1056
+ linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
1057
+ @vol[:transport_information][:iqn],
1058
+ @vol[:transport_information][:lun]]
1059
+
1060
+ tryagain do
1061
+ next true if File.exist?(linux_dev_path)
1062
+
1063
+ sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_pool][:ipaddr]])
1064
+ sh("iscsiadm -m node -l -T '%s' --portal '%s'",
1065
+ [@vol[:transport_information][:iqn], @vol[:storage_pool][:ipaddr]])
1066
+ sleep 1
1067
+ end
1068
+
1069
+ rpc.request('sta-collector', 'update_volume', {
1070
+ :volume_id=>@vol_id,
1071
+ :state=>:attaching,
1072
+ :host_device_name => linux_dev_path})
1073
+ end
1074
+
1075
+ def detach_volume_from_host
1076
+ # iscsi logout
1077
+ sh("iscsiadm -m node -T '%s' --logout", [@vol[:transport_information][:iqn]])
1078
+
1079
+ rpc.request('sta-collector', 'update_volume', {
1080
+ :volume_id=>@vol_id,
1081
+ :state=>:available,
1082
+ :host_device_name=>nil,
1083
+ :instance_id=>nil,
1084
+ })
1085
+ event.publish('hva/volume_detached', :args=>[@inst_id, @vol_id])
1086
+ end
1087
+
1088
+ def terminate_instance
1089
+ kvm_pid=`pgrep -u root -f vdc-#{@inst_id}`
1090
+ if $?.exitstatus == 0 && kvm_pid.to_s =~ /^\d+$/
1091
+ sh("/bin/kill #{kvm_pid}")
1092
+ else
1093
+ logger.error("Can not find the KVM process. Skipping: kvm -name vdc-#{@inst_id}")
1094
+ end
1095
+ end
1096
+
1097
+ def update_instance_state(opts, ev)
1098
+ raise "Can't update instance info without setting @inst_id" if @inst_id.nil?
1099
+ rpc.request('hva-collector', 'update_instance', @inst_id, opts)
1100
+ event.publish(ev, :args=>[@inst_id])
1101
+ end
1102
+
1103
+ def update_volume_state(opts, ev)
1104
+ raise "Can't update volume info without setting @vol_id" if @vol_id.nil?
1105
+ rpc.request('sta-collector', 'update_volume', opts.merge(:volume_id=>@vol_id))
1106
+ event.publish(ev, :args=>[@vol_id])
1107
+ end
726
1108
 
727
- inst = rpc.request('hva-collector', 'get_instance', inst_id)
728
- raise "Invalid instance state: #{inst[:state]}" unless inst[:state].to_s == 'init'
1109
+ job :run_local_store, proc {
1110
+ @inst_id = request.args[0]
1111
+ logger.info("Booting #{@inst_id}")
729
1112
 
1113
+ @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
1114
+ raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
1115
+
1116
+ rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
730
1117
  # setup vm data folder
731
- inst_data_dir = File.expand_path("#{inst_id}", @node.manifest.config.vm_data_dir)
732
- FileUtils.mkdir(inst_data_dir)
1118
+ @inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
1119
+ FileUtils.mkdir(@inst_data_dir) unless File.exists?(@inst_data_dir)
733
1120
  # copy image file
734
- img_src = inst[:image][:source]
1121
+ img_src = @inst[:image][:source]
735
1122
  case img_src[:type].to_sym
736
1123
  when :http
737
- img_path = File.expand_path("#{inst_id}/#{inst[:uuid]}", @node.manifest.config.vm_data_dir)
1124
+ img_path = File.expand_path("#{@inst[:uuid]}", @inst_data_dir)
738
1125
  sh("curl --silent -o '#{img_path}' #{img_src[:uri]}")
739
1126
  else
740
1127
  raise "Unknown image source type: #{img_src[:type]}"
741
1128
  end
742
1129
 
743
- # boot virtual machine
744
- cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
745
- args = [
746
- inst[:instance_spec][:memory_size],
747
- inst[:instance_spec][:cpu_cores],
748
- inst_id,
749
- inst[:runtime_config][:vnc_port],
750
- img_path,
751
- File.expand_path('kvm.pid', inst_data_dir),
752
- inst[:runtime_config][:telnet_port]
753
- ]
754
- sh(cmd, args)
755
-
756
- rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:running})
757
- event.publish('hva/instance_started', :args=>[inst_id])
758
- end
1130
+ run_kvm(img_path)
1131
+ update_instance_state({:state=>:running}, 'hva/instance_started')
1132
+ }, proc {
1133
+ update_instance_state({:state=>:terminated, :terminated_at=>Time.now},
1134
+ 'hva/instance_terminated')
1135
+ }
 
- job :run_vol_store do
- inst_id = request.args[0]
- vol_id = request.args[1]
+ job :run_vol_store, proc {
+ @inst_id = request.args[0]
+ @vol_id = request.args[1]
+
+ @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+ @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+ logger.info("Booting #{@inst_id}")
+ raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
+
+ rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
 
- inst = rpc.request('hva-collector', 'get_instance', inst_id)
- vol = rpc.request('sta-collector', 'get_volume', vol_id)
- logger.info("Booting #{inst_id}")
- raise "Invalid instance state: #{inst[:state]}" unless inst[:state].to_s == 'init'
-
  # setup vm data folder
- inst_data_dir = File.expand_path("#{inst_id}", @node.manifest.config.vm_data_dir)
- FileUtils.mkdir(inst_data_dir)
+ @inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
+ FileUtils.mkdir(@inst_data_dir) unless File.exists?(@inst_data_dir)
 
  # create volume from snapshot
- jobreq.run("zfs-handle.#{vol[:storage_pool][:node_id]}", "create_volume", vol_id)
+ jobreq.run("zfs-handle.#{@vol[:storage_pool][:node_id]}", "create_volume", @vol_id)
 
- logger.debug("volume created on #{vol[:storage_pool][:node_id]}: #{vol_id}")
+ logger.debug("volume created on #{@vol[:storage_pool][:node_id]}: #{@vol_id}")
  # reload volume info
- vol = rpc.request('sta-collector', 'get_volume', vol_id)
+ @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
 
+ rpc.request('sta-collector', 'update_volume', {:volume_id=>@vol_id, :state=>:attaching})
+ logger.info("Attaching #{@vol_id} on #{@inst_id}")
  # check under until the dev file is created.
  # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
- linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{vol[:storage_pool][:ipaddr]}:3260",
- vol[:transport_information][:iqn],
- vol[:transport_information][:lun]]
+ linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
+ @vol[:transport_information][:iqn],
+ @vol[:transport_information][:lun]]
 
  # attach disk
- tryagain do
- sh("iscsiadm -m discovery -t sendtargets -p #{vol[:storage_pool][:ipaddr]}")
- sh("iscsiadm -m node -l -T '#{vol[:transport_information][:iqn]}' --portal '#{vol[:storage_pool][:ipaddr]}:3260'")
- sleep 1
- File.exist?(linux_dev_path)
- end
-
+ attach_volume_to_host
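attach_volume_to_host replaces the inlined tryagain/iscsiadm loop. A sketch of the helper, assuming it keeps the retry logic removed above (the real definition lives elsewhere in bin/hva):

    def attach_volume_to_host
      # log in to the iSCSI target and wait for the by-path device node to appear
      linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
                                                                    @vol[:transport_information][:iqn],
                                                                    @vol[:transport_information][:lun]]
      tryagain do
        sh("iscsiadm -m discovery -t sendtargets -p #{@vol[:storage_pool][:ipaddr]}")
        sh("iscsiadm -m node -l -T '#{@vol[:transport_information][:iqn]}' --portal '#{@vol[:storage_pool][:ipaddr]}:3260'")
        sleep 1
        File.exist?(linux_dev_path)
      end
    end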
+
  # run vm
- cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
- args=[inst[:instance_spec][:memory_size],
- inst[:instance_spec][:cpu_cores],
- inst_id,
- inst[:runtime_config][:vnc_port],
- linux_dev_path,
- File.expand_path('kvm.pid', inst_data_dir),
- inst[:runtime_config][:telnet_port]
- ]
- if vnic = inst[:instance_nics].first
- cmd += " -net nic,macaddr=%s -net tap,ifname=%s"
- args << vnic[:mac_addr].unpack('A2'*6).join(':')
- args << vnic[:vif]
- end
- sh(cmd, args)
-
- rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:running})
- event.publish('hva/instance_started', :args=>[inst_id])
- end
+ run_kvm(linux_dev_path)
+ update_instance_state({:state=>:running}, 'hva/instance_started')
+ update_volume_state({:state=>:attached}, 'hva/volume_attached')
+ }, proc {
+ update_instance_state({:state=>:terminated, :terminated_at=>Time.now},
+ 'hva/instance_terminated')
+ }
 
  job :terminate do
- inst_id = request.args[0]
-
- inst = rpc.request('hva-collector', 'get_instance', inst_id)
- raise "Invalid instance state: #{inst[:state]}" unless inst[:state].to_s == 'running'
-
- rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:shuttingdown})
+ @inst_id = request.args[0]
+
+ @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+ raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
 
- kvm_pid=`pgrep -u root -f vdc-#{inst_id}`
- unless $?.exitstatus == 0 && kvm_pid.to_s =~ /^\d+$/
- raise "No such VM process: kvm -name vdc-#{inst_id}"
+ begin
+ rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:shuttingdown})
+
+ terminate_instance
+
+ unless @inst[:volume].nil?
+ @inst[:volume].each { |volid, v|
+ @vol_id = volid
+ @vol = v
+ # keep detaching the remaining volumes even if one fails during termination.
+ detach_volume_from_host rescue logger.error($!)
+ }
+ end
+
+ # cleanup vm data folder
+ FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
+ ensure
+ update_instance_state({:state=>:terminated,:terminated_at=>Time.now},
+ 'hva/instance_terminated')
  end
+ end
+
+ # Terminate the instance and unmount its volumes without touching
+ # state management.
+ # Called from HA so that the faulty instance gets cleaned up properly.
+ job :cleanup do
+ @inst_id = request.args[0]
 
- sh("/bin/kill #{kvm_pid}")
+ @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+ raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
 
- unless inst[:volume].nil?
- inst[:volume].each { |volid, v|
- sh("iscsiadm -m node -T '#{v[:transport_information][:iqn]}' --logout")
- }
+ begin
+ terminate_instance
+
+ unless @inst[:volume].nil?
+ @inst[:volume].each { |volid, v|
+ @vol_id = volid
+ @vol = v
+ # keep detaching the remaining volumes even if one fails during termination.
+ detach_volume_from_host rescue logger.error($!)
+ }
+ end
  end
-
- # cleanup vm data folder
- FileUtils.rm_r(File.expand_path("#{inst_id}", @node.manifest.config.vm_data_dir))
 
- rpc.request('hva-collector', 'update_instance', inst_id, {:state=>:terminated})
- event.publish('hva/instance_terminated', :args=>[inst_id])
  end
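Both :terminate and :cleanup now rely on terminate_instance instead of the inlined pgrep/kill sequence. A sketch of that helper, assuming it wraps the code removed above (the actual definition may also wait for the process to exit):

    def terminate_instance
      # find the qemu-kvm process by its -name tag and kill it
      kvm_pid = `pgrep -u root -f vdc-#{@inst_id}`
      unless $?.exitstatus == 0 && kvm_pid.to_s =~ /^\d+$/
        raise "No such VM process: kvm -name vdc-#{@inst_id}"
      end
      sh("/bin/kill #{kvm_pid}")
    end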
 
- job :attach do
- inst_id = request.args[0]
- vol_id = request.args[1]
+ job :attach, proc {
+ @inst_id = request.args[0]
+ @vol_id = request.args[1]
 
- job = Dcmgr::Stm::VolumeContext.new(vol_id)
- inst = rpc.request('hva-collector', 'get_instance', inst_id)
- vol = rpc.request('sta-collector', 'get_volume', vol_id)
- logger.info("Attaching #{vol_id}")
- job.stm.state = vol[:state].to_sym
- raise "Invalid volume state: #{vol[:state]}" unless vol[:state].to_s == 'available'
+ @job = Dcmgr::Stm::VolumeContext.new(@vol_id)
+ @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+ @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+ logger.info("Attaching #{@vol_id}")
+ @job.stm.state = @vol[:state].to_sym
+ raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'available'
 
- job.stm.on_attach
+ @job.stm.on_attach
+ rpc.request('sta-collector', 'update_volume', {:volume_id=>@vol_id, :state=>:attaching})
  # check under until the dev file is created.
  # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
- linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{vol[:storage_pool][:ipaddr]}:3260",
- vol[:transport_information][:iqn],
- vol[:transport_information][:lun]]
+ linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
+ @vol[:transport_information][:iqn],
+ @vol[:transport_information][:lun]]
 
  # attach disk on host os
- tryagain do
- sh("iscsiadm -m discovery -t sendtargets -p #{vol[:storage_pool][:ipaddr]}")
- sh("iscsiadm -m node -l -T '#{vol[:transport_information][:iqn]}' --portal '#{vol[:storage_pool][:ipaddr]}:3260'")
- sleep 1
- File.exist?(linux_dev_path)
- end
+ attach_volume_to_host
 
- rpc.request('sta-collector', 'update_volume', job.to_hash(:host_device_name => linux_dev_path))
- logger.info("Attaching #{vol_id} on #{inst_id}")
- job.stm.on_attach
- job.on_attach
+ logger.info("Attaching #{@vol_id} on #{@inst_id}")
+ @job.stm.on_attach
+ @job.on_attach
 
  # attach disk on guest os
- require 'net/telnet'
- slot_number = nil
- pci = nil
-
- slink = `ls -la #{linux_dev_path}`.scan(/.+\s..\/..\/([a-z]+)/)
- raise "volume has not attached host os" if slink.nil?
 
- begin
- telnet = ::Net::Telnet.new("Host" => "localhost", "Port"=>"#{inst[:runtime_config][:telnet_port]}", "Prompt" => /\n\(qemu\) /, "Timeout" => 60, "Waittime" => 0.2)
- telnet.cmd({"String" => "pci_add auto storage file=/dev/#{slink},if=scsi", "Match" => /.+slot\s[0-9]+.+/}){|c|
- pci_add = c.scan(/.+slot\s([0-9]+).+/)
- slot_number = pci_add unless pci_add.empty?
- }
- telnet.cmd("info pci"){|c|
- pci = c.scan(/^(.+[a-zA-z]+.+[0-9],.+device.+#{slot_number},.+:)/)
- }
- rescue => e
- logger.error(e)
- ensure
- telnet.close
- end
- raise "volume has not attached" if pci.nil?
- rpc.request('sta-collector', 'update_volume', job.to_hash(:guest_device_name=>slot_number))
- logger.info("Attached #{vol_id} on #{inst_id}")
- end
+ # pci_devaddr consists of three hex numbers separated by colons.
+ # dom <= 0xffff && bus <= 0xff && val <= 0x1f
+ # see: qemu-0.12.5/hw/pci.c
+ # /*
+ # * Parse [[<domain>:]<bus>:]<slot>, return -1 on error
+ # */
+ # static int pci_parse_devaddr(const char *addr, int *domp, int *busp, unsigned *slotp)
+ pci_devaddr = nil
+
+ sddev = File.expand_path(File.readlink(linux_dev_path), '/dev/disk/by-path')
+ connect_monitor(@inst[:runtime_config][:telnet_port]) { |t|
+ # success message:
+ # OK domain 0, bus 0, slot 4, function 0
+ # error message:
+ # failed to add file=/dev/xxxx,if=virtio
+ c = t.cmd("pci_add auto storage file=#{sddev},if=scsi")
+ # Note: pci_parse_devaddr(), called by "pci_add", uses strtoul()
+ # with base 16, so its input is expected in hex. However, the result
+ # display in pci_device_hot_add_print() prints the bus and slot
+ # addresses with %d. Keep the values in hex to stay consistent.
+ if c =~ /\nOK domain ([0-9a-fA-F]+), bus ([0-9a-fA-F]+), slot ([0-9a-fA-F]+), function/m
+ # numbers in the OK result are decimal; convert them to hex.
+ pci_devaddr = [$1, $2, $3].map{|i| i.to_i.to_s(16) }
+ else
+ raise "Error in qemu console: #{c}"
+ end
+
+ # double check the pci address.
+ c = t.cmd("info pci")
+
+ # static void pci_info_device(PCIBus *bus, PCIDevice *d)
+ # called by "info pci" prints the PCI bus info with %d.
+ if c.split(/\n/).grep(/^\s+Bus\s+#{pci_devaddr[1].to_i(16)}, device\s+#{pci_devaddr[2].to_i(16)}, function/).empty?
+ raise "Could not find new disk device attached to qemu-kvm: #{pci_devaddr.join(':')}"
+ end
+ }
+
+ rpc.request('sta-collector', 'update_volume', @job.to_hash(:guest_device_name=>pci_devaddr.join(':')))
+ event.publish('hva/volume_attached', :args=>[@inst_id, @vol_id])
+ logger.info("Attached #{@vol_id} on #{@inst_id}")
+ }
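connect_monitor wraps the Net::Telnet session to the qemu monitor that both :attach and :detach used to open by hand. A sketch of the helper, assuming it reuses the connection parameters from the removed code (the real definition is elsewhere in bin/hva):

    def connect_monitor(port, &blk)
      require 'net/telnet'
      begin
        # qemu's monitor is exposed on a local telnet port per instance
        telnet = ::Net::Telnet.new("Host" => "localhost",
                                   "Port" => port.to_s,
                                   "Prompt" => /\n\(qemu\) /,
                                   "Timeout" => 60,
                                   "Waittime" => 0.2)
        blk.call(telnet)
      rescue => e
        logger.error(e)
        raise
      ensure
        telnet.close unless telnet.nil?
      end
    end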
 
  job :detach do
- inst_id = request.args[0]
- vol_id = request.args[1]
-
- job = Dcmgr::Stm::VolumeContext.new(vol_id)
- inst = rpc.request('hva-collector', 'get_instance', inst_id)
- vol = rpc.request('sta-collector', 'get_volume', vol_id)
- logger.info("Detaching #{vol_id} on #{inst_id}")
- job.stm.state = vol[:state].to_sym
- raise "Invalid volume state: #{vol[:state]}" unless vol[:state].to_s == 'attached'
-
- job.stm.on_detach
+ @inst_id = request.args[0]
+ @vol_id = request.args[1]
+
+ @job = Dcmgr::Stm::VolumeContext.new(@vol_id)
+ @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+ @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+ logger.info("Detaching #{@vol_id} on #{@inst_id}")
+ @job.stm.state = @vol[:state].to_sym
+ raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'attached'
+
+ @job.stm.on_detach
+ rpc.request('sta-collector', 'update_volume', @job.to_hash)
  # detach disk on guest os
- require 'net/telnet'
- pci = nil
-
- begin
- telnet = ::Net::Telnet.new("Host" => "localhost", "Port"=>"#{inst[:runtime_config][:telnet_port]}", "Prompt" => /\n\(qemu\) /, "Timeout" => 60, "Waittime" => 0.2)
- telnet.cmd("pci_del #{vol[:guest_device_name]}")
- telnet.cmd("info pci"){|c|
- pci = c.scan(/^(.+[a-zA-z]+.+[0-9],.+device.+#{vol[:guest_device_name]},.+:)/)
- }
- rescue => e
- logger.error(e)
- ensure
- telnet.close
- end
- raise "volume has not detached" unless pci.empty?
- rpc.request('sta-collector', 'update_volume', job.to_hash)
-
- # iscsi logout
- job.stm.on_detach
- job.on_detach
- logger.info("iscsi logout #{vol_id}: #{vol[:transport_information][:iqn]}")
- initiator = `sudo iscsiadm -m node -T '#{vol[:transport_information][:iqn]}' --logout`
- rpc.request('sta-collector', 'update_volume', job.to_hash)
+ pci_devaddr = @vol[:guest_device_name]
+
+ connect_monitor(@inst[:runtime_config][:telnet_port]) { |t|
+ t.cmd("pci_del #{pci_devaddr}")
+ #
+ # Bus 0, device 4, function 0:
+ # SCSI controller: PCI device 1af4:1001
+ # IRQ 0.
+ # BAR0: I/O at 0x1000 [0x103f].
+ # BAR1: 32 bit memory at 0x08000000 [0x08000fff].
+ # id ""
+ c = t.cmd("info pci")
+ pci_devaddr = pci_devaddr.split(':')
+ unless c.split(/\n/).grep(/\s+Bus\s+#{pci_devaddr[1].to_i(16)}, device\s+#{pci_devaddr[2].to_i(16)}, function/).empty?
+ raise "Detached disk device is still attached in qemu-kvm: #{pci_devaddr.join(':')}"
+ end
+ }
+
+ detach_volume_from_host
+
+ @job.stm.on_detach
+ @job.on_detach
  end
 
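detach_volume_from_host takes over the iSCSI logout that the old :detach and :terminate paths ran inline. A sketch under the assumption that it only performs the logout shown in the removed lines (the real helper may also update the volume record):

    def detach_volume_from_host
      # log out of the iSCSI session for this volume's target
      logger.info("iscsi logout #{@vol_id}: #{@vol[:transport_information][:iqn]}")
      sh("iscsiadm -m node -T '#{@vol[:transport_information][:iqn]}' --logout")
    end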
  def rpc
@@ -956,11 +1357,15 @@ manifest.instance_eval do
  node_instance_id "#{Isono::Util.default_gw_ipaddr}"
  load_module Isono::NodeModules::NodeHeartbeat
  load_module ServiceNetfilter
+ load_module InstanceMonitor
 
  config do |c|
  c.vm_data_dir = '/var/lib/vm'
  c.enable_ebtables = true
  c.enable_iptables = true
+ c.hv_ifindex = 2 # ex. /sys/class/net/eth0/ifindex => 2
+ c.bridge_prefix = 'br'
+ c.bridge_novlan = 'br0'
  end
 
  config_path File.expand_path('config/hva.conf', app_root)
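The new bridge settings (hv_ifindex, bridge_prefix, bridge_novlan) presumably feed the host-side network setup. Purely as an illustration of how such settings could be combined, bridge_for below is a hypothetical helper and is not part of this change:

    # pick a bridge device name for a vif, given an optional VLAN id
    def bridge_for(vlan_id, c)
      return c.bridge_novlan if vlan_id.nil? || vlan_id.to_i == 0
      "#{c.bridge_prefix}#{vlan_id.to_i}"
    end

    # bridge_for(nil, c) => "br0"
    # bridge_for(100, c) => "br100"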