wakame-vdc-dcmgr 11.06.0 → 11.12.0
- data/Rakefile +19 -31
- data/bin/collector +6 -1
- data/config/db/migrations/0001_v1110_origin.rb +446 -0
- data/config/dcmgr.conf.example +51 -0
- data/lib/dcmgr.rb +99 -22
- data/lib/dcmgr/cli/base.rb +34 -1
- data/lib/dcmgr/cli/host.rb +24 -20
- data/lib/dcmgr/cli/image.rb +38 -19
- data/lib/dcmgr/cli/keypair.rb +16 -12
- data/lib/dcmgr/cli/network.rb +189 -81
- data/lib/dcmgr/cli/quota.rb +2 -2
- data/lib/dcmgr/cli/security_group.rb +106 -0
- data/lib/dcmgr/cli/spec.rb +144 -39
- data/lib/dcmgr/cli/storage.rb +16 -15
- data/lib/dcmgr/cli/tag.rb +20 -14
- data/lib/dcmgr/cli/vlan.rb +5 -5
- data/lib/dcmgr/drivers/backing_store.rb +32 -0
- data/lib/dcmgr/drivers/comstar.rb +81 -0
- data/lib/dcmgr/drivers/iijgio_storage.rb +9 -19
- data/lib/dcmgr/drivers/iscsi_target.rb +41 -0
- data/lib/dcmgr/drivers/kvm.rb +161 -28
- data/lib/dcmgr/drivers/linux_iscsi.rb +60 -0
- data/lib/dcmgr/drivers/local_storage.rb +24 -0
- data/lib/dcmgr/drivers/lxc.rb +167 -125
- data/lib/dcmgr/drivers/raw.rb +74 -0
- data/lib/dcmgr/drivers/s3_storage.rb +7 -19
- data/lib/dcmgr/drivers/snapshot_storage.rb +18 -28
- data/lib/dcmgr/drivers/storage_initiator.rb +28 -0
- data/lib/dcmgr/drivers/sun_iscsi.rb +32 -0
- data/lib/dcmgr/drivers/zfs.rb +77 -0
- data/lib/dcmgr/endpoints/core_api.rb +315 -263
- data/lib/dcmgr/endpoints/errors.rb +21 -10
- data/lib/dcmgr/endpoints/metadata.rb +360 -23
- data/lib/dcmgr/helpers/cli_helper.rb +6 -3
- data/lib/dcmgr/helpers/ec2_metadata_helper.rb +9 -0
- data/lib/dcmgr/helpers/nic_helper.rb +11 -0
- data/lib/dcmgr/helpers/snapshot_storage_helper.rb +34 -0
- data/lib/dcmgr/models/account.rb +0 -6
- data/lib/dcmgr/models/account_resource.rb +0 -4
- data/lib/dcmgr/models/base_new.rb +14 -2
- data/lib/dcmgr/models/dhcp_range.rb +38 -0
- data/lib/dcmgr/models/frontend_system.rb +0 -6
- data/lib/dcmgr/models/history.rb +0 -11
- data/lib/dcmgr/models/host_node.rb +131 -0
- data/lib/dcmgr/models/hostname_lease.rb +0 -8
- data/lib/dcmgr/models/image.rb +31 -18
- data/lib/dcmgr/models/instance.rb +137 -143
- data/lib/dcmgr/models/instance_nic.rb +52 -29
- data/lib/dcmgr/models/instance_security_group.rb +9 -0
- data/lib/dcmgr/models/instance_spec.rb +163 -31
- data/lib/dcmgr/models/ip_lease.rb +10 -21
- data/lib/dcmgr/models/mac_lease.rb +30 -11
- data/lib/dcmgr/models/network.rb +148 -27
- data/lib/dcmgr/models/physical_network.rb +18 -0
- data/lib/dcmgr/models/quota.rb +0 -10
- data/lib/dcmgr/models/request_log.rb +3 -18
- data/lib/dcmgr/models/security_group.rb +66 -0
- data/lib/dcmgr/models/security_group_rule.rb +145 -0
- data/lib/dcmgr/models/ssh_key_pair.rb +16 -19
- data/lib/dcmgr/models/{storage_pool.rb → storage_node.rb} +35 -25
- data/lib/dcmgr/models/tag.rb +0 -14
- data/lib/dcmgr/models/tag_mapping.rb +1 -7
- data/lib/dcmgr/models/vlan_lease.rb +2 -8
- data/lib/dcmgr/models/volume.rb +49 -37
- data/lib/dcmgr/models/volume_snapshot.rb +15 -17
- data/lib/dcmgr/node_modules/hva_collector.rb +69 -28
- data/lib/dcmgr/node_modules/instance_ha.rb +23 -12
- data/lib/dcmgr/node_modules/instance_monitor.rb +16 -2
- data/lib/dcmgr/node_modules/openflow_controller.rb +784 -0
- data/lib/dcmgr/node_modules/scheduler.rb +189 -0
- data/lib/dcmgr/node_modules/service_netfilter.rb +452 -227
- data/lib/dcmgr/node_modules/service_openflow.rb +731 -0
- data/lib/dcmgr/node_modules/sta_collector.rb +20 -0
- data/lib/dcmgr/node_modules/sta_tgt_initializer.rb +35 -0
- data/lib/dcmgr/rack/request_logger.rb +11 -6
- data/lib/dcmgr/rpc/hva_handler.rb +256 -110
- data/lib/dcmgr/rpc/sta_handler.rb +244 -0
- data/lib/dcmgr/scheduler.rb +122 -8
- data/lib/dcmgr/scheduler/host_node/exclude_same.rb +24 -0
- data/lib/dcmgr/scheduler/host_node/find_first.rb +12 -0
- data/lib/dcmgr/scheduler/host_node/least_usage.rb +28 -0
- data/lib/dcmgr/scheduler/host_node/per_instance.rb +18 -0
- data/lib/dcmgr/scheduler/host_node/specify_node.rb +26 -0
- data/lib/dcmgr/scheduler/network/flat_single.rb +23 -0
- data/lib/dcmgr/scheduler/network/nat_one_to_one.rb +23 -0
- data/lib/dcmgr/scheduler/network/per_instance.rb +39 -0
- data/lib/dcmgr/scheduler/network/vif_template.rb +19 -0
- data/lib/dcmgr/scheduler/storage_node/find_first.rb +13 -0
- data/lib/dcmgr/scheduler/storage_node/least_usage.rb +23 -0
- data/lib/dcmgr/storage_service.rb +39 -40
- data/lib/dcmgr/tags.rb +3 -3
- data/lib/dcmgr/version.rb +1 -1
- data/lib/dcmgr/vnet.rb +105 -0
- data/lib/dcmgr/vnet/factories.rb +141 -0
- data/lib/dcmgr/vnet/isolators/by_securitygroup.rb +21 -0
- data/lib/dcmgr/vnet/isolators/dummy.rb +17 -0
- data/lib/dcmgr/vnet/netfilter/cache.rb +51 -0
- data/lib/dcmgr/vnet/netfilter/chain.rb +66 -0
- data/lib/dcmgr/vnet/netfilter/controller.rb +193 -0
- data/lib/dcmgr/vnet/netfilter/ebtables_rule.rb +53 -0
- data/lib/dcmgr/vnet/netfilter/iptables_rule.rb +45 -0
- data/lib/dcmgr/vnet/netfilter/task_manager.rb +459 -0
- data/lib/dcmgr/vnet/tasks/accept_all_dns.rb +19 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_broadcast.rb +24 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_from_friends.rb +34 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_from_gateway.rb +21 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_to_host.rb +30 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_from_friends.rb +26 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_from_gateway.rb +23 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_to_anywhere.rb +18 -0
- data/lib/dcmgr/vnet/tasks/accept_related_established.rb +45 -0
- data/lib/dcmgr/vnet/tasks/accept_wakame_dhcp_only.rb +33 -0
- data/lib/dcmgr/vnet/tasks/accept_wakame_dns_only.rb +33 -0
- data/lib/dcmgr/vnet/tasks/debug_iptables.rb +21 -0
- data/lib/dcmgr/vnet/tasks/drop_arp_forwarding.rb +27 -0
- data/lib/dcmgr/vnet/tasks/drop_arp_to_host.rb +24 -0
- data/lib/dcmgr/vnet/tasks/drop_ip_from_anywhere.rb +18 -0
- data/lib/dcmgr/vnet/tasks/drop_ip_spoofing.rb +34 -0
- data/lib/dcmgr/vnet/tasks/drop_mac_spoofing.rb +33 -0
- data/lib/dcmgr/vnet/tasks/exclude_from_nat.rb +47 -0
- data/lib/dcmgr/vnet/tasks/security_group.rb +37 -0
- data/lib/dcmgr/vnet/tasks/static_nat.rb +54 -0
- data/lib/dcmgr/vnet/tasks/translate_metadata_address.rb +32 -0
- data/web/metadata/config.ru +1 -1
- metadata +174 -89
- data/lib/dcmgr/cli/group.rb +0 -101
- data/lib/dcmgr/endpoints/core_api_mock.rb +0 -865
- data/lib/dcmgr/models/host_pool.rb +0 -122
- data/lib/dcmgr/models/instance_netfilter_group.rb +0 -16
- data/lib/dcmgr/models/netfilter_group.rb +0 -89
- data/lib/dcmgr/models/netfilter_rule.rb +0 -21
- data/lib/dcmgr/scheduler/find_last.rb +0 -16
- data/lib/dcmgr/scheduler/find_random.rb +0 -16
- data/lib/dcmgr/stm/instance.rb +0 -25
- data/lib/dcmgr/stm/snapshot_context.rb +0 -33
- data/lib/dcmgr/stm/volume_context.rb +0 -65
data/lib/dcmgr/node_modules/service_openflow.rb (new file)
@@ -0,0 +1,731 @@
# -*- coding: utf-8 -*-
$LOAD_PATH.unshift File.expand_path('../../../../../trema/ruby', __FILE__)

require 'fileutils'
require 'isono'
require 'ipaddress'
require 'trema'

module Dcmgr
  module NodeModules

    class ServiceOpenFlow < Isono::NodeModules::Base
      include Dcmgr::Logger
      include Dcmgr::Helpers::NicHelper

      attr_reader :networks

      config_section do
        desc "configuration file for ofc..."
      end

      initialize_hook do
        @networks = {}

        # Trema hack...
        $verbose = true

        Dcmgr.run_initializers('sequel')

        EH = CustomEventHandler.new
        EH.set_as_handler

        Trema::Util::cleanup_current_session

        rule = {
          :port_status => "OpenFlowController",
          :packet_in => "OpenFlowController",
          :state_notify => "OpenFlowController",
          :vendor => "OpenFlowController"
        }

        unix_socket = "#{node.manifest.config.ovs_run_dir}/#{node.manifest.config.bridge_novlan}.controller"

        FileUtils.remove_file(unix_socket, true)

        @switch_manager = Trema::SwitchManager.new( rule, nil, unix_socket )

        myinstance.worker_thread.pass {
          @switch_manager.run!

          myinstance.openflow_controller.init_trema
          myinstance.openflow_controller.run_immediate!
        }

        event = Isono::NodeModules::EventChannel.new(node)

        event.subscribe('hva/instance_started', '#') do |args|
          myinstance.worker_thread.pass {
            logger.info("refresh on instance_started: #{args.inspect}")
            inst_id = args[0]
            myinstance.add_openflow_by_instance_id(inst_id)
          }
        end

        event.subscribe('hva/instance_terminated', '#') do |args|
          myinstance.worker_thread.pass {
            logger.info("refresh on instance_terminated: #{args.inspect}")
            inst_id = args[0]
            myinstance.delete_openflow_by_instance_id(inst_id)
          }
        end

        event.subscribe('hva/openflow_updated', '#') do |args|
          myinstance.worker_thread.pass {
            logger.info("refresh on openflow_updated: #{args.inspect}")
            openflow_group_id = args[0]
            myinstance.refresh_openflow_by_joined_openflow_group_id(openflow_group_id)
          }
        end
      end

      terminate_hook do
        myinstance.worker_thread.pass {
          myinstance.openflow_controller.stop_immediate!
          myinstance.openflow_controller.shutdown!
        }

        Trema::Util::cleanup_current_session
      end

      def add_openflow_by_instance_id(inst_id)
        port = get_port_from_instance_id inst_id
        add_instance_now port[1] if not port.nil?
      end

      def delete_openflow_by_instance_id(inst_id)
        port = get_port_from_instance_id inst_id
        delete_instance_now port[1] if not port.nil?
      end

      def refresh_openflow_by_joined_openflow_group_id(openflow_group_id)
        raise "UnknownOpenflowGroupID" if openflow_group_id.nil?

        logger.info "Refresh Openflow..."
      end

      #
      #
      #

      def add_eth port
        worker_thread.pass { add_eth_now port }
      end

      def add_eth_now port
        logger.info "adding eth #{port.port_info.name}."

        return if port.object_id != openflow_controller.ports[port.port_info.number].object_id
        return if not port.is_active

        logger.info "port: #{port.port_info.number}"
        logger.info "mac: #{port.port_info.hw_addr.to_s}"
        logger.info "config: #{port.port_info.config}"
        logger.info "state: #{port.port_info.state}"
        logger.info "curr: #{port.port_info.curr}"
        logger.info "advertised: #{port.port_info.advertised}"
        logger.info "supported: #{port.port_info.supported}"
        logger.info "peer: #{port.port_info.peer}"

        port_number = port.port_info.number

        networks_map = get_physical_networks
        raise "A single (and only a single) physical network must be registered. (With ipv4_gw set)" unless networks_map.one?

        network = get_network_from_map networks_map[0]

        port.lock.synchronize {
          return if not port.is_active

          network.add_port port_number, true
          openflow_controller.install_eth port
          openflow_controller.update_network network

          port.network = network
          openflow_controller.ovs_ofctl.add_flows_from_list port.queued_flows
          port.queued_flows.clear
        }
      end

      def add_instance port
        worker_thread.pass { add_instance_now port }
      end

      def add_instance_now port
        logger.info "adding instance #{port.port_info.name}."

        return if port.object_id != openflow_controller.ports[port.port_info.number].object_id
        return if port.has_instance or not port.is_active

        inst_map = rpc.request('hva-collector', 'get_instance_of_nic', port.port_info.name)
        raise ArgumentError, "Unknown Nic: #{port.port_info.name}" if inst_map.nil?
        vif_map = inst_map[:vif].detect { |vif| vif[:vif_id] == port.port_info.name }
        raise ArgumentError, "Unknown Nic: #{port.port_info.name}" if vif_map.nil?
        ip_map = vif_map[:ipv4]
        raise ArgumentError, "Unknown Nic: #{port.port_info.name}" if ip_map.nil?

        # logger.info "inst_map: #{inst_map.inspect}"
        logger.debug "vif_map: #{vif_map.inspect}"

        ip_lease = ip_map[:address]
        mac_lease = clean_mac(vif_map[:mac_addr])

        port.ip = ip_lease
        port.mac = mac_lease
        port.has_instance = true

        logger.info "port: #{port.port_info.number}"
        logger.info "mac: #{port.port_info.hw_addr.to_s} <=> #{mac_lease}"
        logger.info "ip: #{ip_lease}"
        logger.info "config: #{port.port_info.config}"
        logger.info "state: #{port.port_info.state}"
        logger.info "curr: #{port.port_info.curr}"
        logger.info "advertised: #{port.port_info.advertised}"
        logger.info "supported: #{port.port_info.supported}"
        logger.info "peer: #{port.port_info.peer}"

        port_number = port.port_info.number
        network = get_network_from_map ip_map[:network]

        if not network.virtual
          openflow_controller.install_route port, mac_lease, ip_lease
          openflow_controller.install_arp_antispoof port, mac_lease, ip_lease

          openflow_controller.install_static_d_transport 17, port, mac_lease, ip_lease, ip_map[:network][:dns_server], 53 if not ip_map[:network][:dns_server].nil?

          if not ip_map[:network][:dns_server].nil?
            openflow_controller.install_static_d_transport 17, port, mac_lease, ip_lease, ip_map[:network][:dhcp_server], 67
            openflow_controller.install_static_d_transport 17, port, mac_lease, ip_lease, ip_map[:network][:dhcp_server], 68
          else
            openflow_controller.install_static_d_transport 17, port, mac_lease, ip_lease, "0.0.0.0/0", 67
            openflow_controller.install_static_d_transport 17, port, mac_lease, ip_lease, "0.0.0.0/0", 68
          end

          add_security_group port, inst_map[:uuid], vif_map

          # Testing guest -> *
          openflow_controller.install_local_icmp port, mac_lease, ip_lease
          openflow_controller.install_local_transport 6, port, mac_lease, ip_lease
          openflow_controller.install_local_transport 17, port, mac_lease, ip_lease

        else
          openflow_controller.install_virtual_route network, port, mac_lease, ip_lease
        end

        port.lock.synchronize {
          return if not port.is_active

          network.add_port port_number, true
          openflow_controller.update_network network

          port.network = network
          openflow_controller.ovs_ofctl.add_flows_from_list port.queued_flows
          port.queued_flows.clear
        }
      end

      # Always call in the worker thread.
      def delete_instance port
        worker_thread.pass { delete_instance_now port }
      end

      def delete_instance_now port
        logger.info "deleting instance #{port.port_info.name}."

        return if not port.has_instance

        ip_lease = port.ip
        mac_lease = port.mac

        port.ip = nil
        port.mac = nil
        port.has_instance = false

        logger.info "port: #{port.port_info.number}"
        logger.info "mac: #{port.port_info.hw_addr.to_s} <=> #{mac_lease}"
        logger.info "ip: #{ip_lease}"
        logger.info "config: #{port.port_info.config}"
        logger.info "state: #{port.port_info.state}"
        logger.info "curr: #{port.port_info.curr}"
        logger.info "advertised: #{port.port_info.advertised}"
        logger.info "supported: #{port.port_info.supported}"
        logger.info "peer: #{port.port_info.peer}"

        networks.each { |network| network.remove_port port.port_info.number }
      end

      def add_tunnel port
        worker_thread.pass { add_tunnel_now port }
      end

      def add_tunnel_now port
        logger.info "Got tunnel port: name:#{port.port_info.name}."

        return if port.object_id != openflow_controller.ports[port.port_info.number].object_id
        return if not port.is_active

        logger.info "port: #{port.port_info.number}"
        logger.info "mac: #{port.port_info.hw_addr.to_s}"
        logger.info "config: #{port.port_info.config}"
        logger.info "state: #{port.port_info.state}"
        logger.info "curr: #{port.port_info.curr}"
        logger.info "advertised: #{port.port_info.advertised}"
        logger.info "supported: #{port.port_info.supported}"
        logger.info "peer: #{port.port_info.peer}"

        # Note that vnet_id may be different from the actual GRE
        # tunnel id used.
        vnet_id = port.port_info.name[/^gre-[a-z]*-([0-9]*)$/, 1].to_i
        raise "GRE tunnel interface name must match 'gre-[a-z]*-[0-9]*'." if vnet_id.nil? or vnet_id == 0

        network = get_network_from_id vnet_id

        port.lock.synchronize {
          return if not port.is_active

          network.add_port port.port_info.number, false
          openflow_controller.update_network network

          port.network = network
          openflow_controller.install_gre_tunnel network.id, port

          openflow_controller.ovs_ofctl.add_flows_from_list port.queued_flows
          port.queued_flows.clear
        }
      end

      def delete_tunnel port
        worker_thread.pass { delete_tunnel_now port }
      end

      def delete_tunnel_now port
        return if not port.has_instance
      end

      # def rebuild_networks
      #   worker_thread.pass { rebuild_networks_now }
      # end

      # def rebuild_networks_now
      #   networks_map = rpc.request('hva-collector', 'get_networks')
      #   raise "Failed to retrieve networks." if networks_map.nil?
      #   # networks.clear

      #   networks_map.each { |network|
      #   }
      # end

      def get_physical_networks
        networks_map = rpc.request('hva-collector', 'get_networks')
        raise "Failed to retrieve networks." if networks_map.nil?

        networks_map.select { |network| not network[:ipv4_gw].nil? }
      end

      def get_network_from_id network_id
        return networks[network_id] if networks.has_key? network_id

        network_map = rpc.request('hva-collector', 'get_network', network_id)
        raise "Failed to retrieve network #{network_id}." if network_map.nil?

        logger.debug "get network from: id:#{network_id} map:#{network_map.inspect}."
        create_network network_map
      end

      def get_network_from_map network_map
        if networks.has_key? network_map[:id]
          networks[network_map[:id]]
        else
          create_network network_map
        end
      end

      def create_network network_map
        throw "Network map is invalid: #{network_map.inspect}." if network_map.nil? or network_map[:id] <= 0

        network_id = network_map[:id]
        throw "Network already created" if networks.has_key? network_id and not networks[network_id].nil?

        if not network_map[:ipv4_gw].nil?
          logger.info "Creating physical network: id:#{network_id} link_interface:#{network_map[:link_interface]}."

          # Do more here...
          network = networks[network_id] = OpenFlowNetwork.new(network_id)
          network.add_port OpenFlowController::OFPP_LOCAL, true

          openflow_controller.install_physical_network network

        else
          logger.info "Creating virtual network: id:#{network_id} link_interface:#{network_map[:link_interface]}."
          raise "No valid IPv4 network defined." if network_map[:ipv4_network].nil? or not network_map[:ipv4_network] =~ /\.0$/

          dhcp_ip = IPAddr.new(network_map[:ipv4_network]) | IPAddr.new("0.0.0.1")

          network = networks[network_id] = OpenFlowNetwork.new(network_id)
          network.dhcp_hw = openflow_controller.local_hw
          network.dhcp_ip = dhcp_ip
          network.ipv4_network = IPAddr.new(network_map[:ipv4_network])
          network.prefix = network_map[:prefix]
          network.virtual = true

          openflow_controller.install_virtual_network network
        end

        network
      end

      #
      # Cut-n-paste from ServiceNetfilter
      #

      def add_security_group port, inst_id, vif_map
        ng_maps = rpc.request('hva-collector', 'get_security_groups_of_instance', inst_id)
        rules = ng_maps.map { |ng_map|
          ng_map[:rules].map { |rule| rule[:permission] }
        }.flatten

        # security group
        build_rule(rules).each do |rule|
          case rule[:ip_protocol]
          when 'tcp', 'udp'
            if rule[:ip_fport] == rule[:ip_tport]
              openflow_controller.install_static_transport 6, port, port.mac, port.ip, rule[:ip_fport], rule[:ip_source]
            elsif rule[:ip_fport] <= 1 and rule[:ip_tport] >= 65535
              openflow_controller.install_static_transport 6, port, port.mac, port.ip, 0, rule[:ip_source]
            else
              logger.info "add_security_group: No support for port ranges yet: ip_source:#{rule[:ip_source]} ports:#{rule[:ip_fport]}-#{rule[:ip_tport]}"
            end
          when 'icmp'
            # icmp
            # This extension can be used if `--protocol icmp' is specified. It provides the following option:
            # [!] --icmp-type {type[/code]|typename}
            # This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command
            # iptables -p icmp -h
            openflow_controller.install_static_icmp rule[:icmp_type], rule[:icmp_code], port, port.mac, port.ip, rule[:ip_source]
          end
        end
      end

      def build_rule(rules = [])
        rule_maps = []

        rules.each do |rule|
          rule = rule.strip.gsub(/[\s\t]+/, '')
          from_group = false
          ipv4s = []

          # ex.
          # "tcp:22,22,ip4:0.0.0.0"
          # "udp:53,53,ip4:0.0.0.0"
          # "icmp:-1,-1,ip4:0.0.0.0"

          # 1st phase
          # ip_tport : tcp,udp? 1 - 16bit, icmp: -1
          # id_port has been separeted in first phase.
          from_pair, ip_tport, source_pair = rule.split(',')

          next if from_pair.nil?
          next if ip_tport.nil?
          next if source_pair.nil?

          # 2nd phase
          # ip_protocol : [ tcp | udp | icmp ]
          # ip_fport : tcp,udp? 1 - 16bit, icmp: -1
          ip_protocol, ip_fport = from_pair.split(':')

          # protocol : [ ip4 | ip6 | #{account_id} ]
          # ip_source : ip4? xxx.xxx.xxx.xxx./[0-32], ip6? (not yet supprted), #{netfilter_group_id}
          protocol, ip_source = source_pair.split(':')

          begin
            s = StringScanner.new(protocol)
            until s.eos?
              case
              when s.scan(/ip6/)
                # TODO#FUTURE: support IPv6 address format
                next
              when s.scan(/ip4/)
                # IPAddress doesn't support prefix '0'.
                ip_addr, prefix = ip_source.split('/', 2)
                if prefix.to_i == 0
                  ip_source = ip_addr
                end
              when s.scan(/a-\w{8}/)
                from_group = true
                inst_maps = rpc.request('hva-collector', 'get_instances_of_account_netfilter_group', protocol, ip_source)
                inst_maps.each { |inst_map|
                  ipv4s << inst_map[:ips]
                }
              else
                raise "unexpected protocol '#{s.peep(20)}'"
              end
            end
          rescue Exception => e
            p e
            next
          end

          begin
            if from_group == false
              #p "from_group:(#{from_group}) ip_source -> #{ip_source}"
              ip = IPAddress(ip_source)
              ip_source = case ip.u32
                          when 0
                            "#{ip.address}/0"
                          else
                            "#{ip.address}/#{ip.prefix}"
                          end
            else
              ipv4s = ipv4s.flatten.uniq
            end
          rescue Exception => e
            p e
            next
          end

          case ip_protocol
          when 'tcp', 'udp'
            ip_fport = ip_fport.to_i
            ip_tport = ip_tport.to_i

            # validate port range
            [ ip_fport, ip_tport ].each do |port|
              next unless port >= 1 && port <= 65535
            end

            if ip_fport <= ip_tport
              if from_group == false
                rule_maps << {
                  :ip_protocol => ip_protocol,
                  :ip_fport => ip_fport,
                  :ip_tport => ip_tport,
                  :protocol => protocol,
                  :ip_source => ip_source,
                }
              else
                ipv4s.each { |ip|
                  rule_maps << {
                    :ip_protocol => ip_protocol,
                    :ip_fport => ip_fport,
                    :ip_tport => ip_tport,
                    :protocol => 'ip4',
                    :ip_source => ip,
                  }
                }
              end
            end
          when 'icmp'
            # via http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/
            #
            # For the ICMP protocol, the ICMP type and code must be specified.
            # This must be specified in the format type:code where both are integers.
            # Type, code, or both can be specified as -1, which is a wildcard.

            icmp_type = ip_fport.to_i
            icmp_code = ip_tport.to_i

            # icmp_type
            case icmp_type
            when -1
            when 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
            else
              next
            end

            # icmp_code
            case icmp_code
            when -1
            when 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
              # when icmp_type equals -1 icmp_code must equal -1.
              next if icmp_type == -1
            else
              next
            end

            if from_group == false
              rule_maps << {
                :ip_protocol => ip_protocol,
                :icmp_type => ip_tport.to_i, # ip_tport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
                :icmp_code => ip_fport.to_i, # ip_fport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
                :protocol => protocol,
                :ip_source => ip_source,
              }
            else
              ipv4s.each { |ip|
                rule_maps << {
                  :ip_protocol => ip_protocol,
                  :icmp_type => ip_tport.to_i, # ip_tport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
                  :icmp_code => ip_fport.to_i, # ip_fport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
                  :protocol => 'ip4',
                  :ip_source => ip,
                }
              }
            end
          end
        end

        rule_maps
      end

      def openflow_controller
        @openflow_controller ||= OpenFlowController.new(self)
      end

      def rpc
        @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
      end

      def worker_thread
        @worker_thread ||= Isono::ThreadPool.new(1, 'Openflow')
      end

      def get_port_from_instance_id inst_id
        raise ArgumentError, "Unknown Instance ID: #{inst_id}" if inst_id.nil?
        inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
        raise ArgumentError, "Unknown Instance ID: #{inst_id}" if inst_map.nil?

        port = openflow_controller.ports.detect { |f| f[1].port_info.name == inst_map[:instance_nics].first[:uuid] }
      end
    end


    #
    # Event handler glue for Trema.
    #

    module EventFdConnection
      def initialize connection
        @connection = connection
      end

      def notify_readable
        @connection.notify_readable
      end

      def notify_writable
        @connection.notify_writable
      end

      def unbind
        # EM.next_tick do
        #   # socket is detached from the eventloop, but still open
        #   data = @io.read
        # end
      end
    end


    class CustomEventHandler < Trema::EventHandler
      include Dcmgr::Logger

      @@fd_set = []

      def init_event_handler
        logger.debug "Called CustomEventHandler init_event_handler."
      end

      def finalize_event_handler
        logger.debug "Called CustomEventHandler finalize_event_handler."
      end

      def stop_event_handler
        logger.debug "Called CustomEventHandler stop_event_handler."
        EM.stop
      end

      def run_event_handler_once
        logger.debug "Called CustomEventHandler run_event_handler_once."
      end

      def set_fd_handler fd, connection
        logger.debug "Called CustomEventHandler immediate set_fd_handler, #{fd}."
        raise "CustomEventHandler event handler already registered." if not @@fd_set[ fd ].nil?

        @@fd_set[ fd ] = EM.watch fd, EventFdConnection, connection
      end

      def delete_fd_handler fd
        logger.debug "Called CustomEventHandler delete_fd_handler, #{fd}."
        connection = @@fd_set[ fd ]
        @@fd_set[ fd ] = nil

        connection.detach
      end

      def set_readable fd, state
        # logger.debug "Called CustomEventHandler set_readable, #{fd}."

        if fd < 0
          logger.error "Called set_readable with negative fd: #{fd}, #{state}."
          return
        end

        @@fd_set[ fd ].notify_readable = state
      end

      def set_writable fd, state
        # logger.debug "Called CustomEventHandler set_writable, #{fd}."

        if fd < 0
          logger.error "Called set_writable with negative fd: #{fd}, #{state}."
          return
        end

        @@fd_set[ fd ].notify_writable = state
      end

      def readable fd
        logger.debug "Called CustomEventHandler readable, #{fd}."
      end

      def writable fd
        logger.debug "Called CustomEventHandler writable, #{fd}."
      end

      # Timer event handlers:
      def init_timer
        logger.debug "Called CustomEventHandler init_timer."
      end

      def finalize_timer
        logger.debug "Called CustomEventHandler finalize_timer."
      end

      def add_timer_event_callback timer
        logger.debug "Called CustomEventHandler: first:#{timer.inspect} interval:#{timer.interval} expiration:#{timer.expiration}."

        timer.handle.cancel if not timer.handle.nil?

        # timer.handle = EventMachine::Timer.new(timer.interval) do
        timer.handle = EventMachine::Timer.new(timer.expiration) do
          logger.debug "Calling timer event: first:#{timer.inspect} interval:#{timer.interval} expiration:#{timer.expiration}."
          timer.call
        end
      end

      def add_periodic_event_callback interval, timer
        logger.debug "Called CustomEventHandler: timer:#{timer.inspect} interval:#{interval}."

        timer.handle.cancel if not timer.handle.nil?

        timer.handle = EventMachine::PeriodicTimer.new(interval) do
          # logger.debug "Calling periodic timer event: timer:#{timer.inspect} interval:#{interval}."
          timer.call
        end
      end

      def delete_timer_event timer
        logger.debug "Called CustomEventHandler: timer:#{timer.inspect}."

        timer.handle.cancel if not timer.handle.nil?
        timer.handle = nil
      end

      def execute_timer_events
        logger.debug "Called CustomEventHandler execute_timer_events."
      end

    end

  end
end