wakame-vdc-dcmgr 11.06.0 → 11.12.0
- data/Rakefile +19 -31
- data/bin/collector +6 -1
- data/config/db/migrations/0001_v1110_origin.rb +446 -0
- data/config/dcmgr.conf.example +51 -0
- data/lib/dcmgr.rb +99 -22
- data/lib/dcmgr/cli/base.rb +34 -1
- data/lib/dcmgr/cli/host.rb +24 -20
- data/lib/dcmgr/cli/image.rb +38 -19
- data/lib/dcmgr/cli/keypair.rb +16 -12
- data/lib/dcmgr/cli/network.rb +189 -81
- data/lib/dcmgr/cli/quota.rb +2 -2
- data/lib/dcmgr/cli/security_group.rb +106 -0
- data/lib/dcmgr/cli/spec.rb +144 -39
- data/lib/dcmgr/cli/storage.rb +16 -15
- data/lib/dcmgr/cli/tag.rb +20 -14
- data/lib/dcmgr/cli/vlan.rb +5 -5
- data/lib/dcmgr/drivers/backing_store.rb +32 -0
- data/lib/dcmgr/drivers/comstar.rb +81 -0
- data/lib/dcmgr/drivers/iijgio_storage.rb +9 -19
- data/lib/dcmgr/drivers/iscsi_target.rb +41 -0
- data/lib/dcmgr/drivers/kvm.rb +161 -28
- data/lib/dcmgr/drivers/linux_iscsi.rb +60 -0
- data/lib/dcmgr/drivers/local_storage.rb +24 -0
- data/lib/dcmgr/drivers/lxc.rb +167 -125
- data/lib/dcmgr/drivers/raw.rb +74 -0
- data/lib/dcmgr/drivers/s3_storage.rb +7 -19
- data/lib/dcmgr/drivers/snapshot_storage.rb +18 -28
- data/lib/dcmgr/drivers/storage_initiator.rb +28 -0
- data/lib/dcmgr/drivers/sun_iscsi.rb +32 -0
- data/lib/dcmgr/drivers/zfs.rb +77 -0
- data/lib/dcmgr/endpoints/core_api.rb +315 -263
- data/lib/dcmgr/endpoints/errors.rb +21 -10
- data/lib/dcmgr/endpoints/metadata.rb +360 -23
- data/lib/dcmgr/helpers/cli_helper.rb +6 -3
- data/lib/dcmgr/helpers/ec2_metadata_helper.rb +9 -0
- data/lib/dcmgr/helpers/nic_helper.rb +11 -0
- data/lib/dcmgr/helpers/snapshot_storage_helper.rb +34 -0
- data/lib/dcmgr/models/account.rb +0 -6
- data/lib/dcmgr/models/account_resource.rb +0 -4
- data/lib/dcmgr/models/base_new.rb +14 -2
- data/lib/dcmgr/models/dhcp_range.rb +38 -0
- data/lib/dcmgr/models/frontend_system.rb +0 -6
- data/lib/dcmgr/models/history.rb +0 -11
- data/lib/dcmgr/models/host_node.rb +131 -0
- data/lib/dcmgr/models/hostname_lease.rb +0 -8
- data/lib/dcmgr/models/image.rb +31 -18
- data/lib/dcmgr/models/instance.rb +137 -143
- data/lib/dcmgr/models/instance_nic.rb +52 -29
- data/lib/dcmgr/models/instance_security_group.rb +9 -0
- data/lib/dcmgr/models/instance_spec.rb +163 -31
- data/lib/dcmgr/models/ip_lease.rb +10 -21
- data/lib/dcmgr/models/mac_lease.rb +30 -11
- data/lib/dcmgr/models/network.rb +148 -27
- data/lib/dcmgr/models/physical_network.rb +18 -0
- data/lib/dcmgr/models/quota.rb +0 -10
- data/lib/dcmgr/models/request_log.rb +3 -18
- data/lib/dcmgr/models/security_group.rb +66 -0
- data/lib/dcmgr/models/security_group_rule.rb +145 -0
- data/lib/dcmgr/models/ssh_key_pair.rb +16 -19
- data/lib/dcmgr/models/{storage_pool.rb → storage_node.rb} +35 -25
- data/lib/dcmgr/models/tag.rb +0 -14
- data/lib/dcmgr/models/tag_mapping.rb +1 -7
- data/lib/dcmgr/models/vlan_lease.rb +2 -8
- data/lib/dcmgr/models/volume.rb +49 -37
- data/lib/dcmgr/models/volume_snapshot.rb +15 -17
- data/lib/dcmgr/node_modules/hva_collector.rb +69 -28
- data/lib/dcmgr/node_modules/instance_ha.rb +23 -12
- data/lib/dcmgr/node_modules/instance_monitor.rb +16 -2
- data/lib/dcmgr/node_modules/openflow_controller.rb +784 -0
- data/lib/dcmgr/node_modules/scheduler.rb +189 -0
- data/lib/dcmgr/node_modules/service_netfilter.rb +452 -227
- data/lib/dcmgr/node_modules/service_openflow.rb +731 -0
- data/lib/dcmgr/node_modules/sta_collector.rb +20 -0
- data/lib/dcmgr/node_modules/sta_tgt_initializer.rb +35 -0
- data/lib/dcmgr/rack/request_logger.rb +11 -6
- data/lib/dcmgr/rpc/hva_handler.rb +256 -110
- data/lib/dcmgr/rpc/sta_handler.rb +244 -0
- data/lib/dcmgr/scheduler.rb +122 -8
- data/lib/dcmgr/scheduler/host_node/exclude_same.rb +24 -0
- data/lib/dcmgr/scheduler/host_node/find_first.rb +12 -0
- data/lib/dcmgr/scheduler/host_node/least_usage.rb +28 -0
- data/lib/dcmgr/scheduler/host_node/per_instance.rb +18 -0
- data/lib/dcmgr/scheduler/host_node/specify_node.rb +26 -0
- data/lib/dcmgr/scheduler/network/flat_single.rb +23 -0
- data/lib/dcmgr/scheduler/network/nat_one_to_one.rb +23 -0
- data/lib/dcmgr/scheduler/network/per_instance.rb +39 -0
- data/lib/dcmgr/scheduler/network/vif_template.rb +19 -0
- data/lib/dcmgr/scheduler/storage_node/find_first.rb +13 -0
- data/lib/dcmgr/scheduler/storage_node/least_usage.rb +23 -0
- data/lib/dcmgr/storage_service.rb +39 -40
- data/lib/dcmgr/tags.rb +3 -3
- data/lib/dcmgr/version.rb +1 -1
- data/lib/dcmgr/vnet.rb +105 -0
- data/lib/dcmgr/vnet/factories.rb +141 -0
- data/lib/dcmgr/vnet/isolators/by_securitygroup.rb +21 -0
- data/lib/dcmgr/vnet/isolators/dummy.rb +17 -0
- data/lib/dcmgr/vnet/netfilter/cache.rb +51 -0
- data/lib/dcmgr/vnet/netfilter/chain.rb +66 -0
- data/lib/dcmgr/vnet/netfilter/controller.rb +193 -0
- data/lib/dcmgr/vnet/netfilter/ebtables_rule.rb +53 -0
- data/lib/dcmgr/vnet/netfilter/iptables_rule.rb +45 -0
- data/lib/dcmgr/vnet/netfilter/task_manager.rb +459 -0
- data/lib/dcmgr/vnet/tasks/accept_all_dns.rb +19 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_broadcast.rb +24 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_from_friends.rb +34 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_from_gateway.rb +21 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_to_host.rb +30 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_from_friends.rb +26 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_from_gateway.rb +23 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_to_anywhere.rb +18 -0
- data/lib/dcmgr/vnet/tasks/accept_related_established.rb +45 -0
- data/lib/dcmgr/vnet/tasks/accept_wakame_dhcp_only.rb +33 -0
- data/lib/dcmgr/vnet/tasks/accept_wakame_dns_only.rb +33 -0
- data/lib/dcmgr/vnet/tasks/debug_iptables.rb +21 -0
- data/lib/dcmgr/vnet/tasks/drop_arp_forwarding.rb +27 -0
- data/lib/dcmgr/vnet/tasks/drop_arp_to_host.rb +24 -0
- data/lib/dcmgr/vnet/tasks/drop_ip_from_anywhere.rb +18 -0
- data/lib/dcmgr/vnet/tasks/drop_ip_spoofing.rb +34 -0
- data/lib/dcmgr/vnet/tasks/drop_mac_spoofing.rb +33 -0
- data/lib/dcmgr/vnet/tasks/exclude_from_nat.rb +47 -0
- data/lib/dcmgr/vnet/tasks/security_group.rb +37 -0
- data/lib/dcmgr/vnet/tasks/static_nat.rb +54 -0
- data/lib/dcmgr/vnet/tasks/translate_metadata_address.rb +32 -0
- data/web/metadata/config.ru +1 -1
- metadata +174 -89
- data/lib/dcmgr/cli/group.rb +0 -101
- data/lib/dcmgr/endpoints/core_api_mock.rb +0 -865
- data/lib/dcmgr/models/host_pool.rb +0 -122
- data/lib/dcmgr/models/instance_netfilter_group.rb +0 -16
- data/lib/dcmgr/models/netfilter_group.rb +0 -89
- data/lib/dcmgr/models/netfilter_rule.rb +0 -21
- data/lib/dcmgr/scheduler/find_last.rb +0 -16
- data/lib/dcmgr/scheduler/find_random.rb +0 -16
- data/lib/dcmgr/stm/instance.rb +0 -25
- data/lib/dcmgr/stm/snapshot_context.rb +0 -33
- data/lib/dcmgr/stm/volume_context.rb +0 -65
data/lib/dcmgr/node_modules/scheduler.rb (new file)
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+require 'isono'
+
+module Dcmgr
+  module NodeModules
+    class Scheduler < Isono::NodeModules::Base
+      include Dcmgr::Logger
+
+      # module_module: bin/collector
+      initialize_hook do
+        app = Isono::Rack::ObjectMethod.new(myinstance)
+        job = Isono::NodeModules::JobChannel.new(node)
+        job.register_endpoint('scheduler', Isono::Rack.build do
+          use Isono::Rack::DataStore
+          run proc { |req, res|
+            Thread.current[Models::BaseNew::LOCK_TABLES_KEY] = {}
+            app.call(req, res)
+          }
+        end)
+      end
+
+      terminate_hook do
+      end
+
+      def schedule_instance(instance_id)
+        instance = Models::Instance[instance_id]
+        vol = Models::Volume.find(:instance_id=>instance.id, :boot_dev=>1)
+
+        Dcmgr::Scheduler.host_node.schedule(instance)
+        Dcmgr::Scheduler.network.schedule(instance)
+        lease_ip_address_to_instance(instance)
+        instance.save
+
+        instance.state = :pending
+        instance.save
+
+
+        case instance.image.boot_dev_type
+        when Models::Image::BOOT_DEV_SAN
+          Dcmgr::Scheduler.storage_node.schedule(vol)
+          vol.state = :pending
+          vol.save
+
+          commit_transaction
+
+          repository_address = Dcmgr::StorageService.repository_address(vol.snapshot.destination_key)
+          self.job.submit("sta-handle.#{vol.storage_node.node_id}",
+                          'create_volume_and_run_instance', vol.canonical_uuid, instance.canonical_uuid, repository_address)
+        when Models::Image::BOOT_DEV_LOCAL
+          commit_transaction
+          self.job.submit("hva-handle.#{instance.host_node.node_id}",
+                          'run_local_store', instance.canonical_uuid)
+        else
+          raise "Unknown boot type"
+        end
+        event.publish('instance.scheduled', :args=>[instance.canonical_uuid])
+
+      rescue ::Exception => e
+        rollback_transaction rescue nil
+
+        logger.error(e)
+        instance.destroy if instance
+        vol.destroy if vol
+        return
+      end
+
+      def schedule_instance_ha(instance_id, vol)
+        instance = Models::Instance[instance_id]
+
+        Dcmgr::Scheduler.host_node_ha.schedule(instance)
+        # Don't re-schedule network here.
+        # The IP address, MAC address and NIC must be assigned with same address.
+        instance.save
+
+        instance.state = :pending
+        instance.save
+
+        commit_transaction
+        case instance.image.boot_dev_type
+        when Models::Image::BOOT_DEV_SAN
+          self.job.submit("hva-handle.#{instance.host_node.node_id}", 'run_vol_store', instance.canonical_uuid, vol.canonical_uuid)
+        when Models::Image::BOOT_DEV_LOCAL
+          self.job.submit("hva-handle.#{instance.host_node.node_id}", 'run_local_store', instance.canonical_uuid)
+        else
+          raise "Unknown boot type"
+        end
+        event.publish('instance.scheduled', :args=>[instance.canonical_uuid])
+
+      rescue ::Exception => e
+        rollback_transaction rescue nil
+
+        logger.error(e)
+        instance.destroy if instance
+        vol.destroy if vol
+        return
+      end
+
+      def schedule_start_instance(instance_id)
+        instance = Models::Instance[instance_id]
+        vol = Models::Volume.find(:instance_id=>instance.id, :boot_dev=>1)
+
+        Dcmgr::Scheduler.host_node.schedule(instance)
+        lease_ip_address_to_instance(instance)
+        instance.save
+
+        instance.state = :pending
+        instance.save
+
+        commit_transaction
+        case instance.image.boot_dev_type
+        when Models::Image::BOOT_DEV_SAN
+          self.job.submit("hva-handle.#{instance.host_node.node_id}", 'run_vol_store', instance.canonical_uuid, vol.canonical_uuid)
+        when Models::Image::BOOT_DEV_LOCAL
+          self.job.submit("hva-handle.#{instance.host_node.node_id}", 'run_local_store', instance.canonical_uuid)
+        else
+          raise "Unknown boot type"
+        end
+        event.publish('instance.scheduled', :args=>[instance.canonical_uuid])
+
+      rescue ::Exception => e
+        rollback_transaction rescue nil
+
+        logger.error(e)
+        instance.destroy if instance
+        vol.destroy if vol
+        return
+      end
+
+      def schedule_volume(volume_id)
+        volume = Models::Volume[volume_id]
+
+        Dcmgr::Scheduler.storage_node.schedule(volume)
+        volume.save
+
+        volume.state = :pending
+        volume.save
+
+        commit_transaction
+
+        repository_address = nil
+        if volume.snapshot
+          repository_address = Dcmgr::StorageService.repository_address(volume.snapshot.destination_key)
+        end
+
+        self.job.submit("sta-handle.#{volume.storage_node.node_id}",
+                        'create_volume', volume.canonical_uuid, repository_address)
+      rescue ::Exception => e
+        rollback_transaction rescue nil
+
+        logger.error(e)
+        volume.destroy
+      end
+
+      protected
+      # commit manually before return from the request block
+      def commit_transaction
+        db = Sequel::DATABASES.first
+        db << db.__send__(:commit_transaction_sql)
+      end
+
+      def rollback_transaction
+        db = Sequel::DATABASES.first
+        db << db.__send__(:rollback_transaction_sql)
+      end
+
+      def job()
+        Isono::NodeModules::JobChannel.new(self.node)
+      end
+
+      def event()
+        Isono::NodeModules::EventChannel.new(self.node)
+      end
+
+      def lease_ip_address_to_instance(instance)
+        instance.nic.each { |nic|
+          network = nic.network
+          if network && nic.direct_ip_lease.empty?
+            Models::IpLease.lease(nic, network)
+          end
+          nat_network = nic.nat_network
+          if nat_network && nic.nat_ip_lease.empty?
+            Models::IpLease.lease(nic, nat_network)
+          end
+        }
+      end
+
+    end
+  end
+end
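The new Scheduler node module registers its public methods on the 'scheduler' job endpoint through Isono::Rack::ObjectMethod, so other nodes drive it by submitting jobs rather than calling it directly. A minimal sketch of such a dispatch, using the endpoint and method names from the file above (the `node` handle is assumed to be an Isono node instance):

    # Sketch: dispatch schedule_instance as a job on the 'scheduler' endpoint.
    job = Isono::NodeModules::JobChannel.new(node)
    job.submit('scheduler', 'schedule_instance', instance.canonical_uuid)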
data/lib/dcmgr/node_modules/service_netfilter.rb
@@ -8,29 +8,34 @@ module Dcmgr
     module Bandwidth
       include Dcmgr::Helpers::NicHelper
       include Dcmgr::Logger
-
+
       def clear_bandwidth_limits
         logger.debug "Removing all bandwidth limits"
         "tc qdisc del dev #{find_nic(@node.manifest.config.hv_ifindex)} root"
       end
-
+
+
+      # Enforces the bandwidth limits set for the networks.
+      # This uses the tc command to do so.
+      # _networks_ is an array containing the networks to set
+      # the bandwidth limits for.
       def limit_bandwidth(networks)
         bandwidth_cmd = []
         #raise ArgumentError unless inst_maps.is_a?(Hash)
         nic = find_nic(@node.manifest.config.hv_ifindex)
-
+
        #Determine the physical nic's peed in Mbit/s
        speed = %x{ethtool #{nic} | grep Speed | cut -d ' ' -f2}.chomp.to_i
-
+
        #Set up root disc
        bandwidth_cmd << "tc qdisc add dev #{nic} root handle 1: htb"
        bandwidth_cmd << "tc class add dev #{nic} parent 1: classid 1:1 htb rate #{speed}mbit ceil #{speed}mbit"
-
+
        networks.each { |nw|
          next if nw[:bandwidth].nil?
-
+
          logger.debug "Limiting bandwidth to #{nw[:bandwidth]}Mbit/s for #{nw[:uuid]}."
-
+
          #Set up the bandwidth limit for this network
          bandwidth_cmd << "tc class add dev #{nic} parent 1:1 classid 1:1#{nw[:bandwidth_mark]} htb rate #{nw[:bandwidth]}mbit ceil #{nw[:bandwidth]}mbit prio 1"
          bandwidth_cmd << "tc qdisc add dev #{nic} parent 1:1#{nw[:bandwidth_mark]} handle 1#{nw[:bandwidth_mark]}: sfq perturb 10"
@@ -39,7 +44,7 @@ module Dcmgr
          #Mark the packets passing through this network
          ["s","d"].each { |x| bandwidth_cmd << "iptables -A FORWARD -#{x} #{nw[:ipv4_gw]}/#{nw[:prefix]} -j MARK --set-mark 0x#{nw[:bandwidth_mark]}" }
        }
-
+
        bandwidth_cmd
      end
    end
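To make the tc hierarchy concrete: limit_bandwidth returns the command strings rather than executing them. With an invented network map, and assuming nic = 'eth0' and a detected speed of 1000 Mbit/s, the sketch below lists what the method above would emit:

    nw = { :uuid => 'nw-demo', :bandwidth => 100, :bandwidth_mark => 2,
           :ipv4_gw => '192.168.0.1', :prefix => 24 }  # illustrative values only
    limit_bandwidth([nw])
    #=> ["tc qdisc add dev eth0 root handle 1: htb",
    #    "tc class add dev eth0 parent 1: classid 1:1 htb rate 1000mbit ceil 1000mbit",
    #    "tc class add dev eth0 parent 1:1 classid 1:12 htb rate 100mbit ceil 100mbit prio 1",
    #    "tc qdisc add dev eth0 parent 1:12 handle 12: sfq perturb 10",
    #    "iptables -A FORWARD -s 192.168.0.1/24 -j MARK --set-mark 0x2",
    #    "iptables -A FORWARD -d 192.168.0.1/24 -j MARK --set-mark 0x2"]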
@@ -48,6 +53,69 @@ module Dcmgr
      include Dcmgr::Helpers::NicHelper
      include Dcmgr::Logger
 
+      # Quick and dirty hack to unlink the nat chains before deleting them.
+      # It would be cleaner to recall the creation method with a :delete action
+      # but NAT rules are based on IP leases and those are deleted on instance termination
+      # Therefore we use grep to get the referring rules based on vnic uuid and then delete them.
+      # Run build_nat_chains(inst_map, :delete) afterwards to delete the chains themselves
+      def unlink_nat_chains(inst_map)
+        raise ArgumentError, "inst_map must be a Hash." unless inst_map.is_a?(Hash)
+
+        del_cmds = []
+        inst_map[:instance_nics].each { |nic|
+          post = %x{iptables -t nat -L POSTROUTING --line-numbers | grep s_#{nic[:uuid]} | tr -s ' ' | cut -d ' ' -f1}.chomp
+          pre = %x{iptables -t nat -L PREROUTING --line-numbers | grep d_#{nic[:uuid]} | tr -s ' ' | cut -d ' ' -f1}.chomp
+
+          del_cmds << "iptables -t nat -D POSTROUTING #{post}" unless post.empty?
+          del_cmds << "iptables -t nat -D PREROUTING #{pre}" unless pre.empty?
+        }
+
+        del_cmds
+      end
+
+      # Similar hack to unlink_nat_chains. Once an instance is terminated,
+      # we no longer know which IP it had so we grep the rules by mac address
+      # and delete them by rule numer
+      def stop_arp_reply(inst_map)
+        raise ArgumentError, "inst_map must be a Hash." unless inst_map.is_a?(Hash)
+
+        del_cmds = []
+
+        inst_map[:instance_nics].each { |nic|
+          mac = clean_mac(nic[:mac_addr])
+          rule_number = %x{ebtables -t nat -L --Ln --Lmac2 | grep #{mac} | cut -d '.' -f1}
+          del_cmds << "ebtables -t nat -D PREROUTING #{rule_number}" unless rule_number.empty?
+        }
+
+        del_cmds
+      end
+
+      # Builds or deletes the chains for each vnic in an instance.
+      # We use different chains for incoming and outgoing packets per vnic
+      # This way every packet only needs to be checked against chains that
+      # are specifically intended for it.
+      # _inst_map_ is a map of the instance to build or delte chains for.
+      # _action_ decides wether we will create or delete the rules. It can be
+      # either of the following:
+      # * :create is the default value and creates chains for _inst_map_
+      # * :delete deletes the chains for _inst_map_
+      def build_nat_chains(inst_map, action = :create)
+        actions = { :create => ['N'], :delete => ['F', 'X'] }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}." unless actions.keys.member?(action)
+        raise ArgumentError, "inst_map must be a Hash." unless inst_map.is_a?(Hash)
+
+        chain_cmds = []
+        inst_map[:instance_nics].each { |nic|
+          ['s','d'].each { |bound|
+            actions[action].each { |a|
+              chain_cmds << "iptables -t nat -#{a} #{bound}_#{nic[:uuid]}"
+            }
+          }
+        }
+
+        chain_cmds
+      end
+
      # Takes an instance and nats it.
      # If the instance is in a network that has a nat_network mapped to it,
      # it will receive a second ip lease for that network. This lease will then be
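Since build_nat_chains only varies the iptables flag per action, its output is easy to predict. A sketch with an invented single-NIC inst_map:

    inst_map = { :instance_nics => [ { :uuid => 'vif-demo' } ] }  # illustrative
    build_nat_chains(inst_map)
    #=> ["iptables -t nat -N s_vif-demo", "iptables -t nat -N d_vif-demo"]
    build_nat_chains(inst_map, :delete)
    #=> ["iptables -t nat -F s_vif-demo", "iptables -t nat -X s_vif-demo",
    #    "iptables -t nat -F d_vif-demo", "iptables -t nat -X d_vif-demo"]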
@@ -55,93 +123,108 @@ module Dcmgr
      # For example if 192.168.0.0/24 is natted to 172.16.0.0/16, then
      # an instance with ip 192.168.0.10 might be natted to ip 172.16.46.23.
      def nat_instance(inst_map)
+        raise ArgumentError, "inst_map must be a Hash." unless inst_map.is_a?(Hash)
+
        nat_cmd = []
-
-
-
-
+
+        inst_map[:instance_nics].each { |nic|
+          # strict check
+          next unless valid_nic?(nic[:uuid])
+
          nat_ips = rpc.request('hva-collector', 'get_nat_leases', nic[:uuid]).map {|ip| IPAddress(ip)}
-
+
          #Get the internal ip for this nic
          internal_ip = IPAddress rpc.request('hva-collector', 'get_iplease_for_nic', nic[:uuid])
          inside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid]).map {|ip| IPAddress(ip)}
          outside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid],:outside).map {|ip| IPAddress(ip)}
-
+
          #output the commands to nat this nic and answer arp requests for its outside ip
-          friend_ipset =
+          friend_ipset = nic[:uuid] + "_friend_ips"
          nat_ips.each { |external_ip|
            if @node.manifest.config.use_ipset
-
+
              nat_cmd << "ipset -N #{friend_ipset} iphash"
-
+
              inside_exception_ips.each { |ex_ip|
                nat_cmd << "ipset -A #{friend_ipset} #{ex_ip.address}"
              }
-
-              # The good rules that use ipset
-              postrouting_command = "iptables -t nat -A
-              prerouting_command = "iptables -t nat -A
+
+              # The good rules that use ipset
+              postrouting_command = "iptables -t nat -A s_#{nic[:uuid]} -s #{internal_ip.address} -m set ! --match-set #{friend_ipset} dst"
+              prerouting_command = "iptables -t nat -A d_#{nic[:uuid]} -d #{external_ip.address} -m set ! --match-set #{friend_ipset} src"
            else
              # The ugly rules to use in case ipset is not installed
-              postrouting_command = "iptables -t nat -A
-              prerouting_command = "iptables -t nat -A
+              postrouting_command = "iptables -t nat -A s_#{nic[:uuid]} -s #{internal_ip.address}"
+              prerouting_command = "iptables -t nat -A d_#{nic[:uuid]} -d #{external_ip.address}"
            end
-
+
+            # Set up the proper chain jumps
+            nat_cmd << "iptables -t nat -A PREROUTING -d #{external_ip.address} -j d_#{nic[:uuid]}"
+            nat_cmd << "iptables -t nat -A POSTROUTING -s #{internal_ip.address} -j s_#{nic[:uuid]}"
+
            # Build the final nat rules and log any packets that traverse them
-            nat_cmd << postrouting_command + " -j LOG --log-prefix 'Snat '"
+            nat_cmd << postrouting_command + " -j LOG --log-prefix 'Snat '" if @node.manifest.config.packet_drop_log
            nat_cmd << postrouting_command + " -j SNAT --to #{external_ip.address}"
-
-            nat_cmd << prerouting_command + " -j LOG --log-prefix 'Dnat '"
+
+            nat_cmd << prerouting_command + " -j LOG --log-prefix 'Dnat '" if @node.manifest.config.packet_drop_log
            nat_cmd << prerouting_command + " -j DNAT --to #{internal_ip.address}"
-
+
            logger.debug "Natting #{internal_ip.address} to #{external_ip.address}"
-
-
+
+            mac = clean_mac(nic[:mac_addr])
+            nat_cmd << arp_respond(external_ip,mac)
          }
        }
-
+
        nat_cmd
      end
-
+
+      # Returns the netfilter rules for destination IP addresses that
+      # will not use static nat. These are the IP addresses of other instances
+      # in the same security group.
+      # _inst_map_ is a map of the instance that the rules will be defined for.
      def nat_exceptions(inst_map)
        inside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid]).map {|ip| IPAddress(ip)}
        outside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid],:outside).map {|ip| IPAddress(ip)}
-
+
        cmds = []
        inst_map[:instance_nics].each { |nic|
+          # strict check
+          next unless valid_nic?(nic[:uuid])
+
          internal_ip = IPAddress(rpc.request('hva-collector', 'get_iplease_for_nic', nic[:uuid]))
          inside_exception_ips.each { |ex_ip|
-            cmds << "iptables -t nat -A
+            cmds << "iptables -t nat -A s_#{nic[:uuid]} -s #{internal_ip.address} -d #{ex_ip.address}/#{ex_ip.prefix} -j ACCEPT"
          }
          outside_exception_ips.each { |ex_ip|
-            cmds << "iptables -t nat -A
+            cmds << "iptables -t nat -A d_#{nic[:uuid]} -s #{internal_ip.address} -d #{ex_ip.address}/#{ex_ip.prefix} -j ACCEPT"
          }
        }
-
+
        cmds
      end
-
+
      # Returns ebtables command to respond to ARP requests for the address _ip_.
-
-
+      # _mac_addr_ is the mac address that we will reply with.
+      def arp_respond(ip,mac_addr)
+        ip = IPAddress(ip) if ip.is_a?(String)
        raise "Invalid IP address: #{ip}" unless ip.is_a?(IPAddress)
-
+
        #Get the mac address for our physical nic
-        nic = find_nic(@node.manifest.config.hv_ifindex)
+        #nic = find_nic(@node.manifest.config.hv_ifindex)
        #TODO: Find a prettier way to get the mac address
-        mac_addr = %x{ifconfig | grep '#{nic}' | tr -s ' ' | cut -d ' ' -f5}.chomp
-
+        #mac_addr = %x{ifconfig | grep '#{nic}' | tr -s ' ' | cut -d ' ' -f5}.chomp
+
        logger.debug "Replying ARP requests for address: #{ip.address}"
-
+
        "ebtables -t nat -A PREROUTING -p arp --arp-ip-dst #{ip.address} --arp-opcode REQUEST -j arpreply --arpreply-mac #{mac_addr}"
      end
-
+
      def is_natted_ip?(ip)
        ip = IPAddress(ip) if ip.is_a?(String)
-
-
-
-        rpc.request('hva-collector', 'is_natted_ip?', ip.address)
+        raise ArgumentError, "Invalid IP address: #{ip}" unless ip.is_a?(IPAddress)
+
+        rpc.request('hva-collector', 'is_natted_ip?', ip.address)
      end
    end
 
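arp_respond returns a single ebtables rule; nat_instance passes it each external lease together with the vnic's cleaned MAC so the host answers ARP queries for the natted address. A sketch with invented arguments:

    arp_respond('172.16.46.23', '52:54:00:12:34:56')  # illustrative values
    #=> "ebtables -t nat -A PREROUTING -p arp --arp-ip-dst 172.16.46.23 --arp-opcode REQUEST -j arpreply --arpreply-mac 52:54:00:12:34:56"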
@@ -150,12 +233,20 @@ module Dcmgr
      include Dcmgr::Helpers::NicHelper
      include Nat
      include Bandwidth
+
+      attr_accessor :controller
 
      initialize_hook do
        @worker_thread = Isono::ThreadPool.new(1, 'Netfilter')
 
        @worker_thread.pass {
-          myinstance.
+          unless myinstance.node.manifest.config.edge_networking == 'legacy_netfilter'
+            sleep 1
+            # Initializing the controller will also apply netfilter rules for every alive instance
+            myinstance.controller = VNet::ControllerFactory.create_controller(myinstance.node)
+          else
+            myinstance.init_netfilter
+          end
        }
 
        event = Isono::NodeModules::EventChannel.new(node)
@@ -164,8 +255,12 @@ module Dcmgr
          @worker_thread.pass {
            logger.info("refresh on instance_started: #{args.inspect}")
            inst_id = args[0]
-
-
+            unless myinstance.node.manifest.config.edge_networking == 'legacy_netfilter'
+              myinstance.controller.apply_instance(inst_id)
+            else
+              logger.info("add_netfilter_by_instance_id: #{inst_id}")
+              myinstance.add_netfilter_by_instance_id(inst_id)
+            end
          }
        end
 
@@ -173,16 +268,24 @@ module Dcmgr
          @worker_thread.pass {
            logger.info("refresh on instance_terminated: #{args.inspect}")
            inst_id = args[0]
-
-
+            unless myinstance.node.manifest.config.edge_networking == 'legacy_netfilter'
+              myinstance.controller.remove_instance(inst_id)
+            else
+              logger.info("delete_netfilter_by_instance_id: #{inst_id}")
+              myinstance.delete_netfilter_by_instance_id(inst_id)
+            end
          }
        end
 
-        event.subscribe('hva/
+        event.subscribe('hva/security_group_updated', '#') do |args|
          @worker_thread.pass {
            logger.info("refresh on netfilter_updated: #{args.inspect}")
-
-            myinstance.
+            security_group_id = args[0]
+            unless myinstance.node.manifest.config.edge_networking == 'legacy_netfilter'
+              myinstance.controller.update_security_group(security_group_id)
+            else
+              myinstance.refresh_netfilter_by_joined_security_group_id(security_group_id)
+            end
          }
        end
      end
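The initialize_hook and all three event handlers branch on the same key: any edge_networking value other than 'legacy_netfilter' hands control to the new VNet controller, while 'legacy_netfilter' keeps the in-process netfilter code paths. A hypothetical hva configuration excerpt — the key names appear in the diff above, but the file syntax and values here are assumptions:

    config.edge_networking = 'legacy_netfilter'  # any other value selects the VNet controller
    config.enable_ebtables = true
    config.enable_iptables = true
    config.use_ipset       = false
    config.packet_drop_log = false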
@@ -196,7 +299,7 @@ module Dcmgr
        viftable_map[ inst_map[:ips].first ] = inst_map[:instance_nics].first[:uuid]
 
        # Does the hva have instance?
-        unless inst_map[:
+        unless inst_map[:host_node][:node_id] == node.node_id
          logger.warn("no match for the instance: #{inst_map[:uuid]}")
          next
        end
@@ -217,15 +320,81 @@ module Dcmgr
        end
      end
 
+      # This method created all netfilter rules for one instance.
+      # It is called when starting a new instances
+      # _inst_id_ is the canonical uuid of the instance whose netfilter rule are to be created.
+      def add_netfilter_by_instance_id(inst_id)
+        raise ArgumentError, "Unknown Instance ID: #{inst_id}" if inst_id.nil?
+        inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
+        raise ArgumentError, "Unknown Instance ID: #{inst_id}" if inst_map.nil?
+
+        cmds = []
+        vif_map = build_vif_map(inst_map)
+        viftable_map = {}
+        viftable_map[ inst_map[:ips].first ] = inst_map[:instance_nics].first[:uuid]
+
+        if @node.manifest.config.enable_ebtables
+          cmds << build_ebtables_chains(vif_map,:create)
+          cmds << build_ebtables_basic_part(vif_map, inst_map, :create)
+          cmds << build_ebtables_group_part(vif_map, inst_map, viftable_map, :create)
+          cmds << build_ebtables_final_part(vif_map, :create)
+        end
+
+        if @node.manifest.config.enable_iptables
+          cmds << build_iptables_chains(protocol_map(:iptables), vif_map, :create)
+          cmds << build_iptables_basic_part(vif_map, inst_map, :create)
+          cmds << build_iptables_group_part(vif_map, inst_map, :create)
+          cmds << build_iptables_final_part(vif_map, :create)
+        end
+
+        if @node.manifest.config.enable_ebtables && @node.manifest.config.enable_iptables
+          cmds << build_nat_chains(inst_map)
+          cmds << nat_exceptions(inst_map) unless @node.manifest.config.use_ipset
+          cmds << nat_instance(inst_map)
+        end
+
+        do_exec(cmds)
+      end
+
+      # This method deletes all netfilter rules for one instance.
+      # It is called when terminating an instance.
+      # _inst_id_ The canonical uuid of the instance whose netfilter rules are to be deleted.
+      def delete_netfilter_by_instance_id(inst_id)
+        raise ArgumentError, "Unknown Instance ID: #{inst_id}" if inst_id.nil?
+        inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
+        raise ArgumentError, "Unknown Instance ID: #{inst_id}" if inst_map.nil?
+
+        #Create vnic map
+        vif_map = build_vif_map(inst_map)
+
+        #Delete ebtables chains
+        cmds = []
+
+        #Calling build_ebtables_basic_part with the :delete flag will delete all jumps to this instance's chains and then delete the chains themselves
+        cmds << build_ebtables_basic_part(vif_map, inst_map, :delete) if @node.manifest.config.enable_ebtables
+
+        #Delete nat chains
+        if @node.manifest.config.enable_ebtables && @node.manifest.config.enable_iptables
+          cmds << unlink_nat_chains(inst_map)
+          cmds << stop_arp_reply(inst_map)
+          cmds << build_nat_chains(inst_map, :delete)
+        end
+
+        #Delete iptables chains
+        cmds << build_iptables_basic_part(vif_map, inst_map, :delete) if @node.manifest.config.enable_iptables
+
+        do_exec(cmds)
+      end
+
      # from event_subscriber
      def refresh_netfilter_by_friend_instance_id(inst_id)
        raise "UnknownInstanceID" if inst_id.nil?
 
        begin
-          ng_maps = rpc.request('hva-collector', '
+          ng_maps = rpc.request('hva-collector', 'get_security_groups_of_instance', inst_id)
          # get friend instance(s)
          friend_inst_maps = ng_maps.map { |ng_map|
-            rpc.request('hva-collector', '
+            rpc.request('hva-collector', 'get_instances_of_security_group', ng_map[:id])
          }.flatten.uniq
          guest_inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
 
@@ -237,7 +406,7 @@ module Dcmgr
          else
            # group_instance: 1->0
            inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
-            init_netfilter if inst_map[:
+            init_netfilter if inst_map[:host_node][:node_id] == node.node_id
          end
        rescue Exception => e
          p e
@@ -245,11 +414,11 @@ module Dcmgr
      end
 
      # from event_subscriber
-      def
-        raise "
+      def refresh_netfilter_by_joined_security_group_id(security_group_id)
+        raise "Unknown security group ID: #{security_group_id}" if security_group_id.nil?
 
        begin
-          inst_maps = rpc.request('hva-collector', '
+          inst_maps = rpc.request('hva-collector', 'get_instances_of_security_group', security_group_id)
          init_netfilter if inst_maps.size > 0
        rescue Exception => e
          p e
@@ -259,7 +428,7 @@ module Dcmgr
      def build_vif_map(inst_map = {})
        vif_map = {
          :uuid => inst_map[:instance_nics].first[:uuid],
-          :mac => inst_map[:instance_nics].first[:mac_addr]
+          :mac => clean_mac(inst_map[:instance_nics].first[:mac_addr]),
          :ipv4 => inst_map[:ips].first,
        }
      end
@@ -312,7 +481,7 @@ module Dcmgr
        inst_maps.each { |inst_map|
          vif_map = build_vif_map(inst_map)
 
-          basic_cmds << build_ebtables_basic_part(vif_map, inst_map)
+          basic_cmds << build_ebtables_basic_part(vif_map, inst_map)
          group_cmds << build_ebtables_group_part(vif_map, inst_map, viftable_map)
          final_cmds << build_ebtables_final_part(vif_map)
        }
@@ -331,15 +500,17 @@ module Dcmgr
        nat_cmds = []
        final_cmds = []
 
+        #Drop all packets that aren't explicitely allowed
+        #init_cmds << "iptables -P FORWARD DROP"
+        init_cmds << "iptables -P FORWARD ACCEPT"
+
        [ 'raw', 'nat', 'filter' ].each { |table|
          [ 'F', 'Z', 'X' ].each { |xcmd|
            init_cmds << "iptables -t #{table} -#{xcmd}"
          }
        }
-
-
-        use_ipset = true
-        if use_ipset
+
+        if @node.manifest.config.use_ipset
          ['F','X'].each { |xcmd|
            init_cmds << "ipset -#{xcmd}"
          }
@@ -365,25 +536,28 @@ module Dcmgr
      end
 
      def init_static_nat(inst_maps = [])
+        chain_cmds = []
+        #ref_cmds = []
        accept_cmds = []
        nat_cmds = []
-
+
        inst_maps.each { |inst_map|
+          chain_cmds << build_nat_chains(inst_map)
+          #ref_cmds = ref_nat_chains(inst_map)
          accept_cmds << nat_exceptions(inst_map) unless @node.manifest.config.use_ipset
          nat_cmds << nat_instance(inst_map)
        }
-
-        do_exec([accept_cmds,nat_cmds])
+
+        do_exec([chain_cmds,accept_cmds,nat_cmds])
      end
 
      def init_bandwidth_limit(network_maps)
        do_exec([clear_bandwidth_limits,limit_bandwidth(network_maps)])
      end
 
-      def
-
-
-
+      def build_ebtables_chains(vif_map,action = :create)
+        actions = { :create => 'N' , :delete => 'X' }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
        ################################
        ## 0. chain name
        ################################
@@ -404,75 +578,90 @@ module Dcmgr
          chains << "d_#{vif_map[:uuid]}_s_hst_#{k}"
        }
 
+        # create user defined chains.
+        cmds = chains.map { |chain|
+          "ebtables -#{actions[action]} #{chain}"
+        }
+
+        cmds
+      end
+
+      def build_ebtables_basic_part(vif_map, inst_map, action = :create)
+        basic_cmds = []
+
+        actions = { :create => 'A' , :delete => 'D' }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
+
+        hva_ipv4 = Isono::Util.default_gw_ipaddr
+
        ################################
        ## 1. basic part
        ################################
+
+        protocol_map = protocol_map(:ebtables)
 
-
-        [ 'N' ].each { |xcmd|
-          chains.each { |chain|
-            basic_cmds << "ebtables -#{xcmd} #{chain}"
-          }
-        }
+        basic_cmds << build_ebtables_chains(vif_map,action) if action == :create
 
        # jumt to user defined chains
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
+        basic_cmds << "ebtables -#{actions[action]} FORWARD -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
+        basic_cmds << "ebtables -#{actions[action]} FORWARD -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
+        basic_cmds << "ebtables -#{actions[action]} INPUT -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}_d_hst"
+        basic_cmds << "ebtables -#{actions[action]} OUTPUT -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}_s_hst"
 
        # IP protocol routing
        protocol_map.each { |k,v|
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]} -p #{v} -j s_#{vif_map[:uuid]}_#{k}"
+          basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]} -p #{v} -j d_#{vif_map[:uuid]}_#{k}"
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst -p #{v} -j s_#{vif_map[:uuid]}_d_hst_#{k}"
+          basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst -p #{v} -j d_#{vif_map[:uuid]}_s_hst_#{k}"
        }
 
        if @node.manifest.config.packet_drop_log
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]} --log-level 4 --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst --log-level 4 --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst:' -j CONTINUE" if @node.manifest.config.packet_drop_log
        end
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
+        basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]} -j DROP"
+        basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst -j DROP"
        # anti spoof: mac # guest -> *
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
+        basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
+        basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
        # guest <- * (broadcast)
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
 
        # guest <- *
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
-        basic_cmds << "ebtables
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
+        basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
 
        # anti spoof: ipv4
        inst_map[:ips].each { |ipv4|
-          #next if is_natted_ip? ipv4
          # guest -> *
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
+          basic_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
          # guest <- *
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
-          basic_cmds << "ebtables
+          basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+          basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+          basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
+          basic_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
        }
 
+        basic_cmds << build_ebtables_chains(vif_map,action) if action == :delete
        basic_cmds
      end
 
-      def build_ebtables_group_part(vif_map, inst_map, viftable_map)
+      def build_ebtables_group_part(vif_map, inst_map, viftable_map, action = :create)
        group_cmds = []
        hva_ipv4 = Isono::Util.default_gw_ipaddr
+        actions = { :create => 'A' , :delete => 'D' }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
 
        ################################
        ## 2. group part
@@ -482,23 +671,26 @@ module Dcmgr
        network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
        raise "UnknownNetworkId" if network_map.nil?
        joined_network = IPAddress("#{network_map[:ipv4_gw]}/#{network_map[:prefix]}")
-
-        [ network_map[:dns_server], network_map[:dhcp_server] ].each { |ipv4|
-          next
+
+        [ network_map[:dns_server], network_map[:dhcp_server], network_map[:metadata_server] ].each { |ipv4|
+          next if ipv4.nil? or not joined_network.include? IPAddress(ipv4)
          same_subnet_ipv4s << ipv4
-       }
+        }
 
        # network resource node(s)
-        ng_maps = rpc.request('hva-collector', '
+        ng_maps = rpc.request('hva-collector', 'get_security_groups_of_instance', inst_map[:uuid])
        rules = ng_maps.map { |ng_map|
          ng_map[:rules].map { |rule| rule[:permission] }
        }.flatten
        build_rule(rules).each do |rule|
+          # <ArgumentError: Invalid IP "0.0.0.0">
+          next if rule[:ip_source] == "0.0.0.0/0"
+
          begin
-            # <ArgumentError: Invalid IP "0.0.0.0">
            next unless joined_network.include? IPAddress(rule[:ip_source])
            same_subnet_ipv4s << rule[:ip_source]
          rescue Exception => e
+            #raise unless e.is_a? ArgumentError
            p e
          end
        end
@@ -516,72 +708,93 @@ module Dcmgr
          # get_macaddr_by_ipv4, ipv4
          if ipv4 == hva_ipv4
            #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [host] ***-****** (#{ipv4})"
-            group_cmds << "ebtables
-            group_cmds << "ebtables
+            group_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+            group_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
          elsif guest_ipv4s.include?(ipv4)
            #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [guest] #{viftable_map[ipv4]}(#{ipv4})"
 
            # guest->guest
-            group_cmds << "ebtables
-            group_cmds << "ebtables
+            group_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw d_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+            group_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
            # guest->host
-            group_cmds << "ebtables
-            group_cmds << "ebtables
-
+            group_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+            group_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
+
            unless viftable_map[ipv4].nil?
              # guest->guest
-              group_cmds << "ebtables
-              group_cmds << "ebtables
+              group_cmds << "ebtables -#{actions[action]} d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Arv d_#{viftable_map[ipv4]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+              group_cmds << "ebtables -#{actions[action]} d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
 
              # guest->host
-              group_cmds << "ebtables
-              group_cmds << "ebtables
+              group_cmds << "ebtables -#{actions[action]} s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Arv s_#{viftable_map[ipv4]}_d_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+              group_cmds << "ebtables -#{actions[action]} s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
            end
          else
            #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [other] ***-******** (#{ipv4})"
-            group_cmds << "ebtables
-            group_cmds << "ebtables
+            group_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw :d_#{vif_map[:uuid]}_arp' -j CONTINUE" if @node.manifest.config.packet_drop_log
+            group_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
          end
        end
 
        group_cmds
      end
-
+
+      def build_ebtables_final_part(vif_map, action = :create)
        final_cmds = []
+        actions = { :create => 'A' , :delete => 'D' }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
 
        ################################
        ## 3. final part
        ################################
        # deny,allow
-        final_cmds << "ebtables
-        final_cmds << "ebtables
-        final_cmds << "ebtables
-        final_cmds << "ebtables
+        final_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp --log-level 4 --log-ip --log-arp --log-prefix 'D d_#{vif_map[:uuid]}_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        final_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp --log-level 4 --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE" if @node.manifest.config.packet_drop_log
+        final_cmds << "ebtables -#{actions[action]} d_#{vif_map[:uuid]}_arp -j DROP"
+        final_cmds << "ebtables -#{actions[action]} s_#{vif_map[:uuid]}_d_hst_arp -j DROP"
 
        final_cmds
      end
 
-      def
-
+      def build_iptables_chains(protocol_map, vif_map, action = :create)
+        actions = { :create => ['N'] , :delete => ['F','X'] }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
 
-
-        raise "UnknownNetworkId" if network_map.nil?
+        chain_cmds = []
 
        ################################
        ## 0. chain name
        ################################
 
+        [ 's', 'd' ].each do |bound|
+          protocol_map.each { |k,v|
+            actions[action].each do |act|
+              chain_cmds << "iptables -#{act} #{bound}_#{vif_map[:uuid]}"
+              chain_cmds << "iptables -#{act} #{bound}_#{vif_map[:uuid]}_#{k}"
+
+              chain_cmds << "iptables -#{act} #{bound}_#{vif_map[:uuid]}_drop"
+              chain_cmds << "iptables -#{act} #{bound}_#{vif_map[:uuid]}_#{k}_drop"
+            end
+          }
+        end
+
+        chain_cmds
+      end
+
+      def build_iptables_basic_part(vif_map, inst_map, action = :create)
+        basic_cmds = []
+
+        actions = { :create => 'A' , :delete => 'D' }
+        raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
+
+        network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
+        raise "UnknownNetworkId" if network_map.nil?
+
        # support IP protocol
        protocol_map = protocol_map(:iptables)
 
        # make chain names.
-
-        protocol_map.each { |k,v|
-          chains << "s_#{vif_map[:uuid]}_#{k}"
-          chains << "d_#{vif_map[:uuid]}_#{k}"
-        }
-        chains << "s_#{vif_map[:uuid]}"
-        chains << "d_#{vif_map[:uuid]}"
+        basic_cmds << build_iptables_chains(protocol_map, vif_map, :create) if action == :create
 
        ################################
        ## 1. basic part
@@ -589,133 +802,141 @@ module Dcmgr
|
|
589
802
|
|
590
803
|
# metadata-server
|
591
804
|
port = network_map[:metadata_server_port] || 80
|
592
|
-
[
|
593
|
-
basic_cmds << "iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-in #{vif_map[:uuid]} -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:#{port}"
|
594
|
-
}
|
595
|
-
# create user defined chains.
|
596
|
-
[ 'N' ].each { |xcmd|
|
597
|
-
chains.each { |chain|
|
598
|
-
basic_cmds << "iptables -#{xcmd} #{chain}"
|
599
|
-
|
600
|
-
# logger & drop
|
601
|
-
basic_cmds << "iptables -N #{chain}_drop"
|
602
|
-
if @node.manifest.config.packet_drop_log
|
603
|
-
basic_cmds << "iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix 'D #{chain}:'"
|
604
|
-
end
|
605
|
-
basic_cmds << "iptables -A #{chain}_drop -j DROP"
|
606
|
-
}
|
607
|
-
}
|
805
|
+
basic_cmds << "iptables -t nat -#{actions[action]} PREROUTING -m physdev --physdev-in #{vif_map[:uuid]} -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:#{port}"
|
608
806
|
|
609
807
|
# DHCP Server
|
610
|
-
basic_cmds << "iptables
|
611
|
-
basic_cmds << "iptables
|
808
|
+
basic_cmds << "iptables -#{actions[action]} d_#{vif_map[:uuid]} -p udp ! -s #{network_map[:dhcp_server]} --sport 67 -j d_#{vif_map[:uuid]}_udp_drop"
|
809
|
+
basic_cmds << "iptables -#{actions[action]} d_#{vif_map[:uuid]} -p udp ! -s #{network_map[:dhcp_server]} --sport 68 -j d_#{vif_map[:uuid]}_udp_drop"
|
612
810
|
|
613
811
|
# group nodes
|
614
812
|
# group node IPv4 addresses.
|
615
813
|
ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
|
616
814
|
ipv4s << network_map[:ipv4_gw]
|
617
815
|
ipv4s.uniq.reverse_each { |addr|
|
618
|
-
basic_cmds << "iptables
|
816
|
+
basic_cmds << "iptables -#{actions[action]} d_#{vif_map[:uuid]} -s #{addr} -j ACCEPT"
|
619
817
|
}
 
       # IP protocol routing
       [ 's', 'd' ].each do |bound|
         protocol_map.each { |k,v|
-          basic_cmds << "iptables
+          #basic_cmds << "iptables -#{chain_actions[action]} #{bound}_#{vif_map[:uuid]}_#{k}"
+          # Log dropped packets
+          ["#{bound}_#{vif_map[:uuid]}", "#{bound}_#{vif_map[:uuid]}_#{k}"].each { |chain|
+            basic_cmds << "iptables -#{actions[action]} #{chain}_drop -j LOG --log-level 4 --log-prefix 'D #{chain}:'" if @node.manifest.config.packet_drop_log
+            basic_cmds << "iptables -#{actions[action]} #{chain}_drop -j DROP"
+          }
 
           case k
           when 'tcp'
             case bound
             when 's'
-              basic_cmds << "iptables
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
             when 'd'
-              basic_cmds << "iptables
-              basic_cmds << "iptables
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
             end
           when 'udp'
             case bound
             when 's'
-              basic_cmds << "iptables
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
             when 'd'
-              basic_cmds << "iptables
-              basic_cmds << "iptables
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
             end
           when 'icmp'
             case bound
             when 's'
-              basic_cmds << "iptables
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
             when 'd'
-              basic_cmds << "iptables
-              basic_cmds << "iptables
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
+              basic_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
             end
           end
         }
       end
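This block builds the per-VIF chain topology: a source chain s_<vif> and a destination chain d_<vif>, each fanning out into per-protocol chains (s_<vif>_tcp and so on), each paired with a _drop chain that optionally logs before dropping. The stateful split matters: outbound ('s') chains pass NEW and ESTABLISHED traffic into the protocol chains, while inbound ('d') chains short-circuit ESTABLISHED (and, for tcp/icmp, RELATED) replies straight to ACCEPT, so only unsolicited inbound packets face the security-group rules. A sketch of the resulting chain names (illustrative; the gem derives these inline above):

    # Enumerate the chain set created for one VIF. Example VIF name.
    vif       = 'vif-demo0001'
    protocols = ['tcp', 'udp', 'icmp']
    chains = ['s', 'd'].map { |bound|
      base = "#{bound}_#{vif}"
      [base, "#{base}_drop"] + protocols.map { |p| ["#{base}_#{p}", "#{base}_#{p}_drop"] }.flatten
    }.flatten
    # => ["s_vif-demo0001", "s_vif-demo0001_drop", "s_vif-demo0001_tcp", ...]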
 
-      basic_cmds << "iptables
-      basic_cmds << "iptables
+      basic_cmds << "iptables -#{actions[action]} FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
+      basic_cmds << "iptables -#{actions[action]} FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
 
       ##
       ## ACCEPT
       ##
       # DHCP Server
-      basic_cmds << "iptables
-      basic_cmds << "iptables
+      basic_cmds << "iptables -#{actions[action]} d_#{vif_map[:uuid]}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
+      basic_cmds << "iptables -#{actions[action]} d_#{vif_map[:uuid]}_udp -p udp -s #{network_map[:dhcp_server]} --sport 68 -j ACCEPT"
 
       # DNS Server
-      basic_cmds << "iptables
+      basic_cmds << "iptables -#{actions[action]} s_#{vif_map[:uuid]}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"
+
+      # MetaData Server
+      basic_cmds << "iptables -#{actions[action]} s_#{vif_map[:uuid]}_tcp -p tcp -d #{network_map[:metadata_server]} --dport #{network_map[:metadata_server_port]} -j ACCEPT"
 
       ##
       ## DROP
       ##
       protocol_map.each { |k,v|
         # DHCP
-        basic_cmds << "iptables
+        basic_cmds << "iptables -#{actions[action]} s_#{vif_map[:uuid]} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif_map[:uuid]}_#{k}_drop"
         # DNS
-        basic_cmds << "iptables
+        basic_cmds << "iptables -#{actions[action]} s_#{vif_map[:uuid]} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif_map[:uuid]}_#{k}_drop"
       }
 
+      basic_cmds << build_iptables_chains(protocol_map, vif_map, :delete) if action == :delete
+
       basic_cmds
     end
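The refactor threads an action parameter through every builder: :create renders each rule with -A (append) and :delete renders the identical rule with -D, so teardown is a rule-for-rule mirror of setup, and the delete pass additionally recurses into build_iptables_chains to remove the per-VIF chains themselves. A minimal sketch of that symmetry (hypothetical helper, not the gem's API):

    ACTIONS = { :create => 'A', :delete => 'D' }

    def render(spec, action = :create)
      raise ArgumentError, "#{action} is not a valid action." unless ACTIONS.key?(action)
      "iptables -#{ACTIONS[action]} #{spec}"
    end

    render('FORWARD -m physdev --physdev-in vif-demo0001 -j s_vif-demo0001', :create)
    # => "iptables -A FORWARD -m physdev --physdev-in vif-demo0001 -j s_vif-demo0001"
    render('FORWARD -m physdev --physdev-in vif-demo0001 -j s_vif-demo0001', :delete)
    # => "iptables -D FORWARD -m physdev --physdev-in vif-demo0001 -j s_vif-demo0001"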
 
-    def build_iptables_group_part(vif_map, inst_map)
+    def build_iptables_group_part(vif_map, inst_map, action = :create)
       group_cmds = []
 
       ################################
       ## 2. group part
       ################################
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+      case action
+      when :delete
+        protocol_map(:iptables).each { |k,v|
+          # Flush all security group chains
+          group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{k} -F"
+        }
+      when :create
+        ng_maps = rpc.request('hva-collector', 'get_security_groups_of_instance', inst_map[:uuid])
+        rules = ng_maps.map { |ng_map|
+          ng_map[:rules].map { |rule| rule[:permission] }
+        }.flatten
+
+        # security group
+        build_rule(rules).each do |rule|
+          case rule[:ip_protocol]
+          when 'tcp', 'udp'
+            if rule[:ip_fport] == rule[:ip_tport]
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_fport]} -j ACCEPT"
+            else
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_fport]}:#{rule[:ip_tport]} -j ACCEPT"
+            end
+          when 'icmp'
+            # icmp
+            #   This extension can be used if `--protocol icmp' is specified. It provides the following option:
+            #   [!] --icmp-type {type[/code]|typename}
+            #     This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command
+            #       iptables -p icmp -h
+            if rule[:icmp_type] == -1 && rule[:icmp_code] == -1
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
+            else
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
+            end
+          end
         end
-
+      else
+        raise ArgumentError, "#{action} is not a valid action."
       end
-
       group_cmds
     end
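Each security-group rule arrives as a tuple (ip_protocol, ip_source, ip_fport, ip_tport, icmp_type, icmp_code) and becomes a single ACCEPT on the VIF's inbound per-protocol chain: tcp/udp rules collapse equal from/to ports into one --dport, unequal ones into a --dport range, and icmp rules use --icmp-type type/code, with the -1/-1 pair meaning "all ICMP types" as in the EC2 API. A standalone sketch of that translation (an illustrative stand-in for the logic above, not the gem's API):

    def rule_to_iptables(vif, rule)
      chain = "d_#{vif}_#{rule[:ip_protocol]}"
      case rule[:ip_protocol]
      when 'tcp', 'udp'
        ports = rule[:ip_fport] == rule[:ip_tport] ? rule[:ip_fport] : "#{rule[:ip_fport]}:#{rule[:ip_tport]}"
        "iptables -A #{chain} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{ports} -j ACCEPT"
      when 'icmp'
        type = (rule[:icmp_type] == -1 && rule[:icmp_code] == -1) ? '' : " --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]}"
        "iptables -A #{chain} -p icmp -s #{rule[:ip_source]}#{type} -j ACCEPT"
      end
    end

    rule_to_iptables('vif-demo0001', :ip_protocol => 'tcp', :ip_source => '0.0.0.0/0', :ip_fport => 22, :ip_tport => 22)
    # => "iptables -A d_vif-demo0001_tcp -p tcp -s 0.0.0.0/0 --dport 22 -j ACCEPT"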
 
-    def build_iptables_final_part(vif_map)
+    def build_iptables_final_part(vif_map, action = :create)
+      actions = { :create => 'A' , :delete => 'D' }
+      raise ArgumentError, "#{action} is not a valid action. Valid actions are #{actions.keys.join(',')}" unless actions.keys.member?(action)
+
       final_cmds = []
 
       # support IP protocol
@@ -725,15 +946,19 @@ module Dcmgr
       ## 3. final part
       ################################
 
-      # drop
-      protocol_map.each { |k,v|
-        final_cmds << "iptables -A d_#{vif_map[:uuid]}_#{k} -p #{k} -j d_#{vif_map[:uuid]}_#{k}_drop"
-      }
-
-      # IP protocol routing
+      # Send dropped ip packets to their respective drop chains, based on their protocol
       [ 'd' ].each do |bound|
         protocol_map.each { |k,v|
-
+          # Any packets that travel the protocol specific chains and don't get accepted are directed to drop chains
+          # where logging can take place before the packets are dropped by the FORWARD chain policy
+          final_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}_drop"
+        }
+      end
+
+      # Allow outgoing traffic from the instance
+      [ 's' ].each do |bound|
+        protocol_map.each { |k,v|
+          final_cmds << "iptables -#{actions[action]} #{bound}_#{vif_map[:uuid]}_#{k} -p #{k} -j ACCEPT"
         }
       end
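The final part sets the default policy per direction: inbound base chains fall through to the per-protocol _drop chains, so anything a security-group rule did not ACCEPT is logged (when enabled) and dropped, while outbound per-protocol chains terminate in ACCEPT, so instances may initiate traffic freely. The net effect, with example values:

    # Illustrative: effective defaults after the final part for one VIF.
    vif = 'vif-demo0001'
    ['tcp', 'udp', 'icmp'].each do |proto|
      puts "iptables -A d_#{vif} -p #{proto} -j d_#{vif}_#{proto}_drop"  # default deny inbound
      puts "iptables -A s_#{vif}_#{proto} -p #{proto} -j ACCEPT"         # default allow outbound
    end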
 
@@ -767,8 +992,8 @@ module Dcmgr
       # ip_fport : tcp,udp? 1 - 16bit, icmp: -1
       ip_protocol, ip_fport = from_pair.split(':')
 
-      # protocol  : [ ip4 | ip6 |
-      # ip_source : ip4? xxx.xxx.xxx.xxx./[0-32], ip6? (not yet supprted)
+      # protocol  : [ ip4 | ip6 | security_group_uuid ]
+      # ip_source : ip4? xxx.xxx.xxx.xxx./[0-32], ip6? (not yet supported)
       protocol, ip_source = source_pair.split(':')
 
       begin
@@ -784,9 +1009,9 @@ module Dcmgr
           if prefix.to_i == 0
             ip_source = ip_addr
           end
-        when s.scan(/
+        when s.scan(/ng-\w+/)
           from_group = true
-          inst_maps = rpc.request('hva-collector', '
+          inst_maps = rpc.request('hva-collector', 'get_instances_of_security_group', protocol)
           inst_maps.each { |inst_map|
             ipv4s << inst_map[:ips]
           }
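The updated parser recognizes a second kind of rule source: besides an IPv4 CIDR, a source can be another security group, identified by a uuid matching ng-\w+; the rule then applies to the current IPv4 addresses of that group's member instances, fetched from hva-collector. A sketch of that expansion with the RPC round trip stubbed out (illustrative, not the gem's API):

    GROUP_IPS = { 'ng-demogrp' => ['10.0.5.10', '10.0.5.11'] }  # stub for the hva-collector lookup

    def expand_group_source(rule)
      return [rule] unless rule[:ip_source] =~ /\Ang-\w+\z/
      GROUP_IPS.fetch(rule[:ip_source], []).map { |ip| rule.merge(:ip_source => ip) }
    end

    expand_group_source(:ip_protocol => 'tcp', :ip_source => 'ng-demogrp', :ip_fport => 22, :ip_tport => 22)
    # => one rule per member address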
|