wakame-vdc-dcmgr 10.12.0 → 11.06.0

Files changed (71)
  1. data/LICENSE +164 -201
  2. data/Rakefile +6 -11
  3. data/bin/collector +10 -24
  4. data/config/dcmgr.conf.example +18 -6
  5. data/config/initializers/isono.rb +7 -23
  6. data/config/initializers/sequel.rb +11 -2
  7. data/lib/dcmgr.rb +70 -11
  8. data/lib/dcmgr/cli/base.rb +74 -0
  9. data/lib/dcmgr/cli/errors.rb +59 -0
  10. data/lib/dcmgr/cli/group.rb +101 -0
  11. data/lib/dcmgr/cli/host.rb +101 -0
  12. data/lib/dcmgr/cli/image.rb +108 -0
  13. data/lib/dcmgr/cli/keypair.rb +72 -0
  14. data/lib/dcmgr/cli/network.rb +198 -0
  15. data/lib/dcmgr/cli/quota.rb +28 -0
  16. data/lib/dcmgr/cli/spec.rb +82 -0
  17. data/lib/dcmgr/cli/storage.rb +88 -0
  18. data/lib/dcmgr/cli/tag.rb +81 -0
  19. data/lib/dcmgr/cli/vlan.rb +53 -0
  20. data/lib/dcmgr/drivers/hypervisor.rb +33 -0
  21. data/lib/dcmgr/drivers/iijgio_storage.rb +37 -0
  22. data/lib/dcmgr/drivers/kvm.rb +118 -0
  23. data/lib/dcmgr/drivers/lxc.rb +167 -0
  24. data/lib/dcmgr/drivers/s3_storage.rb +39 -0
  25. data/lib/dcmgr/drivers/snapshot_storage.rb +51 -0
  26. data/lib/dcmgr/endpoints/core_api.rb +188 -324
  27. data/lib/dcmgr/endpoints/core_api_mock.rb +52 -3
  28. data/lib/dcmgr/endpoints/errors.rb +73 -32
  29. data/lib/dcmgr/endpoints/metadata.rb +163 -16
  30. data/lib/dcmgr/helpers/cli_helper.rb +1 -1
  31. data/lib/dcmgr/helpers/nic_helper.rb +35 -0
  32. data/lib/dcmgr/logger.rb +5 -1
  33. data/lib/dcmgr/messaging_client.rb +117 -0
  34. data/lib/dcmgr/models/account.rb +27 -3
  35. data/lib/dcmgr/models/base_new.rb +21 -7
  36. data/lib/dcmgr/models/host_pool.rb +27 -7
  37. data/lib/dcmgr/models/image.rb +31 -3
  38. data/lib/dcmgr/models/instance.rb +72 -23
  39. data/lib/dcmgr/models/instance_nic.rb +12 -2
  40. data/lib/dcmgr/models/instance_spec.rb +16 -0
  41. data/lib/dcmgr/models/ip_lease.rb +37 -1
  42. data/lib/dcmgr/models/netfilter_group.rb +7 -7
  43. data/lib/dcmgr/models/network.rb +42 -3
  44. data/lib/dcmgr/models/quota.rb +25 -0
  45. data/lib/dcmgr/models/request_log.rb +26 -11
  46. data/lib/dcmgr/models/ssh_key_pair.rb +14 -1
  47. data/lib/dcmgr/models/storage_pool.rb +19 -72
  48. data/lib/dcmgr/models/tag.rb +5 -0
  49. data/lib/dcmgr/models/vlan_lease.rb +8 -0
  50. data/lib/dcmgr/models/volume.rb +26 -8
  51. data/lib/dcmgr/models/volume_snapshot.rb +37 -0
  52. data/lib/dcmgr/node_modules/hva_collector.rb +56 -36
  53. data/lib/dcmgr/node_modules/instance_ha.rb +1 -1
  54. data/lib/dcmgr/node_modules/instance_monitor.rb +70 -0
  55. data/lib/dcmgr/node_modules/service_netfilter.rb +914 -0
  56. data/lib/dcmgr/node_modules/sta_collector.rb +7 -30
  57. data/lib/dcmgr/rack/request_logger.rb +60 -0
  58. data/lib/dcmgr/rack/run_initializer.rb +42 -0
  59. data/lib/dcmgr/rpc/hva_handler.rb +388 -0
  60. data/lib/dcmgr/rubygems.rb +7 -0
  61. data/lib/dcmgr/storage_service.rb +98 -0
  62. data/lib/dcmgr/tags.rb +2 -2
  63. data/lib/dcmgr/version.rb +8 -0
  64. data/lib/ext/time.rb +8 -0
  65. data/lib/sinatra/respond_to.rb +3 -0
  66. data/lib/sinatra/sequel_transaction.rb +20 -5
  67. data/web/api/config.ru +9 -13
  68. data/web/metadata/config.ru +10 -13
  69. metadata +162 -120
  70. data/lib/dcmgr/models/physical_host.rb +0 -67
  71. data/lib/dcmgr/web/base.rb +0 -21
data/lib/dcmgr/models/tag.rb
@@ -160,5 +160,10 @@ module Dcmgr::Models
     def to_api_document
       to_hash.merge({:type_id=>self.class.to_s.split('::').last})
     end
+
+    def self.declare(account_id, tag_class_name, name)
+      Dcmgr::Tags.const_get(tag_class_name).find_or_create(:account_id=>account_id,
+                                                           :name=>name)
+    end
   end
 end
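The new `declare` helper is idempotent: `find_or_create` returns the existing tag row when one with the same account and name is already registered. A minimal usage sketch (the account ID and tag class name here are hypothetical, assuming this lands on the Tag model):

    # Resolves Dcmgr::Tags::NetworkPool and creates the tag only once.
    t1 = Dcmgr::Models::Tag.declare('a-00000000', 'NetworkPool', 'default')
    t2 = Dcmgr::Models::Tag.declare('a-00000000', 'NetworkPool', 'default')
    t1.id == t2.id   # => true; no duplicate row is created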
data/lib/dcmgr/models/vlan_lease.rb
@@ -13,5 +13,13 @@ module Dcmgr::Models
     with_timestamps

     one_to_many :networks
+
+    def validate
+
+      unless 1 <= self.tag_id.to_i && self.tag_id.to_i <= 4095
+        errors.add(:tag_id, "Tag ID is out of range (1-4095)")
+      end
+
+    end
   end
 end
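The `validate` hook keeps `tag_id` inside the 802.1Q VLAN ID range. Under Sequel's validation protocol an out-of-range lease becomes unsaveable; a sketch of the expected behavior (model name assumed from the file list):

    lease = Dcmgr::Models::VlanLease.new(:tag_id => 5000)
    lease.valid?            # => false
    lease.errors[:tag_id]   # => ["Tag ID is out of range (1-4095)"]
    lease.save              # typically raises Sequel::ValidationFailed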
data/lib/dcmgr/models/volume.rb
@@ -32,13 +32,16 @@ module Dcmgr::Models
       String :host_device_name
       String :guest_device_name
       String :export_path, :null=>false
+      # String :intermediate_path, :null=>false
       Text :transport_information
       Time :deleted_at
       Time :attached_at
       Time :detached_at
+
       index :storage_pool_id
       index :instance_id
       index :snapshot_id
+      index :deleted_at
     end
     with_timestamps

@@ -47,6 +50,15 @@ module Dcmgr::Models

     plugin ArchiveChangedColumn, :histories

+    subset(:lives, {:deleted_at => nil})
+
+    RECENT_TERMED_PERIOD=(60 * 15)
+    # lists the volumes are available and deleted within
+    # RECENT_TERMED_PERIOD sec.
+    def_dataset_method(:alives_and_recent_termed) {
+      filter("deleted_at IS NULL OR deleted_at >= ?", (Time.now.utc - RECENT_TERMED_PERIOD))
+    }
+
     # serialization plugin must be defined at the bottom of all class
     # method calls.
     # Possible column data:
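Both `subset(:lives, ...)` and `def_dataset_method` extend the model's dataset, so the new filters chain with ordinary Sequel queries. A sketch of how callers can combine them:

    Dcmgr::Models::Volume.lives.count                    # volumes not yet deleted
    Dcmgr::Models::Volume.alives_and_recent_termed.all   # plus those deleted in the last 15 minutes
    # Chains like any Sequel dataset, e.g. per-account live usage:
    Dcmgr::Models::Volume.lives.filter(:account_id=>'a-00000000').sum(:size)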
@@ -58,19 +70,20 @@ module Dcmgr::Models
     class RequestError < RuntimeError; end

     def before_create
-      # check the volume size
       sp = self.storage_pool
-      volume_size = Volume.dataset.where(:storage_pool_id=> self.storage_pool_id).get{sum(:size)}
-      total_size = sp.offerring_disk_space - volume_size.to_i
+      volume_size = sp.volumes_dataset.lives.sum(:size).to_i
+      # check if the sum of available volume and new volume is under
+      # the limit of offering capacity.
+      total_size = sp.offering_disk_space - volume_size.to_i
       if self.size > total_size
         raise DiskError, "out of disk space"
       end

-      super
-    end
-
-    def before_save
-      self.updated_at = Time.now
+      # TODO: Here may not be the right place for capacity validation.
+      per_account_totoal = self.class.filter(:account_id=>self.account_id).lives.sum(:size).to_i
+      if self.account.quota.volume_total_size < per_account_totoal + self.size.to_i
+        raise DiskError, "Out of account quota: #{self.account.quota.volume_total_size}, #{self.size.to_i}, #{per_account_totoal}"
+      end
       super
     end

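The added quota check is a plain sum-plus-request comparison: with `quota.volume_total_size` of 100GB, 80GB of live volumes already on the account, and a 30GB create request, `100 < 80 + 30` holds and `before_create` raises `DiskError` before the row is inserted. Note that both checks count only `lives` volumes, so capacity from deleted volumes is released immediately.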
@@ -134,8 +147,13 @@ module Dcmgr::Models
        :state => self.state,
        :instance_id => (self.instance && self.instance.canonical_uuid),
        :deleted_at => self.deleted_at,
+       :detached_at => self.detached_at,
      }
    end
+
+    def ready_to_take_snapshot?
+      %w(available attached).member?(self.state)
+    end

     def create_snapshot(account_id)
       vs = VolumeSnapshot.create(:account_id=>account_id,
data/lib/dcmgr/models/volume_snapshot.rb
@@ -17,13 +17,23 @@ module Dcmgr::Models
       Fixnum :size, :null=>false
       Fixnum :status, :null=>false, :default=>0
       String :state, :null=>false, :default=>STATE_TYPE_REGISTERING
+      String :destination_key, :null=>false
+      Time :deleted_at
       index :storage_pool_id
+      index :deleted_at
     end
     with_timestamps

     many_to_one :storage_pool
     plugin ArchiveChangedColumn, :histories

+    RECENT_TERMED_PERIOD=(60 * 15)
+    # lists the volumes are available and deleted within
+    # RECENT_TERMED_PERIOD sec.
+    def_dataset_method(:alives_and_recent_termed) {
+      filter("deleted_at IS NULL OR deleted_at >= ?", (Time.now.utc - RECENT_TERMED_PERIOD))
+    }
+
     class RequestError < RuntimeError; end

     # Hash data for API response.
@@ -34,7 +44,10 @@ module Dcmgr::Models
        :state => self.state,
        :size => self.size,
        :origin_volume_id => self.origin_volume_id,
+       :destination_id => self.destination,
+       :destination_name => self.display_name,
        :created_at => self.created_at,
+       :deleted_at => self.deleted_at,
      }
    end

@@ -44,9 +57,23 @@ module Dcmgr::Models
       storage_pool.create_volume(account_id, self.size, self.canonical_uuid)
     end

+    def display_name
+      repository_config = Dcmgr::StorageService.snapshot_repository_config
+      repository = repository_config[self.destination]
+      repository['display_name']
+    end
+
     def origin_volume
       Volume[origin_volume_id]
     end
+
+    def snapshot_filename
+      "#{self.canonical_uuid}.zsnap"
+    end
+
+    def destination
+      self.destination_key.split('@')[0]
+    end

     def self.delete_snapshot(account_id, uuid)
       vs = self.dataset.where(:account_id => account_id).where(:uuid => uuid.split('-').last).first
@@ -56,5 +83,15 @@ module Dcmgr::Models
       vs.state = :deleting
       vs.save_changes
     end
+
+    def update_destination_key(account_id, destination_key)
+      self.destination_key = destination_key
+      self.save_changes
+    end
+
+    def self.store_local?(destination)
+      destination.nil?
+    end
+
   end
 end
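`destination` is everything before the first '@' in `destination_key`, and is used to look up a repository entry in `StorageService.snapshot_repository_config`. The exact key layout is not shown in this diff; a hedged sketch, assuming a `repository@location` style value:

    # Hypothetical destination_key value, for illustration only.
    vs.destination_key = 's3@snapshot-bucket'
    vs.destination          # => "s3"  (repository id before the '@')
    vs.snapshot_filename    # => "#{vs.canonical_uuid}.zsnap"
    Dcmgr::Models::VolumeSnapshot.store_local?(nil)   # => true (no destination given)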
data/lib/dcmgr/node_modules/hva_collector.rb
@@ -6,6 +6,7 @@ module Dcmgr
   module NodeModules
     class HvaCollector < Isono::NodeModules::Base
       include Isono::NodeModules
+      include Dcmgr::Logger

       initialize_hook do
         rpc = RpcChannel.new(node)
@@ -23,7 +24,6 @@ module Dcmgr
       end

       def get_instance(instance_id)
-        Models::Instance.lock!
         inst = Models::Instance[instance_id]
         raise "UnknownInstanceID" if inst.nil?

@@ -32,7 +32,6 @@ module Dcmgr
       end

       def update_instance(instance_id, data)
-        Models::Instance.lock!
         inst = Models::Instance[instance_id]
         raise "UnknownInstanceID" if inst.nil?
         if data[:state] == :terminated
@@ -48,54 +47,76 @@ module Dcmgr
       end

       def get_netfilter_groups_of_instance(instance_id)
-        Models::Instance.lock!
         inst = Models::Instance[instance_id]
         raise "UnknownInstanceID" if inst.nil?

         inst.netfilter_groups.map { |g| g.to_hash }
       end

-      def get_group_instance_ipv4s(instance_id)
-        Models::Instance.lock!
+      #Returns an array containing the ip addresses of all instances in the same security group.
+      # _set_ determines which ip addresses are returned. There are 3 possible values
+      # :inside is the default value. This returns all inside ip addresses
+      # :outside returns all the outside addresses for instances that are natted.
+      # :all returns all ip addresses regardless of whether they're natted or not
+      def get_group_instance_ipv4s(instance_id,set = :inside)
         inst = Models::Instance[instance_id]
         raise "UnknownInstanceID" if inst.nil?
-
-        ipv4s = inst.netfilter_groups.map { |netfilter_group|
-          next if netfilter_group.nil?
-          netfilter_group.instance_netfilter_groups.map { |instance_netfilter_group|
-            next if instance_netfilter_group.nil?
-            instance_netfilter_group.instance_dataset.lives.all.map { |instance|
-              next if instance.nil?
-              instance.ips.map { |ip|
-                next if ip.nil?
-                ip.ipv4
+        raise "Unknown ip set." unless [:inside,:all,:outside].member?(set)
+
+        inst.netfilter_groups.compact.map { |netfilter_group|
+          netfilter_group.instances_dataset.lives.all.compact.map { |instance|
+            instance.ips.compact.map { |ip|
+              case set
+              when :all
+                ip
+              when :inside
+                ip.map {|i| unless i.is_natted? then i.ipv4 else nil end}.compact
+              when :outside
+                ip.map {|i| if i.is_natted? then i.ipv4 else nil end}.compact
+              end
             }
           }
-          }
-        }.flatten.uniq.compact
-        ipv4s
+        }.flatten.uniq.compact
       end

       # def get_instances_of_account_netfilter_group(account_id, netfilter_group_id)
       def get_instances_of_account_netfilter_group(account_id, netfilter_group_name)
-        Models::NetfilterGroup.lock!
         ng_map = Models::NetfilterGroup.find(:account_id => account_id, :name => netfilter_group_name)
         raise "UnknownNetfilterGroupID" if ng_map.nil?
-        inst_maps = ng_map.instance_netfilter_groups.map { |instance_netfilter_group|
-          instance_netfilter_group.instance_dataset.lives.all.map { |inst| inst.to_hash }
-        }.flatten.uniq.compact
+        inst_maps = ng_map.instances_dataset.lives.all.map { |inst| inst.to_hash }.flatten.uniq.compact
         inst_maps
       end

       def get_network(network_id)
-        Models::Network.lock!
         network = Models::Network[network_id]
         raise "UnknownNetworkID" if network.nil?
         network.to_hash
       end

+      #Returns the current iplease for nic with uuid _nic_uuid_
+      def get_iplease_for_nic(nic_uuid)
+        nic = Models::Taggable.find(nic_uuid)
+        Models::IpLease.find(:instance_nic_id => nic[:id])[:ipv4]
+      end
+
+      def get_nat_leases(nic_uuid)
+        #TODO: get this to work with non canonical uuid
+        nic = Models::Taggable.find(nic_uuid)
+
+        leases = Models::IpLease.filter({:instance_nic_id => nic[:id]} & ~{:network_id => nic[:network_id]})
+        leases.map {|l| l[:ipv4]}
+      end
+
+      def is_natted_ip?(ip)
+        lease = Models::IpLease.find(:ipv4 => ip)
+
+        return false if lease.nil?
+
+        #lease.instance_nic.network_id != lease.network_id
+        lease.is_natted?
+      end
+
       def get_networks
-        Models::Network.lock!
         networks = Models::Network.all
         networks.map { |network|
           network.to_hash
@@ -103,8 +124,6 @@ module Dcmgr
       end

       def get_dhcp_conf(network_name)
-        Models::Network.lock!
-
         build_network_segment = proc { |network|
           gwaddr = network.ipaddress
           h = {
@@ -121,7 +140,7 @@ module Dcmgr
           :addr2host=> [],
         }

-        network.ip_lease_dataset.filter(:type=>Models::IpLease::TYPE_AUTO).each { |ip|
+        network.ip_lease_dataset.filter(:alloc_type=>Models::IpLease::TYPE_AUTO).each { |ip|
           # ignore IPs unbound to vnic.
           next if ip.instance_nic.nil? || ip.instance_nic.instance.nil?

@@ -131,7 +150,7 @@ module Dcmgr
           }
           h[:addr2host] << {
             :hostname => ip.instance_nic.instance.fqdn_hostname,
-            :ipaddr => ip.ipv4
+            :ipaddr => network.nat_network.nil? ? ip.ipv4 : ip.nat_outside_lease.ipv4
           }
         }

@@ -157,23 +176,24 @@ module Dcmgr
       end

       def get_instances_of_netfilter_group(netfilter_group_id)
-        Models::NetfilterGroup.lock!
         g = Models::NetfilterGroup[netfilter_group_id]
         raise "UnknownNetfilterGroupID" if g.nil?
-        inst_maps = g.instance_netfilter_groups.map { |instance_netfilter_group|
-          instance_netfilter_group.instance_dataset.lives.all.map { |inst| inst.to_hash }
-        }.flatten.uniq.compact
-        inst_maps
+        g.instances.map {|i| i.to_hash }.flatten.uniq.compact
       end

       def get_alive_instances(node_id)
-        Models::HostPool.lock!
         hp = Models::HostPool.find(:node_id => node_id)
-        raise "UnknownNodeID", node_id if hp.nil?
+        if hp.nil?
+          logger.error("The node ID is not bound to HostPool yet: #{node_id}")
+          return []
+        end
         hps = Models::HostPool.where(:account_id => hp.account_id).all
         inst_on_hp = hps.map { |hp|
           inst_on_hp = hp.instances_dataset.lives.all.map { |inst|
-            inst.to_hash
+            inst_map = inst.to_hash
+            # Does the hva have instance?
+            next unless inst_map[:host_pool][:node_id] == node_id
+            inst_map
           }
         }.flatten.uniq.compact
         inst_on_hp
data/lib/dcmgr/node_modules/instance_ha.rb
@@ -7,7 +7,7 @@ module Dcmgr
     include Dcmgr::Logger

     initialize_hook do
-      @thread_pool = Isono::ThreadPool.new
+      @thread_pool = Isono::ThreadPool.new(1, 'InstanceHA')
       event = Isono::NodeModules::EventChannel.new(node)
       event.subscribe('hva/fault_instance', '#') { |args|
         @thread_pool.pass {
data/lib/dcmgr/node_modules/instance_monitor.rb (new file)
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+module Dcmgr
+  module NodeModules
+    class InstanceMonitor < Isono::NodeModules::Base
+      include Dcmgr::Rpc::KvmHelper
+      include Dcmgr::Logger
+
+      initialize_hook do
+        @thread_pool = Isono::ThreadPool.new(1, 'InstanceMonitor')
+        @monitor = EventMachine::PeriodicTimer.new(5) {
+          next if @thread_pool.queue.size > 0
+          @thread_pool.pass {
+            myinstance.check_instance
+          }
+        }
+      end
+
+      terminate_hook do
+        @monitor.cancel
+        @thread_pool.shutdown
+      end
+
+      def check_instance()
+        instlst = rpc.request('hva-collector', 'get_alive_instances', manifest.node_id)
+        instlst.find_all{|i| i[:state] == 'running' }.each { |i|
+          begin
+            check_kvm_process(i)
+          rescue Exception => e
+            if i[:status] == 'online'
+              logger.error("#{e.class}, #{e.message}")
+
+              rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:offline}) { |req|
+                req.oneshot = true
+              }
+              event.publish('hva/fault_instance', :args=>[i[:uuid]])
+            end
+            next
+          end
+
+          if i[:status] != 'online'
+            rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:online}) { |req|
+              req.oneshot = true
+            }
+          end
+        }
+      end
+
+      private
+      def check_kvm_process(i)
+        kvm_pid_path = File.expand_path("#{i[:uuid]}/kvm.pid", node.manifest.config.vm_data_dir)
+        unless File.exists?(kvm_pid_path)
+          raise "Unable to find the kvm.pid file: #{i[:uuid]}"
+        end
+        pid = File.read(kvm_pid_path).to_i
+        unless File.exists?(File.expand_path(pid.to_s, '/proc'))
+          raise "Unable to find the pid of kvm process: #{pid}"
+        end
+      end
+
+      def rpc
+        @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
+      end
+
+      def event
+        @event ||= Isono::NodeModules::EventChannel.new(@node)
+      end
+    end
+
+  end
+end
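Every five seconds the monitor asks hva-collector for this node's instances and checks each running KVM process; the `queue.size` guard skips a tick while the previous check is still queued. The liveness test itself reduces to two file checks; a standalone sketch with a hypothetical data directory:

    # kvm.pid is written per instance under vm_data_dir; the process is
    # alive iff /proc/<pid> still exists.
    pid = File.read('/var/lib/wakame/vm/i-00000000/kvm.pid').to_i
    running = File.exists?("/proc/#{pid}")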
data/lib/dcmgr/node_modules/service_netfilter.rb (new file)
@@ -0,0 +1,914 @@
+# -*- coding: utf-8 -*-
+require 'isono'
+require 'ipaddress'
+
+module Dcmgr
+  module NodeModules
+
+    module Bandwidth
+      include Dcmgr::Helpers::NicHelper
+      include Dcmgr::Logger
+
+      def clear_bandwidth_limits
+        logger.debug "Removing all bandwidth limits"
+        "tc qdisc del dev #{find_nic(@node.manifest.config.hv_ifindex)} root"
+      end
+
+      def limit_bandwidth(networks)
+        bandwidth_cmd = []
+        #raise ArgumentError unless inst_maps.is_a?(Hash)
+        nic = find_nic(@node.manifest.config.hv_ifindex)
+
+        #Determine the physical nic's peed in Mbit/s
+        speed = %x{ethtool #{nic} | grep Speed | cut -d ' ' -f2}.chomp.to_i
+
+        #Set up root disc
+        bandwidth_cmd << "tc qdisc add dev #{nic} root handle 1: htb"
+        bandwidth_cmd << "tc class add dev #{nic} parent 1: classid 1:1 htb rate #{speed}mbit ceil #{speed}mbit"
+
+        networks.each { |nw|
+          next if nw[:bandwidth].nil?
+
+          logger.debug "Limiting bandwidth to #{nw[:bandwidth]}Mbit/s for #{nw[:uuid]}."
+
+          #Set up the bandwidth limit for this network
+          bandwidth_cmd << "tc class add dev #{nic} parent 1:1 classid 1:1#{nw[:bandwidth_mark]} htb rate #{nw[:bandwidth]}mbit ceil #{nw[:bandwidth]}mbit prio 1"
+          bandwidth_cmd << "tc qdisc add dev #{nic} parent 1:1#{nw[:bandwidth_mark]} handle 1#{nw[:bandwidth_mark]}: sfq perturb 10"
+          bandwidth_cmd << "tc filter add dev #{nic} protocol ip parent 1: prio 1 handle #{nw[:bandwidth_mark]} fw classid 1:1#{nw[:bandwidth_mark]}"
+
+          #Mark the packets passing through this network
+          ["s","d"].each { |x| bandwidth_cmd << "iptables -A FORWARD -#{x} #{nw[:ipv4_gw]}/#{nw[:prefix]} -j MARK --set-mark 0x#{nw[:bandwidth_mark]}" }
+        }
+
+        bandwidth_cmd
+      end
+    end
+
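`limit_bandwidth` builds one HTB class per network under the root qdisc and classifies traffic with an fw filter keyed on the network's mark, so the trailing iptables MARK rules are what actually select packets. For a hypothetical network map the emitted commands look roughly like this (assuming `find_nic` returns eth0):

    nw = {:uuid=>'nw-00000000', :bandwidth=>100, :bandwidth_mark=>'12',
          :ipv4_gw=>'192.168.0.1', :prefix=>24}
    # limit_bandwidth([nw]) would then emit:
    #   tc class add dev eth0 parent 1:1 classid 1:112 htb rate 100mbit ceil 100mbit prio 1
    #   tc qdisc add dev eth0 parent 1:112 handle 112: sfq perturb 10
    #   tc filter add dev eth0 protocol ip parent 1: prio 1 handle 12 fw classid 1:112
    #   iptables -A FORWARD -s 192.168.0.1/24 -j MARK --set-mark 0x12
    #   iptables -A FORWARD -d 192.168.0.1/24 -j MARK --set-mark 0x12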
+    module Nat
+      include Dcmgr::Helpers::NicHelper
+      include Dcmgr::Logger
+
+      # Takes an instance and nats it.
+      # If the instance is in a network that has a nat_network mapped to it,
+      # it will receive a second ip lease for that network. This lease will then be
+      # natted to the ip the instance already had in its own network.
+      # For example if 192.168.0.0/24 is natted to 172.16.0.0/16, then
+      # an instance with ip 192.168.0.10 might be natted to ip 172.16.46.23.
+      def nat_instance(inst_map)
+        nat_cmd = []
+        raise ArgumentError unless inst_map.is_a?(Hash)
+
+        inst_map[:instance_nics].each {
+          |nic|
+          nat_ips = rpc.request('hva-collector', 'get_nat_leases', nic[:uuid]).map {|ip| IPAddress(ip)}
+
+          #Get the internal ip for this nic
+          internal_ip = IPAddress rpc.request('hva-collector', 'get_iplease_for_nic', nic[:uuid])
+          inside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid]).map {|ip| IPAddress(ip)}
+          outside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid],:outside).map {|ip| IPAddress(ip)}
+
+          #output the commands to nat this nic and answer arp requests for its outside ip
+          friend_ipset = inst_map[:uuid] + "_friend_ips"
+          nat_ips.each { |external_ip|
+            if @node.manifest.config.use_ipset
+
+              nat_cmd << "ipset -N #{friend_ipset} iphash"
+
+              inside_exception_ips.each { |ex_ip|
+                nat_cmd << "ipset -A #{friend_ipset} #{ex_ip.address}"
+              }
+
+              # The good rules that use ipset
+              postrouting_command = "iptables -t nat -A POSTROUTING -s #{internal_ip.address} -m set ! --match-set #{friend_ipset} dst"
+              prerouting_command = "iptables -t nat -A PREROUTING -d #{external_ip.address} -m set ! --match-set #{friend_ipset} src"
+            else
+              # The ugly rules to use in case ipset is not installed
+              postrouting_command = "iptables -t nat -A POSTROUTING -s #{internal_ip.address}"
+              prerouting_command = "iptables -t nat -A PREROUTING -d #{external_ip.address}"
+            end
+
+            # Build the final nat rules and log any packets that traverse them
+            nat_cmd << postrouting_command + " -j LOG --log-prefix 'Snat '"
+            nat_cmd << postrouting_command + " -j SNAT --to #{external_ip.address}"
+
+            nat_cmd << prerouting_command + " -j LOG --log-prefix 'Dnat '"
+            nat_cmd << prerouting_command + " -j DNAT --to #{internal_ip.address}"
+
+            logger.debug "Natting #{internal_ip.address} to #{external_ip.address}"
+
+            nat_cmd << arp_respond(external_ip)
+          }
+        }
+
+        nat_cmd
+      end
+
+      def nat_exceptions(inst_map)
+        inside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid]).map {|ip| IPAddress(ip)}
+        outside_exception_ips = rpc.request('hva-collector','get_group_instance_ipv4s',inst_map[:uuid],:outside).map {|ip| IPAddress(ip)}
+
+        cmds = []
+        inst_map[:instance_nics].each { |nic|
+          internal_ip = IPAddress(rpc.request('hva-collector', 'get_iplease_for_nic', nic[:uuid]))
+          inside_exception_ips.each { |ex_ip|
+            cmds << "iptables -t nat -A POSTROUTING -s #{internal_ip.address} -d #{ex_ip.address}/#{ex_ip.prefix} -j ACCEPT"
+          }
+          outside_exception_ips.each { |ex_ip|
+            cmds << "iptables -t nat -A PREROUTING -s #{internal_ip.address} -d #{ex_ip.address}/#{ex_ip.prefix} -j ACCEPT"
+          }
+        }
+
+        cmds
+      end
+
+      # Returns ebtables command to respond to ARP requests for the address _ip_.
+      def arp_respond(ip)
+        ip = IPAddress(ip) if ip.is_a?(String)
+        raise "Invalid IP address: #{ip}" unless ip.is_a?(IPAddress)
+
+        #Get the mac address for our physical nic
+        nic = find_nic(@node.manifest.config.hv_ifindex)
+        #TODO: Find a prettier way to get the mac address
+        mac_addr = %x{ifconfig | grep '#{nic}' | tr -s ' ' | cut -d ' ' -f5}.chomp
+
+        logger.debug "Replying ARP requests for address: #{ip.address}"
+
+        "ebtables -t nat -A PREROUTING -p arp --arp-ip-dst #{ip.address} --arp-opcode REQUEST -j arpreply --arpreply-mac #{mac_addr}"
+      end
+
+      def is_natted_ip?(ip)
+        ip = IPAddress(ip) if ip.is_a?(String)
+        #TODO: put in a proper argumenterror here
+        raise "Invalid IP address: #{ip}" unless ip.is_a?(IPAddress)
+
+        rpc.request('hva-collector', 'is_natted_ip?', ip.address)
+      end
+    end
+
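Using the example from the comment above (inside 192.168.0.10, outside 172.16.46.23), `nat_instance` emits a logged SNAT/DNAT pair plus an ARP responder, along these lines:

    # iptables -t nat -A POSTROUTING -s 192.168.0.10 ... -j SNAT --to 172.16.46.23
    # iptables -t nat -A PREROUTING  -d 172.16.46.23 ... -j DNAT --to 192.168.0.10
    # ebtables -t nat -A PREROUTING -p arp --arp-ip-dst 172.16.46.23 \
    #     --arp-opcode REQUEST -j arpreply --arpreply-mac <host mac>
    # With use_ipset, both NAT rules also carry `-m set ! --match-set <uuid>_friend_ips ...`,
    # so traffic between instances of the same security group bypasses the NAT.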
+    class ServiceNetfilter < Isono::NodeModules::Base
+      include Dcmgr::Logger
+      include Dcmgr::Helpers::NicHelper
+      include Nat
+      include Bandwidth
+
+      initialize_hook do
+        @worker_thread = Isono::ThreadPool.new(1, 'Netfilter')
+
+        @worker_thread.pass {
+          myinstance.init_netfilter
+        }
+
+        event = Isono::NodeModules::EventChannel.new(node)
+
+        event.subscribe('hva/instance_started', '#') do |args|
+          @worker_thread.pass {
+            logger.info("refresh on instance_started: #{args.inspect}")
+            inst_id = args[0]
+            logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
+            myinstance.refresh_netfilter_by_friend_instance_id(inst_id)
+          }
+        end
+
+        event.subscribe('hva/instance_terminated', '#') do |args|
+          @worker_thread.pass {
+            logger.info("refresh on instance_terminated: #{args.inspect}")
+            inst_id = args[0]
+            logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
+            myinstance.refresh_netfilter_by_friend_instance_id(inst_id)
+          }
+        end
+
+        event.subscribe('hva/netfilter_updated', '#') do |args|
+          @worker_thread.pass {
+            logger.info("refresh on netfilter_updated: #{args.inspect}")
+            netfilter_group_id = args[0]
+            myinstance.refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
+          }
+        end
+      end
+
+      def init_netfilter
+        begin
+          inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
+
+          viftable_map = {}
+          inst_maps = inst_maps.map { |inst_map|
+            viftable_map[ inst_map[:ips].first ] = inst_map[:instance_nics].first[:uuid]
+
+            # Does the hva have instance?
+            unless inst_map[:host_pool][:node_id] == node.node_id
+              logger.warn("no match for the instance: #{inst_map[:uuid]}")
+              next
+            end
+            # Does host have vif?
+            next unless valid_nic?(inst_map[:instance_nics].first[:uuid])
+            inst_maps
+          }.flatten.uniq.compact
+
+          init_iptables(inst_maps) if @node.manifest.config.enable_iptables
+          init_ebtables(inst_maps, viftable_map) if @node.manifest.config.enable_ebtables
+          init_static_nat(inst_maps) if @node.manifest.config.enable_iptables && @node.manifest.config.enable_ebtables
+          init_bandwidth_limit(networks = rpc.request('hva-collector', 'get_networks')) if @node.manifest.config.enable_iptables
+          sleep 1
+
+          logger.info("initialized netfilter")
+        rescue Exception => e
+          p e
+        end
+      end
+
+      # from event_subscriber
+      def refresh_netfilter_by_friend_instance_id(inst_id)
+        raise "UnknownInstanceID" if inst_id.nil?
+
+        begin
+          ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_id)
+          # get friend instance(s)
+          friend_inst_maps = ng_maps.map { |ng_map|
+            rpc.request('hva-collector', 'get_instances_of_netfilter_group', ng_map[:id])
+          }.flatten.uniq
+          guest_inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
+
+          uuids = friend_inst_maps.map { |inst_map| inst_map[:uuid] } & guest_inst_maps.map { |inst_map| inst_map[:uuid] }
+          logger.info("my guest instance(s)?: #{uuids.inspect}")
+
+          if uuids.flatten.uniq.size > 0
+            init_netfilter
+          else
+            # group_instance: 1->0
+            inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
+            init_netfilter if inst_map[:host_pool][:node_id] == node.node_id
+          end
+        rescue Exception => e
+          p e
+        end
+      end
+
+      # from event_subscriber
+      def refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
+        raise "UnknownNetfilterGroupID" if netfilter_group_id.nil?
+
+        begin
+          inst_maps = rpc.request('hva-collector', 'get_instances_of_netfilter_group', netfilter_group_id)
+          init_netfilter if inst_maps.size > 0
+        rescue Exception => e
+          p e
+        end
+      end
+
+      def build_vif_map(inst_map = {})
+        vif_map = {
+          :uuid => inst_map[:instance_nics].first[:uuid],
+          :mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
+          :ipv4 => inst_map[:ips].first,
+        }
+      end
+
+      def protocol_map(type)
+        case type
+        when :iptables
+          {
+            'tcp' => 'tcp',
+            'udp' => 'udp',
+            'icmp' => 'icmp',
+          }
+        when :ebtables
+          {
+            'ip4' => 'ip4',
+            'arp' => 'arp',
+            #'ip6' => 'ip6',
+            #'rarp' => '0x8035',
+          }
+        end
+      end
+
+      def do_exec(cmds)
+        recmds = []
+
+        eos = "__EOS_#{Isono::Util.gen_id}___"
+        recmds << "/bin/cat <<'#{eos}' | /bin/bash"
+        cmds.flatten.uniq.each { |cmd|
+          puts cmd if @node.manifest.config.verbose_netfilter == true
+          recmds << cmd
+        }
+        recmds << "#{eos}"
+
+        logger.debug("applying rule line(s): #{recmds.size - 2}")
+        system(recmds.join("\n"))
+        logger.debug("applied rule line(s): #{recmds.size - 2}")
+      end
+
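`do_exec` batches every generated rule into a single bash invocation through a quoted heredoc, so applying hundreds of rules costs one fork/exec of the shell rather than one per rule. The string handed to `system` looks like:

    # /bin/cat <<'__EOS_<random id>___' | /bin/bash
    # ebtables --init-table
    # iptables -t nat -F
    # ...
    # __EOS_<random id>___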
+      def init_ebtables(inst_maps = [], viftable_map = {})
+        init_cmds = []
+        basic_cmds = []
+        group_cmds = []
+        nat_cmds = []
+        final_cmds = []
+
+        init_cmds << "ebtables --init-table"
+        #Clear the nat table. This table is only used in build_ebtables_nat_part
+        init_cmds << "ebtables -t nat --init-table"
+
+        inst_maps.each { |inst_map|
+          vif_map = build_vif_map(inst_map)
+
+          basic_cmds << build_ebtables_basic_part(vif_map, inst_map)
+          group_cmds << build_ebtables_group_part(vif_map, inst_map, viftable_map)
+          final_cmds << build_ebtables_final_part(vif_map)
+        }
+
+        viftable_map.each { |k,v|
+          logger.debug("viftable: #{v} <=> #{k}")
+        }
+
+        do_exec([init_cmds, basic_cmds, group_cmds, final_cmds])
+      end
+
+      def init_iptables(inst_maps = [])
+        init_cmds = []
+        basic_cmds = []
+        group_cmds = []
+        nat_cmds = []
+        final_cmds = []
+
+        [ 'raw', 'nat', 'filter' ].each { |table|
+          [ 'F', 'Z', 'X' ].each { |xcmd|
+            init_cmds << "iptables -t #{table} -#{xcmd}"
+          }
+        }
+
+        #TODO: Make an option in the config file to use ipset
+        use_ipset = true
+        if use_ipset
+          ['F','X'].each { |xcmd|
+            init_cmds << "ipset -#{xcmd}"
+          }
+        end
+
+        # via http://backreference.org/2010/06/11/iptables-debugging/
+        # To debug ipv4 packets.
+        # $ sudo tail -F /var/log/kern.log | grep TRACE:
+        if @node.manifest.config.debug_iptables
+          init_cmds << "iptables -t raw -A OUTPUT -p icmp -j TRACE"
+          init_cmds << "iptables -t raw -A PREROUTING -p icmp -j TRACE"
+        end
+
+        inst_maps.each { |inst_map|
+          vif_map = build_vif_map(inst_map)
+
+          basic_cmds << build_iptables_basic_part(vif_map, inst_map)
+          group_cmds << build_iptables_group_part(vif_map, inst_map)
+          final_cmds << build_iptables_final_part(vif_map)
+        }
+
+        do_exec([init_cmds, basic_cmds, group_cmds, final_cmds])
+      end
+
+      def init_static_nat(inst_maps = [])
+        accept_cmds = []
+        nat_cmds = []
+
+        inst_maps.each { |inst_map|
+          accept_cmds << nat_exceptions(inst_map) unless @node.manifest.config.use_ipset
+          nat_cmds << nat_instance(inst_map)
+        }
+
+        do_exec([accept_cmds,nat_cmds])
+      end
+
+      def init_bandwidth_limit(network_maps)
+        do_exec([clear_bandwidth_limits,limit_bandwidth(network_maps)])
+      end
+
+      def build_ebtables_basic_part(vif_map, inst_map)
+        basic_cmds = []
+        hva_ipv4 = Isono::Util.default_gw_ipaddr
+
+        ################################
+        ## 0. chain name
+        ################################
+
+        # support IP protocol
+        protocol_map = protocol_map(:ebtables)
+
+        # make chain names.
+        chains = []
+        chains << "s_#{vif_map[:uuid]}"
+        chains << "d_#{vif_map[:uuid]}"
+        chains << "s_#{vif_map[:uuid]}_d_hst"
+        chains << "d_#{vif_map[:uuid]}_s_hst"
+        protocol_map.each { |k,v|
+          chains << "s_#{vif_map[:uuid]}_#{k}"
+          chains << "d_#{vif_map[:uuid]}_#{k}"
+          chains << "s_#{vif_map[:uuid]}_d_hst_#{k}"
+          chains << "d_#{vif_map[:uuid]}_s_hst_#{k}"
+        }
+
+        ################################
+        ## 1. basic part
+        ################################
+
+        # create user defined chains.
+        [ 'N' ].each { |xcmd|
+          chains.each { |chain|
+            basic_cmds << "ebtables -#{xcmd} #{chain}"
+          }
+        }
+
+        # jumt to user defined chains
+        basic_cmds << "ebtables -A FORWARD -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
+        basic_cmds << "ebtables -A FORWARD -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
+        basic_cmds << "ebtables -A INPUT -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}_d_hst"
+        basic_cmds << "ebtables -A OUTPUT -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}_s_hst"
+
+        # IP protocol routing
+        protocol_map.each { |k,v|
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -p #{v} -j s_#{vif_map[:uuid]}_#{k}"
+          basic_cmds << "ebtables -A d_#{vif_map[:uuid]} -p #{v} -j d_#{vif_map[:uuid]}_#{k}"
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -p #{v} -j s_#{vif_map[:uuid]}_d_hst_#{k}"
+          basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst -p #{v} -j d_#{vif_map[:uuid]}_s_hst_#{k}"
+        }
+
+        if @node.manifest.config.packet_drop_log
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]} --log-level 4 --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}:' -j CONTINUE"
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst --log-level 4 --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst:' -j CONTINUE"
+        end
+        basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -j DROP"
+        basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -j DROP"
+        # anti spoof: mac # guest -> *
+        basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+        basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
+        basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
+        basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
+        # guest <- * (broadcast)
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_hst_arp:' -j CONTINUE"
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
+
+        # guest <- *
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
+        basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
+
+        # anti spoof: ipv4
+        inst_map[:ips].each { |ipv4|
+          #next if is_natted_ip? ipv4
+          # guest -> *
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
+          basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
+          # guest <- *
+          basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+          basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
+          basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
+          basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
+        }
+
+        basic_cmds
+      end
+
+      def build_ebtables_group_part(vif_map, inst_map, viftable_map)
+        group_cmds = []
+        hva_ipv4 = Isono::Util.default_gw_ipaddr
+
+        ################################
+        ## 2. group part
+        ################################
+        same_subnet_ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
+
+        network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
+        raise "UnknownNetworkId" if network_map.nil?
+        joined_network = IPAddress("#{network_map[:ipv4_gw]}/#{network_map[:prefix]}")
+
+        [ network_map[:dns_server], network_map[:dhcp_server] ].each { |ipv4|
+          next unless joined_network.include? IPAddress(ipv4)
+          same_subnet_ipv4s << ipv4
+        }
+
+        # network resource node(s)
+        ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
+        rules = ng_maps.map { |ng_map|
+          ng_map[:rules].map { |rule| rule[:permission] }
+        }.flatten
+        build_rule(rules).each do |rule|
+          begin
+            # <ArgumentError: Invalid IP "0.0.0.0">
+            next unless joined_network.include? IPAddress(rule[:ip_source])
+            same_subnet_ipv4s << rule[:ip_source]
+          rescue Exception => e
+            p e
+          end
+        end
+        same_subnet_ipv4s << network_map[:ipv4_gw]
+
+        # guest node(s) in HyperVisor.
+        alive_inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
+        guest_ipv4s = alive_inst_maps.map { |alive_inst_map|
+          alive_inst_map[:ips]
+        }.flatten.uniq.compact
+
+        same_subnet_ipv4s.uniq.reverse_each do |ipv4|
+          next if vif_map[:ipv4] == ipv4
+
+          # get_macaddr_by_ipv4, ipv4
+          if ipv4 == hva_ipv4
+            #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [host] ***-****** (#{ipv4})"
+            group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
+            group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
+          elsif guest_ipv4s.include?(ipv4)
+            #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [guest] #{viftable_map[ipv4]}(#{ipv4})"
+
+            # guest->guest
+            group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+            group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
+            # guest->host
+            group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
+            group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
+
+            unless viftable_map[ipv4].nil?
+              # guest->guest
+              group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Arv d_#{viftable_map[ipv4]}_arp:' -j CONTINUE"
+              group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
+
+              # guest->host
+              group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Arv s_#{viftable_map[ipv4]}_d_hst_arp:' -j CONTINUE"
+              group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
+            end
+          else
+            #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [other] ***-******** (#{ipv4})"
+            group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw :d_#{vif_map[:uuid]}_arp' -j CONTINUE"
+            group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
+          end
+        end
+
+        group_cmds
+      end
+      def build_ebtables_final_part(vif_map)
+        final_cmds = []
+
+        ################################
+        ## 3. final part
+        ################################
+        # deny,allow
+        final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --log-level 4 --log-ip --log-arp --log-prefix 'D d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
+        final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --log-level 4 --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
+        final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp -j DROP"
+        final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp -j DROP"
+
+        final_cmds
+      end
+
+      def build_iptables_basic_part(vif_map, inst_map)
+        basic_cmds = []
+
+        network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
+        raise "UnknownNetworkId" if network_map.nil?
+
+        ################################
+        ## 0. chain name
+        ################################
+
+        # support IP protocol
+        protocol_map = protocol_map(:iptables)
+
+        # make chain names.
+        chains = []
+        protocol_map.each { |k,v|
+          chains << "s_#{vif_map[:uuid]}_#{k}"
+          chains << "d_#{vif_map[:uuid]}_#{k}"
+        }
+        chains << "s_#{vif_map[:uuid]}"
+        chains << "d_#{vif_map[:uuid]}"
+
+        ################################
+        ## 1. basic part
+        ################################
+
+        # metadata-server
+        port = network_map[:metadata_server_port] || 80
+        [ 'A' ].each { |xcmd|
+          basic_cmds << "iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-in #{vif_map[:uuid]} -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:#{port}"
+        }
+        # create user defined chains.
+        [ 'N' ].each { |xcmd|
+          chains.each { |chain|
+            basic_cmds << "iptables -#{xcmd} #{chain}"
+
+            # logger & drop
+            basic_cmds << "iptables -N #{chain}_drop"
+            if @node.manifest.config.packet_drop_log
+              basic_cmds << "iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix 'D #{chain}:'"
+            end
+            basic_cmds << "iptables -A #{chain}_drop -j DROP"
+          }
+        }
+
+        # DHCP Server
+        basic_cmds << "iptables -A d_#{vif_map[:uuid]}_udp -p udp ! -s #{network_map[:dhcp_server]} --sport 67 -j d_#{vif_map[:uuid]}_udp_drop"
+        basic_cmds << "iptables -A d_#{vif_map[:uuid]}_udp -p udp ! -s #{network_map[:dhcp_server]} --sport 68 -j d_#{vif_map[:uuid]}_udp_drop"
+
+        # group nodes
+        # group node IPv4 addresses.
+        ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
+        ipv4s << network_map[:ipv4_gw]
+        ipv4s.uniq.reverse_each { |addr|
+          basic_cmds << "iptables -A d_#{vif_map[:uuid]} -s #{addr} -j ACCEPT"
+        }
+
+        # IP protocol routing
+        [ 's', 'd' ].each do |bound|
+          protocol_map.each { |k,v|
+            basic_cmds << "iptables -N #{bound}_#{vif_map[:uuid]}_#{k}"
+
+            case k
+            when 'tcp'
+              case bound
+              when 's'
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
+              when 'd'
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
+              end
+            when 'udp'
+              case bound
+              when 's'
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
+              when 'd'
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
+              end
+            when 'icmp'
+              case bound
+              when 's'
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
+              when 'd'
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
+                basic_cmds << "iptables -A #{bound}_#{vif_map[:uuid]} -p #{k} -j #{bound}_#{vif_map[:uuid]}_#{k}"
+              end
+            end
+          }
+        end
+
+        basic_cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
+        basic_cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
+
+        ##
+        ## ACCEPT
+        ##
+        # DHCP Server
+        basic_cmds << "iptables -A d_#{vif_map[:uuid]}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
+        basic_cmds << "iptables -A d_#{vif_map[:uuid]}_udp -p udp -s #{network_map[:dhcp_server]} --sport 68 -j ACCEPT"
+
+        # DNS Server
+        basic_cmds << "iptables -A s_#{vif_map[:uuid]}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"
+
+        ##
+        ## DROP
+        ##
+        protocol_map.each { |k,v|
+          # DHCP
+          basic_cmds << "iptables -A s_#{vif_map[:uuid]} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif_map[:uuid]}_#{k}_drop"
+          # DNS
+          basic_cmds << "iptables -A s_#{vif_map[:uuid]} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif_map[:uuid]}_#{k}_drop"
+        }
+
+        basic_cmds
+      end
+
+      def build_iptables_group_part(vif_map, inst_map)
+        group_cmds = []
+
+        ################################
+        ## 2. group part
+        ################################
+        ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
+        rules = ng_maps.map { |ng_map|
+          ng_map[:rules].map { |rule| rule[:permission] }
+        }.flatten
+
+        # security group
+        build_rule(rules).each do |rule|
+          case rule[:ip_protocol]
+          when 'tcp', 'udp'
+            if rule[:ip_fport] == rule[:ip_tport]
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_fport]} -j ACCEPT"
+            else
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_fport]}:#{rule[:ip_tport]} -j ACCEPT"
+            end
+          when 'icmp'
+            # icmp
+            #   This extension can be used if `--protocol icmp' is specified. It provides the following option:
+            #   [!] --icmp-type {type[/code]|typename}
+            #     This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command
+            #      iptables -p icmp -h
+            if rule[:icmp_type] == -1 && rule[:icmp_code] == -1
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
+            else
+              group_cmds << "iptables -A d_#{vif_map[:uuid]}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
+            end
+          end
+        end
+
+        group_cmds
+      end
+
+      def build_iptables_final_part(vif_map)
+        final_cmds = []
+
+        # support IP protocol
+        protocol_map = protocol_map(:iptables)
+
+        ################################
+        ## 3. final part
+        ################################
+
+        # drop other routings
+        protocol_map.each { |k,v|
+          final_cmds << "iptables -A d_#{vif_map[:uuid]}_#{k} -p #{k} -j d_#{vif_map[:uuid]}_#{k}_drop"
+        }
+
+        # IP protocol routing
+        [ 'd' ].each do |bound|
+          protocol_map.each { |k,v|
+            final_cmds << "iptables -A #{bound}_#{vif_map[:uuid]}_#{k} -j #{bound}_#{vif_map[:uuid]}_#{k}_drop"
+          }
+        end
+
+        final_cmds
+      end
+
+      def build_rule(rules = [])
+        rule_maps = []
+
+        rules.each do |rule|
+          rule = rule.strip.gsub(/[\s\t]+/, '')
+          from_group = false
+          ipv4s = []
+
+          # ex.
+          # "tcp:22,22,ip4:0.0.0.0"
+          # "udp:53,53,ip4:0.0.0.0"
+          # "icmp:-1,-1,ip4:0.0.0.0"
+
+          # 1st phase
+          # ip_tport    : tcp,udp? 1 - 16bit, icmp: -1
+          # id_port has been separeted in first phase.
+          from_pair, ip_tport, source_pair = rule.split(',')
+
+          next if from_pair.nil?
+          next if ip_tport.nil?
+          next if source_pair.nil?
+
+          # 2nd phase
+          # ip_protocol : [ tcp | udp | icmp ]
+          # ip_fport    : tcp,udp? 1 - 16bit, icmp: -1
+          ip_protocol, ip_fport = from_pair.split(':')
+
+          # protocol    : [ ip4 | ip6 | #{account_id} ]
+          # ip_source   : ip4? xxx.xxx.xxx.xxx./[0-32], ip6? (not yet supprted), #{netfilter_group_id}
+          protocol, ip_source = source_pair.split(':')
+
+          begin
+            s = StringScanner.new(protocol)
+            until s.eos?
+              case
+              when s.scan(/ip6/)
+                # TODO#FUTURE: support IPv6 address format
+                next
+              when s.scan(/ip4/)
+                # IPAddress doesn't support prefix '0'.
+                ip_addr, prefix = ip_source.split('/', 2)
+                if prefix.to_i == 0
+                  ip_source = ip_addr
+                end
+              when s.scan(/a-\w{8}/)
+                from_group = true
+                inst_maps = rpc.request('hva-collector', 'get_instances_of_account_netfilter_group', protocol, ip_source)
+                inst_maps.each { |inst_map|
+                  ipv4s << inst_map[:ips]
+                }
+              else
+                raise "unexpected protocol '#{s.peep(20)}'"
+              end
+            end
+          rescue Exception => e
+            p e
+            next
+          end
+
+          begin
+            if from_group == false
+              #p "from_group:(#{from_group}) ip_source -> #{ip_source}"
+              ip = IPAddress(ip_source)
+              ip_source = case ip.u32
+                          when 0
+                            "#{ip.address}/0"
+                          else
+                            "#{ip.address}/#{ip.prefix}"
+                          end
+            else
+              ipv4s = ipv4s.flatten.uniq
+            end
+          rescue Exception => e
+            p e
+            next
+          end
+
+          case ip_protocol
+          when 'tcp', 'udp'
+            ip_fport = ip_fport.to_i
+            ip_tport = ip_tport.to_i
+
+            # validate port range
+            [ ip_fport, ip_tport ].each do |port|
+              next unless port >= 1 && port <= 65535
+            end
+
+            if ip_fport <= ip_tport
+              if from_group == false
+                rule_maps << {
+                  :ip_protocol => ip_protocol,
+                  :ip_fport    => ip_fport,
+                  :ip_tport    => ip_tport,
+                  :protocol    => protocol,
+                  :ip_source   => ip_source,
+                }
+              else
+                ipv4s.each { |ip|
+                  rule_maps << {
+                    :ip_protocol => ip_protocol,
+                    :ip_fport    => ip_fport,
+                    :ip_tport    => ip_tport,
+                    :protocol    => 'ip4',
+                    :ip_source   => ip,
+                  }
+                }
+              end
+            end
+          when 'icmp'
+            # via http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/
+            #
+            # For the ICMP protocol, the ICMP type and code must be specified.
+            # This must be specified in the format type:code where both are integers.
+            # Type, code, or both can be specified as -1, which is a wildcard.
+
+            icmp_type = ip_fport.to_i
+            icmp_code = ip_tport.to_i
+
+            # icmp_type
+            case icmp_type
+            when -1
+            when 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
+            else
+              next
+            end
+
+            # icmp_code
+            case icmp_code
+            when -1
+            when 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+              # when icmp_type equals -1 icmp_code must equal -1.
+              next if icmp_type == -1
+            else
+              next
+            end
+
+            if from_group == false
+              rule_maps << {
+                :ip_protocol => ip_protocol,
+                :icmp_type   => ip_tport.to_i, # ip_tport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
+                :icmp_code   => ip_fport.to_i, # ip_fport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+                :protocol    => protocol,
+                :ip_source   => ip_source,
+              }
+            else
+              ipv4s.each { |ip|
+                rule_maps << {
+                  :ip_protocol => ip_protocol,
+                  :icmp_type   => ip_tport.to_i, # ip_tport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
+                  :icmp_code   => ip_fport.to_i, # ip_fport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+                  :protocol    => 'ip4',
+                  :ip_source   => ip,
+                }
+              }
+            end
+          end
+        end
+
+        rule_maps
+      end
+
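`build_rule` turns the comma-separated permission strings shown in its comments into rule hashes consumed by the iptables builders. Tracing the first example through the parser:

    build_rule(["tcp:22,22,ip4:0.0.0.0"])
    # => [{:ip_protocol=>"tcp", :ip_fport=>22, :ip_tport=>22,
    #      :protocol=>"ip4", :ip_source=>"0.0.0.0/0"}]
    # A group reference such as "tcp:22,22,a-00000000:mygroup" instead expands
    # to one rule per IP of the group's instances (the from_group branch).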
+      def rpc
+        @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
+      end
+
+      def event
+        @event ||= Isono::NodeModules::EventChannel.new(@node)
+      end
+
+    end
+  end
+end