wakame-vdc-agents 11.06.0 → 11.12.0
- data/Rakefile +19 -31
- data/bin/hva +15 -5
- data/bin/nsa +15 -5
- data/bin/sta +9 -222
- data/config/db/migrations/0001_v1110_origin.rb +446 -0
- data/config/hva.conf.example +19 -11
- data/config/nsa.conf.example +1 -1
- data/lib/dcmgr.rb +99 -22
- data/lib/dcmgr/cli/base.rb +34 -1
- data/lib/dcmgr/cli/host.rb +24 -20
- data/lib/dcmgr/cli/image.rb +38 -19
- data/lib/dcmgr/cli/keypair.rb +16 -12
- data/lib/dcmgr/cli/network.rb +189 -81
- data/lib/dcmgr/cli/quota.rb +2 -2
- data/lib/dcmgr/cli/security_group.rb +106 -0
- data/lib/dcmgr/cli/spec.rb +144 -39
- data/lib/dcmgr/cli/storage.rb +16 -15
- data/lib/dcmgr/cli/tag.rb +20 -14
- data/lib/dcmgr/cli/vlan.rb +5 -5
- data/lib/dcmgr/drivers/backing_store.rb +32 -0
- data/lib/dcmgr/drivers/comstar.rb +81 -0
- data/lib/dcmgr/drivers/iijgio_storage.rb +9 -19
- data/lib/dcmgr/drivers/iscsi_target.rb +41 -0
- data/lib/dcmgr/drivers/kvm.rb +161 -28
- data/lib/dcmgr/drivers/linux_iscsi.rb +60 -0
- data/lib/dcmgr/drivers/local_storage.rb +24 -0
- data/lib/dcmgr/drivers/lxc.rb +167 -125
- data/lib/dcmgr/drivers/raw.rb +74 -0
- data/lib/dcmgr/drivers/s3_storage.rb +7 -19
- data/lib/dcmgr/drivers/snapshot_storage.rb +18 -28
- data/lib/dcmgr/drivers/storage_initiator.rb +28 -0
- data/lib/dcmgr/drivers/sun_iscsi.rb +32 -0
- data/lib/dcmgr/drivers/zfs.rb +77 -0
- data/lib/dcmgr/endpoints/core_api.rb +315 -263
- data/lib/dcmgr/endpoints/errors.rb +21 -10
- data/lib/dcmgr/endpoints/metadata.rb +360 -23
- data/lib/dcmgr/helpers/cli_helper.rb +6 -3
- data/lib/dcmgr/helpers/ec2_metadata_helper.rb +9 -0
- data/lib/dcmgr/helpers/nic_helper.rb +11 -0
- data/lib/dcmgr/helpers/snapshot_storage_helper.rb +34 -0
- data/lib/dcmgr/models/account.rb +0 -6
- data/lib/dcmgr/models/account_resource.rb +0 -4
- data/lib/dcmgr/models/base_new.rb +14 -2
- data/lib/dcmgr/models/dhcp_range.rb +38 -0
- data/lib/dcmgr/models/frontend_system.rb +0 -6
- data/lib/dcmgr/models/history.rb +0 -11
- data/lib/dcmgr/models/host_node.rb +131 -0
- data/lib/dcmgr/models/hostname_lease.rb +0 -8
- data/lib/dcmgr/models/image.rb +31 -18
- data/lib/dcmgr/models/instance.rb +137 -143
- data/lib/dcmgr/models/instance_nic.rb +52 -29
- data/lib/dcmgr/models/instance_security_group.rb +9 -0
- data/lib/dcmgr/models/instance_spec.rb +163 -31
- data/lib/dcmgr/models/ip_lease.rb +10 -21
- data/lib/dcmgr/models/mac_lease.rb +30 -11
- data/lib/dcmgr/models/network.rb +148 -27
- data/lib/dcmgr/models/physical_network.rb +18 -0
- data/lib/dcmgr/models/quota.rb +0 -10
- data/lib/dcmgr/models/request_log.rb +3 -18
- data/lib/dcmgr/models/security_group.rb +66 -0
- data/lib/dcmgr/models/security_group_rule.rb +145 -0
- data/lib/dcmgr/models/ssh_key_pair.rb +16 -19
- data/lib/dcmgr/models/{storage_pool.rb → storage_node.rb} +35 -25
- data/lib/dcmgr/models/tag.rb +0 -14
- data/lib/dcmgr/models/tag_mapping.rb +1 -7
- data/lib/dcmgr/models/vlan_lease.rb +2 -8
- data/lib/dcmgr/models/volume.rb +49 -37
- data/lib/dcmgr/models/volume_snapshot.rb +15 -17
- data/lib/dcmgr/node_modules/hva_collector.rb +69 -28
- data/lib/dcmgr/node_modules/instance_ha.rb +23 -12
- data/lib/dcmgr/node_modules/instance_monitor.rb +16 -2
- data/lib/dcmgr/node_modules/openflow_controller.rb +784 -0
- data/lib/dcmgr/node_modules/scheduler.rb +189 -0
- data/lib/dcmgr/node_modules/service_netfilter.rb +452 -227
- data/lib/dcmgr/node_modules/service_openflow.rb +731 -0
- data/lib/dcmgr/node_modules/sta_collector.rb +20 -0
- data/lib/dcmgr/node_modules/sta_tgt_initializer.rb +35 -0
- data/lib/dcmgr/rack/request_logger.rb +11 -6
- data/lib/dcmgr/rpc/hva_handler.rb +256 -110
- data/lib/dcmgr/rpc/sta_handler.rb +244 -0
- data/lib/dcmgr/scheduler.rb +122 -8
- data/lib/dcmgr/scheduler/host_node/exclude_same.rb +24 -0
- data/lib/dcmgr/scheduler/host_node/find_first.rb +12 -0
- data/lib/dcmgr/scheduler/host_node/least_usage.rb +28 -0
- data/lib/dcmgr/scheduler/host_node/per_instance.rb +18 -0
- data/lib/dcmgr/scheduler/host_node/specify_node.rb +26 -0
- data/lib/dcmgr/scheduler/network/flat_single.rb +23 -0
- data/lib/dcmgr/scheduler/network/nat_one_to_one.rb +23 -0
- data/lib/dcmgr/scheduler/network/per_instance.rb +39 -0
- data/lib/dcmgr/scheduler/network/vif_template.rb +19 -0
- data/lib/dcmgr/scheduler/storage_node/find_first.rb +13 -0
- data/lib/dcmgr/scheduler/storage_node/least_usage.rb +23 -0
- data/lib/dcmgr/storage_service.rb +39 -40
- data/lib/dcmgr/tags.rb +3 -3
- data/lib/dcmgr/version.rb +1 -1
- data/lib/dcmgr/vnet.rb +105 -0
- data/lib/dcmgr/vnet/factories.rb +141 -0
- data/lib/dcmgr/vnet/isolators/by_securitygroup.rb +21 -0
- data/lib/dcmgr/vnet/isolators/dummy.rb +17 -0
- data/lib/dcmgr/vnet/netfilter/cache.rb +51 -0
- data/lib/dcmgr/vnet/netfilter/chain.rb +66 -0
- data/lib/dcmgr/vnet/netfilter/controller.rb +193 -0
- data/lib/dcmgr/vnet/netfilter/ebtables_rule.rb +53 -0
- data/lib/dcmgr/vnet/netfilter/iptables_rule.rb +45 -0
- data/lib/dcmgr/vnet/netfilter/task_manager.rb +459 -0
- data/lib/dcmgr/vnet/tasks/accept_all_dns.rb +19 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_broadcast.rb +24 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_from_friends.rb +34 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_from_gateway.rb +21 -0
- data/lib/dcmgr/vnet/tasks/accept_arp_to_host.rb +30 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_from_friends.rb +26 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_from_gateway.rb +23 -0
- data/lib/dcmgr/vnet/tasks/accept_ip_to_anywhere.rb +18 -0
- data/lib/dcmgr/vnet/tasks/accept_related_established.rb +45 -0
- data/lib/dcmgr/vnet/tasks/accept_wakame_dhcp_only.rb +33 -0
- data/lib/dcmgr/vnet/tasks/accept_wakame_dns_only.rb +33 -0
- data/lib/dcmgr/vnet/tasks/debug_iptables.rb +21 -0
- data/lib/dcmgr/vnet/tasks/drop_arp_forwarding.rb +27 -0
- data/lib/dcmgr/vnet/tasks/drop_arp_to_host.rb +24 -0
- data/lib/dcmgr/vnet/tasks/drop_ip_from_anywhere.rb +18 -0
- data/lib/dcmgr/vnet/tasks/drop_ip_spoofing.rb +34 -0
- data/lib/dcmgr/vnet/tasks/drop_mac_spoofing.rb +33 -0
- data/lib/dcmgr/vnet/tasks/exclude_from_nat.rb +47 -0
- data/lib/dcmgr/vnet/tasks/security_group.rb +37 -0
- data/lib/dcmgr/vnet/tasks/static_nat.rb +54 -0
- data/lib/dcmgr/vnet/tasks/translate_metadata_address.rb +32 -0
- metadata +105 -68
- data/lib/dcmgr/cli/group.rb +0 -101
- data/lib/dcmgr/endpoints/core_api_mock.rb +0 -865
- data/lib/dcmgr/models/host_pool.rb +0 -122
- data/lib/dcmgr/models/instance_netfilter_group.rb +0 -16
- data/lib/dcmgr/models/netfilter_group.rb +0 -89
- data/lib/dcmgr/models/netfilter_rule.rb +0 -21
- data/lib/dcmgr/scheduler/find_last.rb +0 -16
- data/lib/dcmgr/scheduler/find_random.rb +0 -16
- data/lib/dcmgr/stm/instance.rb +0 -25
- data/lib/dcmgr/stm/snapshot_context.rb +0 -33
- data/lib/dcmgr/stm/volume_context.rb +0 -65
data/lib/dcmgr/node_modules/sta_collector.rb:

```diff
@@ -21,6 +21,15 @@ module Dcmgr
       terminate_hook do
       end
 
+      # collect all volume instances on the sta node.
+      def get_available_volumes(sta_node_id)
+        stnode = Models::StorageNode.filter(:node_id=>sta_node_id).first || raise("Unknown sta node ID: #{sta_node_id}")
+        v = stnode.volumes_dataset.lives.all
+        v.map { |volume|
+          volume.merge_pool_data
+        }
+      end
+
       def get_volume(volume_id)
         v = Dcmgr::Models::Volume[volume_id]
         v.merge_pool_data
@@ -34,6 +43,12 @@ module Dcmgr
       def update_volume(volume_id, data)
         v = Dcmgr::Models::Volume[volume_id]
         v.set(data).save
+        if data[:state] == :deleted
+          # Volume#destroy do not really delete row.
+          # just for chain react destroy hooks in the associated models.
+          v.destroy
+        end
+
         # do not respond model object.
         nil
       end
@@ -41,6 +56,11 @@ module Dcmgr
       def update_snapshot(snapshot_id, data)
         vs = Dcmgr::Models::VolumeSnapshot[snapshot_id]
         vs.set(data).save
+        if data[:state] == :deleted
+          # VolumeSnapshot#destroy do not really delete row.
+          # just for chain react destroy hooks in the associated models.
+          vs.destroy
+        end
         # do not respond model object.
         nil
       end
```
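When `update_volume` or `update_snapshot` receives `:state => :deleted`, the collector calls `destroy` on a model whose delete is a soft delete: the row stays in place, but the `destroy` hooks still fire so associated models get cleaned up. A minimal sketch of that pattern with Sequel (the ORM the gem's models are built on); the `volumes` table here is illustrative, and `_destroy_delete` is the internal Sequel step that issues the actual DELETE:

```ruby
require 'sequel'

DB = Sequel.sqlite
DB.create_table(:volumes) { primary_key :id; String :state, :default=>'available' }

class Volume < Sequel::Model
  # Model#destroy still runs before_destroy/after_destroy (where associated
  # models would chain their own cleanup), but the DELETE becomes an UPDATE.
  private

  def _destroy_delete
    this.update(:state => 'deleted')
  end
end

v = Volume.create
v.destroy
puts Volume[v.id].state   # => "deleted" -- the row survives, hooks have fired
```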
data/lib/dcmgr/node_modules/sta_tgt_initializer.rb (new file):

```diff
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+require 'isono'
+
+module Dcmgr
+  module NodeModules
+    class StaTgtInitializer < Isono::NodeModules::Base
+      include Dcmgr::Helpers::CliHelper
+      include Dcmgr::Logger
+
+      initialize_hook do
+
+        manifest.config.initiator_address ||= 'ALL'
+
+        EM.defer do
+          myinstance.register_volumes
+        end
+      end
+
+      terminate_hook do
+      end
+
+      def register_volumes
+        volumes = rpc.request('sta-collector', 'get_available_volumes', node.node_id)
+        iscsit = Dcmgr::Drivers::IscsiTarget.select_iscsi_target(self.node.manifest.config.iscsi_target, self.node)
+        volumes.each { |volume|
+          iscsit.register(volume)
+        }
+      end
+
+      def rpc
+        @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
+      end
+    end
+  end
+end
```
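The new StaTgtInitializer re-exports every live volume on a storage node as an iSCSI target when the sta agent boots. It asks `sta-collector` for the volume list over RPC and hands the work to `EM.defer`, so the blocking RPC round-trip and target setup run on EventMachine's thread pool instead of stalling the reactor. A minimal sketch of that deferral pattern (the worker body here is illustrative):

```ruby
require 'eventmachine'

EM.run do
  # EM.defer runs the first proc on EventMachine's worker thread pool and
  # invokes the second proc with its result back on the reactor thread.
  slow_recovery = proc do
    sleep 1            # stands in for the blocking RPC + iSCSI target setup
    %w(vol-1 vol-2)    # stands in for the recovered volume list
  end

  on_done = proc do |volumes|
    puts "re-registered: #{volumes.join(', ')}"
    EM.stop
  end

  EM.defer(slow_recovery, on_done)
end
```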
data/lib/dcmgr/rack/request_logger.rb:

```diff
@@ -1,11 +1,14 @@
 # -*- coding: utf-8 -*-
 
+require 'rack/request'
+
 module Dcmgr::Rack
   # Rack middleware for logging each API request.
   class RequestLogger
     HTTP_X_VDC_REQUEST_ID='HTTP_X_VDC_REQUEST_ID'.freeze
     HEADER_X_VDC_REQUEST_ID='X-VDC-Request-ID'.freeze
-
+    RACK_REQUEST_LOG_KEY='vdc.request_log'.freeze
+
     def initialize(app, with_header=true)
       raise TypeError unless app.is_a?(Dcmgr::Endpoints::CoreAPI)
       @app = app
@@ -17,8 +20,9 @@ module Dcmgr::Rack
     end
 
     def _call(env)
-
-
+      request = ::Rack::Request.new(env)
+      env[RACK_REQUEST_LOG_KEY] = @log = Dcmgr::Models::RequestLog.new
+      log_env(request)
       begin
         ret = @app.call(env)
         @log.response_status = ret[0]
@@ -42,8 +46,9 @@ module Dcmgr::Rack
 
     private
     # set common values in Rack env.
-    # @params [Hash] env
-    def log_env(env)
+    # @params [Rack::Request] request
+    def log_env(request)
+      env = request.env
       #@log.frontend_system_id = env[Dcmgr::Endpoints::RACK_FRONTEND_SYSTEM_ID].to_s
       if env[Dcmgr::Endpoints::HTTP_X_VDC_ACCOUNT_UUID].nil? || env[Dcmgr::Endpoints::HTTP_X_VDC_ACCOUNT_UUID] == ''
         @log.account_id = 'nil'
@@ -53,7 +58,7 @@ module Dcmgr::Rack
       @log.requester_token = env[Dcmgr::Endpoints::HTTP_X_VDC_REQUESTER_TOKEN]
       @log.request_method = env['REQUEST_METHOD']
       @log.api_path = env['PATH_INFO']
-      @log.params =
+      @log.params = request.params
     end
 
   end
```
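RequestLogger now wraps the env in a `Rack::Request` up front and parks the `RequestLog` model in the Rack env under `vdc.request_log`, so downstream endpoint code can annotate the same record the middleware persists. A self-contained sketch of that env-sharing pattern (the inner app, and the hash standing in for the log model, are illustrative):

```ruby
require 'rack'

class RequestLogger
  LOG_KEY = 'vdc.request_log'.freeze

  def initialize(app)
    @app = app
  end

  def call(env)
    request = ::Rack::Request.new(env)
    # Park the log record in env so the downstream app can enrich it.
    log = env[LOG_KEY] = { method: request.request_method,
                           path:   request.path_info,
                           params: request.params }
    status, headers, body = @app.call(env)
    log[:status] = status
    # a real implementation would persist the record here
    [status, headers, body]
  end
end

app = RequestLogger.new(->(env) {
  env['vdc.request_log'][:account] = 'a-00000000'   # enrich the shared record
  [200, { 'Content-Type' => 'text/plain' }, ['ok']]
})
```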
data/lib/dcmgr/rpc/hva_handler.rb:

```diff
@@ -1,34 +1,13 @@
 # -*- coding: utf-8 -*-
 require 'isono'
-require 'net/telnet'
 require 'fileutils'
+require 'ipaddress'
 
 module Dcmgr
   module Rpc
-    module KvmHelper
-      # Establish telnet connection to KVM monitor console
-      def connect_monitor(port, &blk)
-        begin
-          telnet = ::Net::Telnet.new("Host" => "localhost",
-                                     "Port"=>port.to_s,
-                                     "Prompt" => /\n\(qemu\) \z/,
-                                     "Timeout" => 60,
-                                     "Waittime" => 0.2)
-
-          blk.call(telnet)
-        rescue => e
-          logger.error(e) if self.respond_to?(:logger)
-          raise e
-        ensure
-          telnet.close
-        end
-      end
-    end
-
     class HvaHandler < EndpointBuilder
       include Dcmgr::Logger
       include Dcmgr::Helpers::CliHelper
-      include KvmHelper
       include Dcmgr::Helpers::NicHelper
 
       def select_hypervisor
```
```diff
@@ -43,21 +22,28 @@ module Dcmgr
       tryagain do
         next true if File.exist?(@os_devpath)
 
-        sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_pool][:ipaddr]])
+        sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_node][:ipaddr]])
         sh("iscsiadm -m node -l -T '%s' --portal '%s'",
-           [@vol[:transport_information][:iqn], @vol[:storage_pool][:ipaddr]])
-
+           [@vol[:transport_information][:iqn], @vol[:storage_node][:ipaddr]])
+        # wait udev queue
+        sh("/sbin/udevadm settle")
       end
 
       rpc.request('sta-collector', 'update_volume', @vol_id, {
         :state=>:attaching,
         :attached_at => nil,
+        :instance_id => @inst[:id], # needed after cleanup
         :host_device_name => @os_devpath})
     end
 
     def detach_volume_from_host
       # iscsi logout
       sh("iscsiadm -m node -T '%s' --logout", [@vol[:transport_information][:iqn]])
+      # wait udev queue
+      sh("/sbin/udevadm settle")
+    end
+
+    def update_volume_state_to_available
       rpc.request('sta-collector', 'update_volume', @vol_id, {
         :state=>:available,
         :host_device_name=>nil,
```
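`attach_volume_to_host` logs into the iSCSI target and then runs `udevadm settle` so the `/dev/disk/by-path` node exists before the hypervisor is pointed at it. A hedged sketch of the same wait-for-udev idiom (the portal, IQN, and retry count are illustrative):

```ruby
require 'open3'

# Illustrative values -- in the real handler these come from the volume record.
portal = '192.168.1.21'
iqn    = 'iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc'
dev    = "/dev/disk/by-path/ip-#{portal}:3260-iscsi-#{iqn}-lun-0"

def sh!(cmd)
  out, status = Open3.capture2e(cmd)
  raise "command failed: #{cmd}\n#{out}" unless status.success?
  out
end

# Retry a few times: the iSCSI session can exist before udev has created
# the by-path symlink, so settle the udev queue instead of sleeping blindly.
5.times do
  break if File.exist?(dev)
  sh!("iscsiadm -m discovery -t sendtargets -p #{portal}")
  sh!("iscsiadm -m node -l -T '#{iqn}' --portal '#{portal}'")
  sh!('/sbin/udevadm settle')   # block until pending udev events are processed
end
raise "device did not appear: #{dev}" unless File.exist?(dev)
```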
```diff
@@ -67,10 +53,24 @@ module Dcmgr
       event.publish('hva/volume_detached', :args=>[@inst_id, @vol_id])
     end
 
-    def terminate_instance
+    def terminate_instance(state_update=false)
       @hv.terminate_instance(HvaContext.new(self))
+
+      unless @inst[:volume].nil?
+        @inst[:volume].each { |volid, v|
+          @vol_id = volid
+          @vol = v
+          # force to continue detaching volumes during termination.
+          detach_volume_from_host rescue logger.error($!)
+          if state_update
+            update_volume_state_to_available rescue logger.error($!)
+          end
+        }
+      end
+
+      # cleanup vm data folder
+      FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
     end
-
 
     def update_instance_state(opts, ev)
       raise "Can't update instance info without setting @inst_id" if @inst_id.nil?
```
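`terminate_instance` now owns the whole teardown: detach every attached volume, optionally push its state back to `:available`, then remove the VM data directory, with each per-volume step wrapped in `rescue logger.error($!)` so one failing volume cannot abort the rest. A small sketch of that best-effort loop (the steps themselves are illustrative):

```ruby
require 'logger'

logger = Logger.new($stdout)

# Illustrative teardown steps; each is independent and may raise.
steps = {
  'detach vol-1'    => -> { raise 'iscsiadm logout failed' },
  'detach vol-2'    => -> { true },
  'remove data dir' => -> { true },
}

# Best-effort cleanup: log and continue instead of letting one failure
# leave the remaining volumes attached.
steps.each do |name, step|
  begin
    step.call
    logger.info("ok: #{name}")
  rescue => e
    logger.error("#{name}: #{e.message}")  # same idea as `... rescue logger.error($!)`
  end
end
```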
```diff
@@ -85,83 +85,209 @@ module Dcmgr
     end
 
     def check_interface
-
-
-
-
-
-
-
-
-      if network_map[:vlan_id] == 0
-        # bridge interface
-        bridge_if = @node.manifest.config.bridge_novlan
-        unless valid_nic?(bridge_if)
-          sh("/usr/sbin/brctl addbr %s", [bridge_if])
-          sh("/usr/sbin/brctl addif %s %s", [bridge_if, physical_if])
-        end
-      else
-        # vlan interface
-        vlan_if = "#{physical_if}.#{network_map[:vlan_id]}"
+      @inst[:instance_nics].each { |vnic|
+        network = rpc.request('hva-collector', 'get_network', vnic[:network_id])
+
+        fwd_if = phy_if = network[:physical_network][:interface]
+        bridge_if = network[:link_interface]
+
+        if network[:vlan_id].to_i > 0 && phy_if
+          fwd_if = "#{phy_if}.#{network[:vlan_id]}"
           unless valid_nic?(vlan_if)
-          sh("/sbin/vconfig add #{physical_if} #{network_map[:vlan_id]}")
-
-
-          # bridge interface
-          bridge_if = "#{@node.manifest.config.bridge_prefix}-#{physical_if}.#{network_map[:vlan_id]}"
-          unless valid_nic?(bridge_if)
-            sh("/usr/sbin/brctl addbr %s", [bridge_if])
-            sh("/usr/sbin/brctl addif %s %s", [bridge_if, vlan_if])
+            sh("/sbin/vconfig add #{phy_if} #{network[:vlan_id]}")
+            sh("/sbin/ip link set %s up", [fwd_if])
+            sh("/sbin/ip link set %s promisc on", [fwd_if])
           end
         end
 
-
-
-
-
+        unless valid_nic?(bridge_if)
+          sh("/usr/sbin/brctl addbr %s", [bridge_if])
+          sh("/usr/sbin/brctl setfd %s 0", [bridge_if])
+          # There is null case for the forward interface to create closed bridge network.
+          if fwd_if
+            sh("/usr/sbin/brctl addif %s %s", [bridge_if, fwd_if])
          end
        end
-
-
-      end
+      }
+      sleep 1
     end
 
     def get_linux_dev_path
       # check under until the dev file is created.
       # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
-      @os_devpath = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
+      @os_devpath = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_node][:ipaddr]}:3260",
                                                                  @vol[:transport_information][:iqn],
                                                                  @vol[:transport_information][:lun]]
     end
 
+    def setup_metadata_drive
+      logger.info("Setting up metadata drive image for :#{@hva_ctx.inst_id}")
+      # truncate creates sparsed file.
+      sh("/usr/bin/truncate -s 10m '#{@hva_ctx.metadata_img_path}'; sync;")
+      # TODO: need to lock loop device not to use same device from
+      # another thread/process.
+      lodev=`/sbin/losetup -f`.chomp
+      sh("/sbin/losetup #{lodev} '#{@hva_ctx.metadata_img_path}'")
+      sh("mkfs.vfat -n METADATA '#{@hva_ctx.metadata_img_path}'")
+      Dir.mkdir("#{@hva_ctx.inst_data_dir}/tmp") unless File.exists?("#{@hva_ctx.inst_data_dir}/tmp")
+      sh("/bin/mount -t vfat #{lodev} '#{@hva_ctx.inst_data_dir}/tmp'")
+
+      vnic = @inst[:instance_nics].first || {}
+      # Appendix B: Metadata Categories
+      # http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?AESDG-chapter-instancedata.html
+      metadata_items = {
+        'ami-id' => @inst[:image][:uuid],
+        'ami-launch-index' => 0,
+        'ami-manifest-path' => nil,
+        'ancestor-ami-ids' => nil,
+        'block-device-mapping/root' => '/dev/sda',
+        'hostname' => @inst[:hostname],
+        'instance-action' => @inst[:state],
+        'instance-id' => @inst[:uuid],
+        'instance-type' => @inst[:instance_spec][:uuid],
+        'kernel-id' => nil,
+        'local-hostname' => @inst[:hostname],
+        'local-ipv4' => @inst[:ips].first,
+        'mac' => vnic[:mac_addr].unpack('A2'*6).join(':'),
+        'placement/availability-zone' => nil,
+        'product-codes' => nil,
+        'public-hostname' => @inst[:hostname],
+        'public-ipv4' => @inst[:nat_ips].first,
+        'ramdisk-id' => nil,
+        'reservation-id' => nil,
+        'security-groups' => @inst[:security_groups].join(' '),
+      }
+
+      @inst[:vif].each { |vnic|
+        netaddr = IPAddress::IPv4.new("#{vnic[:ipv4][:network][:ipv4_network]}/#{vnic[:ipv4][:network][:prefix]}")
+
+        # vfat doesn't allow folder name including ":".
+        # folder name including mac address replaces "-" to ":".
+        mac = vnic[:mac_addr].unpack('A2'*6).join('-')
+        metadata_items.merge!({
+          "network/interfaces/macs/#{mac}/local-hostname" => @inst[:hostname],
+          "network/interfaces/macs/#{mac}/local-ipv4s" => vnic[:ipv4][:address],
+          "network/interfaces/macs/#{mac}/mac" => vnic[:mac_addr].unpack('A2'*6).join(':'),
+          "network/interfaces/macs/#{mac}/public-hostname" => @inst[:hostname],
+          "network/interfaces/macs/#{mac}/public-ipv4s" => vnic[:ipv4][:nat_address],
+          "network/interfaces/macs/#{mac}/security-groups" => @inst[:security_groups].join(' '),
+          # wakame-vdc extention items.
+          # TODO: need an iface index number?
+          "network/interfaces/macs/#{mac}/x-gateway" => vnic[:ipv4][:network][:ipv4_gw],
+          "network/interfaces/macs/#{mac}/x-netmask" => netaddr.prefix.to_ip,
+          "network/interfaces/macs/#{mac}/x-network" => vnic[:ipv4][:network][:ipv4_network],
+          "network/interfaces/macs/#{mac}/x-broadcast" => netaddr.broadcast,
+          "network/interfaces/macs/#{mac}/x-metric" => vnic[:ipv4][:network][:metric],
+        })
+      }
+
+      if @inst[:ssh_key_data]
+        metadata_items.merge!({
+          "public-keys/0=#{@inst[:ssh_key_data][:name]}" => @inst[:ssh_key_data][:public_key],
+          'public-keys/0/openssh-key'=> @inst[:ssh_key_data][:public_key],
+        })
+      else
+        metadata_items.merge!({'public-keys/'=>nil})
+      end
+
+      # build metadata directory tree
+      metadata_base_dir = File.expand_path("meta-data", "#{@hva_ctx.inst_data_dir}/tmp")
+      FileUtils.mkdir_p(metadata_base_dir)
+
+      metadata_items.each { |k, v|
+        if k[-1,1] == '/' && v.nil?
+          # just create empty folder
+          FileUtils.mkdir_p(File.expand_path(k, metadata_base_dir))
+          next
+        end
+
+        dir = File.dirname(k)
+        if dir != '.'
+          FileUtils.mkdir_p(File.expand_path(dir, metadata_base_dir))
+        end
+        File.open(File.expand_path(k, metadata_base_dir), 'w') { |f|
+          f.puts(v.to_s)
+        }
+      }
+      # user-data
+      File.open(File.expand_path('user-data', "#{@hva_ctx.inst_data_dir}/tmp"), 'w') { |f|
+        f.puts(@inst[:user_data])
+      }
+
+    ensure
+      # ignore any errors from cleanup work.
+      sh("/bin/umount -f '#{@hva_ctx.inst_data_dir}/tmp'") rescue logger.warn($!.message)
+      sh("/sbin/losetup -d #{lodev}") rescue logger.warn($!.message)
+    end
+
     job :run_local_store, proc {
       @inst_id = request.args[0]
       logger.info("Booting #{@inst_id}")
 
       @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
-      raise "Invalid instance state: #{@inst[:state]}" unless %w(
+      raise "Invalid instance state: #{@inst[:state]}" unless %w(pending failingover).member?(@inst[:state].to_s)
 
       # select hypervisor :kvm, :lxc
       select_hypervisor
 
       # create hva context
-
+      @hva_ctx = HvaContext.new(self)
 
       rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
       # setup vm data folder
-      inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
+      inst_data_dir = @hva_ctx.inst_data_dir
       FileUtils.mkdir(inst_data_dir) unless File.exists?(inst_data_dir)
       # copy image file
       img_src = @inst[:image][:source]
       @os_devpath = File.expand_path("#{@inst[:uuid]}", inst_data_dir)
-
+
+      # vmimage cache
+      vmimg_cache_dir = File.expand_path("_base", @node.manifest.config.vm_data_dir)
+      FileUtils.mkdir_p(vmimg_cache_dir) unless File.exists?(vmimg_cache_dir)
+      vmimg_basename = File.basename(img_src[:uri])
+      vmimg_cache_path = File.expand_path(vmimg_basename, vmimg_cache_dir)
+
+      logger.debug("preparing #{@os_devpath}")
+
+      # vmimg cached?
+      unless File.exists?(vmimg_cache_path)
+        logger.debug("copying #{img_src[:uri]} to #{vmimg_cache_path}")
+        pararell_curl("#{img_src[:uri]}", "#{vmimg_cache_path}")
+      else
+        md5sum = sh("md5sum #{vmimg_cache_path}")
+        if md5sum[:stdout].split(' ')[0] == @inst[:image][:md5sum]
+          logger.debug("verified vm cache image: #{vmimg_cache_path}")
+        else
+          logger.debug("not verified vm cache image: #{vmimg_cache_path}")
+          sh("rm -f %s", [vmimg_cache_path])
+          tmp_id = Isono::Util::gen_id
+          logger.debug("copying #{img_src[:uri]} to #{vmimg_cache_path}")
+          pararell_curl("#{img_src[:uri]}", "#{vmimg_cache_path}.#{tmp_id}")
+
+          sh("mv #{vmimg_cache_path}.#{tmp_id} #{vmimg_cache_path}")
+          logger.debug("vmimage cache deployed on #{vmimg_cache_path}")
+        end
+      end
+
+      ####
+      logger.debug("copying #{vmimg_cache_path} to #{@os_devpath}")
+      case vmimg_cache_path
+      when /\.gz$/
+        sh("zcat %s | cp --sparse=always /dev/stdin %s",[vmimg_cache_path, @os_devpath])
+      else
+        sh("cp -p --sparse=always %s %s",[vmimg_cache_path, @os_devpath])
+      end
+
       sleep 1
 
-
-
+      setup_metadata_drive
+
+      check_interface
+      @hv.run_instance(@hva_ctx)
       update_instance_state({:state=>:running}, 'hva/instance_started')
     }, proc {
+      terminate_instance(false) rescue logger.error($!)
       update_instance_state({:state=>:terminated, :terminated_at=>Time.now.utc},
                             'hva/instance_terminated')
     }
```
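`setup_metadata_drive` flattens an EC2-style metadata hash into a directory tree, one file per key, on a small vfat image that is loop-mounted into the instance directory; a key ending in `/` with a `nil` value becomes an empty folder. A condensed sketch of just the tree-building step, writing into a plain directory instead of a mounted image (the target path and sample keys are illustrative):

```ruby
require 'fileutils'

# Illustrative key/value pairs in EC2 metadata layout.
metadata_items = {
  'instance-id'               => 'i-xxxxxxxx',
  'block-device-mapping/root' => '/dev/sda',
  'public-keys/'              => nil,          # trailing "/" + nil => empty folder
}

base = File.expand_path('meta-data', '/tmp/metadata-demo')
FileUtils.mkdir_p(base)

metadata_items.each do |k, v|
  if k.end_with?('/') && v.nil?
    FileUtils.mkdir_p(File.expand_path(k, base))  # just create the empty folder
    next
  end
  FileUtils.mkdir_p(File.expand_path(File.dirname(k), base))
  File.write(File.expand_path(k, base), "#{v}\n")  # one file per metadata key
end
```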
```diff
@@ -169,28 +295,23 @@ module Dcmgr
     job :run_vol_store, proc {
       @inst_id = request.args[0]
       @vol_id = request.args[1]
-
       @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
       @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
       logger.info("Booting #{@inst_id}")
-      raise "Invalid instance state: #{@inst[:state]}" unless %w(
+      raise "Invalid instance state: #{@inst[:state]}" unless %w(pending failingover).member?(@inst[:state].to_s)
 
       # select hypervisor :kvm, :lxc
       select_hypervisor
 
       # create hva context
-
+      @hva_ctx = HvaContext.new(self)
 
       rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
 
       # setup vm data folder
-      inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
+      inst_data_dir = @hva_ctx.inst_data_dir
       FileUtils.mkdir(inst_data_dir) unless File.exists?(inst_data_dir)
 
-      # create volume from snapshot
-      jobreq.run("zfs-handle.#{@vol[:storage_pool][:node_id]}", "create_volume", @vol_id)
-
-      logger.debug("volume created on #{@vol[:storage_pool][:node_id]}: #{@vol_id}")
       # reload volume info
       @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
 
```
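In the `run_local_store` hunk above, a cached image under `_base` is reused only when its md5 digest matches the image record; otherwise it is re-downloaded to a temporary name and atomically `mv`ed into place. A hedged sketch of that verify-or-refresh flow, where `fetch` stands in for the gem's `script/pararell-curl.sh` helper:

```ruby
require 'digest'
require 'fileutils'

def fetch(uri, path)
  # stands in for script/pararell-curl.sh in the real code
  system("curl -sL -o #{path} #{uri}") or raise "download failed: #{uri}"
end

def ensure_cached_image(uri, cache_path, expected_md5)
  if File.exist?(cache_path) && Digest::MD5.file(cache_path).hexdigest == expected_md5
    return cache_path                  # verified cache hit
  end
  tmp = "#{cache_path}.#{Process.pid}.#{rand(1 << 16)}"
  fetch(uri, tmp)
  FileUtils.mv(tmp, cache_path)        # atomic swap on the same filesystem
  cache_path
end
```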
```diff
@@ -203,14 +324,20 @@ module Dcmgr
       # attach disk
       attach_volume_to_host
 
+      setup_metadata_drive
+
       # run vm
-
-      @hv.run_instance(HvaContext.new(self))
+      check_interface
+      @hv.run_instance(@hva_ctx)
       update_instance_state({:state=>:running}, 'hva/instance_started')
       update_volume_state({:state=>:attached, :attached_at=>Time.now.utc}, 'hva/volume_attached')
     }, proc {
+      # TODO: Run detach & destroy volume
       update_instance_state({:state=>:terminated, :terminated_at=>Time.now.utc},
                             'hva/instance_terminated')
+      terminate_instance(false) rescue logger.error($!)
+      update_volume_state({:state=>:deleted, :deleted_at=>Time.now.utc},
+                          'hva/volume_deleted')
     }
 
     job :terminate do
```
```diff
@@ -224,28 +351,15 @@ module Dcmgr
 
       begin
         rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:shuttingdown})
-
-        terminate_instance
-
-        unless @inst[:volume].nil?
-          @inst[:volume].each { |volid, v|
-            @vol_id = volid
-            @vol = v
-            # force to continue detaching volumes during termination.
-            detach_volume_from_host rescue logger.error($!)
-          }
-        end
-
-        # cleanup vm data folder
-        FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
+        terminate_instance(true)
       ensure
         update_instance_state({:state=>:terminated,:terminated_at=>Time.now.utc},
                               'hva/instance_terminated')
       end
     end
 
-    # just do terminate instance and unmount volumes not
-    # state
+    # just do terminate instance and unmount volumes. it should not change
+    # state on any resources.
     # called from HA at which the faluty instance get cleaned properly.
     job :cleanup do
       @inst_id = request.args[0]
```
```diff
@@ -253,19 +367,36 @@ module Dcmgr
       @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
       raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
 
+      # select hypervisor :kvm, :lxc
+      select_hypervisor
+
       begin
-        terminate_instance
-
-
-
-          @vol_id = volid
-          @vol = v
-          # force to continue detaching volumes during termination.
-          detach_volume_from_host rescue logger.error($!)
-        }
-      end
+        terminate_instance(false)
+      ensure
+        # just publish "hva/instance_terminated" to update security group rules once
+        update_instance_state({}, 'hva/instance_terminated')
       end
+    end
+
+    # stop instance is mostly similar to terminate_instance. the
+    # difference is the state transition of instance and associated
+    # resources to the instance , attached volumes and vnic, are kept
+    # same sate.
+    job :stop do
+      @inst_id = request.args[0]
 
+      @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+      raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
+
+      select_hypervisor
+
+      begin
+        rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:stopping})
+        terminate_instance(false)
+      ensure
+        #
+        update_instance_state({:state=>:stopped, :host_node_id=>nil}, 'hva/instance_terminated')
+      end
     end
 
     job :attach, proc {
```
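The new `:stop` job shares `terminate_instance(false)` with `:terminate` and `:cleanup`; the difference is only the closing state update. A stopped instance ends in `:stopped` with `:host_node_id` cleared, so the scheduler can place it on a host again later, while a terminated one is final. A tiny sketch of the differing end states (field names follow the hunk above):

```ruby
# Both jobs run the same teardown; only the closing state update differs.
final_state = {
  terminate: -> { { state: :terminated, terminated_at: Time.now.utc } },
  stop:      -> { { state: :stopped, host_node_id: nil } },  # freed for re-scheduling
}

update = final_state.fetch(:stop).call
puts "update_instance_state(#{update.inspect}, 'hva/instance_terminated')"
```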
```diff
@@ -291,7 +422,11 @@ module Dcmgr
       logger.info("Attaching #{@vol_id} on #{@inst_id}")
 
       # attach disk on guest os
-      pci_devaddr = @hv.attach_volume_to_guest(HvaContext.new(self))
+      pci_devaddr=nil
+      tryagain do
+        pci_devaddr = @hv.attach_volume_to_guest(HvaContext.new(self))
+      end
+      raise "Can't attach #{@vol_id} on #{@inst_id}" if pci_devaddr.nil?
 
       rpc.request('sta-collector', 'update_volume', @vol_id, {
         :state=>:attached,
@@ -299,6 +434,12 @@ module Dcmgr
         :guest_device_name=>pci_devaddr})
       event.publish('hva/volume_attached', :args=>[@inst_id, @vol_id])
       logger.info("Attached #{@vol_id} on #{@inst_id}")
+    }, proc {
+      # TODO: Run detach volume
+      # push back volume state to available.
+      update_volume_state({:state=>:available},
+                          'hva/volume_available')
+      logger.error("Attach failed: #{@vol_id} on #{@inst_id}")
     }
 
     job :detach do
```
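Several jobs in this handler are declared as `job :name, proc { ... }, proc { ... }`, and the hunks above show the second proc firing when the first fails: the `:attach` job's second proc logs "Attach failed" and pushes the volume back to `:available`. A minimal stand-in for that success/failure pairing (this `job` helper is hypothetical, not Isono's real implementation):

```ruby
# Hypothetical stand-in for the Isono job DSL: run the main proc and
# fall back to the recovery proc when it raises.
def job(name, main, on_failure = nil)
  main.call
rescue => e
  puts "#{name} failed: #{e.message}"
  on_failure && on_failure.call
end

job(:attach,
    proc { raise 'attach_volume_to_guest timed out' },
    proc { puts 'rolling volume back to :available' })
```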
```diff
@@ -315,10 +456,13 @@ module Dcmgr
 
       rpc.request('sta-collector', 'update_volume', @vol_id, {:state=>:detaching, :detached_at=>nil})
       # detach disk on guest os
-
+      tryagain do
+        @hv.detach_volume_from_guest(HvaContext.new(self))
+      end
 
       # detach disk on host os
       detach_volume_from_host
+      update_volume_state_to_available
     end
 
     job :reboot, proc {
```
```diff
@@ -328,9 +472,6 @@ module Dcmgr
       # select_hypervisor :kvm, :lxc
       select_hypervisor
 
-      # check interface
-      @bridge_if = check_interface
-
       # reboot instance
       @hv.reboot_instance(HvaContext.new(self))
     }
```
```diff
@@ -346,6 +487,11 @@ module Dcmgr
     def event
       @event ||= Isono::NodeModules::EventChannel.new(@node)
     end
+
+    def pararell_curl(url, output_path)
+      script_root_path = File.join(File.expand_path('../../../../',__FILE__), 'script')
+      sh("#{script_root_path}/pararell-curl.sh --url=#{url} --output_path=#{output_path}")
+    end
   end
 
   class HvaContext
```
```diff
@@ -371,8 +517,8 @@ module Dcmgr
       @hva.instance_variable_get(:@os_devpath)
     end
 
-    def
-
+    def metadata_img_path
+      File.expand_path('metadata.img', inst_data_dir)
     end
 
     def vol
```