wakame-vdc-agents 10.12.0 → 11.06.0

Files changed (71)
  1. data/LICENSE +164 -201
  2. data/Rakefile +6 -11
  3. data/bin/hva +11 -1351
  4. data/bin/nsa +5 -9
  5. data/bin/sta +124 -71
  6. data/config/hva.conf.example +12 -0
  7. data/config/initializers/isono.rb +7 -23
  8. data/config/initializers/sequel.rb +11 -2
  9. data/lib/dcmgr.rb +70 -11
  10. data/lib/dcmgr/cli/base.rb +74 -0
  11. data/lib/dcmgr/cli/errors.rb +59 -0
  12. data/lib/dcmgr/cli/group.rb +101 -0
  13. data/lib/dcmgr/cli/host.rb +101 -0
  14. data/lib/dcmgr/cli/image.rb +108 -0
  15. data/lib/dcmgr/cli/keypair.rb +72 -0
  16. data/lib/dcmgr/cli/network.rb +198 -0
  17. data/lib/dcmgr/cli/quota.rb +28 -0
  18. data/lib/dcmgr/cli/spec.rb +82 -0
  19. data/lib/dcmgr/cli/storage.rb +88 -0
  20. data/lib/dcmgr/cli/tag.rb +81 -0
  21. data/lib/dcmgr/cli/vlan.rb +53 -0
  22. data/lib/dcmgr/drivers/hypervisor.rb +33 -0
  23. data/lib/dcmgr/drivers/iijgio_storage.rb +37 -0
  24. data/lib/dcmgr/drivers/kvm.rb +118 -0
  25. data/lib/dcmgr/drivers/lxc.rb +167 -0
  26. data/lib/dcmgr/drivers/s3_storage.rb +39 -0
  27. data/lib/dcmgr/drivers/snapshot_storage.rb +51 -0
  28. data/lib/dcmgr/endpoints/core_api.rb +188 -324
  29. data/lib/dcmgr/endpoints/core_api_mock.rb +52 -3
  30. data/lib/dcmgr/endpoints/errors.rb +73 -32
  31. data/lib/dcmgr/endpoints/metadata.rb +163 -16
  32. data/lib/dcmgr/helpers/cli_helper.rb +1 -1
  33. data/lib/dcmgr/helpers/nic_helper.rb +35 -0
  34. data/lib/dcmgr/logger.rb +5 -1
  35. data/lib/dcmgr/messaging_client.rb +117 -0
  36. data/lib/dcmgr/models/account.rb +27 -3
  37. data/lib/dcmgr/models/base_new.rb +21 -7
  38. data/lib/dcmgr/models/host_pool.rb +27 -7
  39. data/lib/dcmgr/models/image.rb +31 -3
  40. data/lib/dcmgr/models/instance.rb +72 -23
  41. data/lib/dcmgr/models/instance_nic.rb +12 -2
  42. data/lib/dcmgr/models/instance_spec.rb +16 -0
  43. data/lib/dcmgr/models/ip_lease.rb +37 -1
  44. data/lib/dcmgr/models/netfilter_group.rb +7 -7
  45. data/lib/dcmgr/models/network.rb +42 -3
  46. data/lib/dcmgr/models/quota.rb +25 -0
  47. data/lib/dcmgr/models/request_log.rb +26 -11
  48. data/lib/dcmgr/models/ssh_key_pair.rb +14 -1
  49. data/lib/dcmgr/models/storage_pool.rb +19 -72
  50. data/lib/dcmgr/models/tag.rb +5 -0
  51. data/lib/dcmgr/models/vlan_lease.rb +8 -0
  52. data/lib/dcmgr/models/volume.rb +26 -8
  53. data/lib/dcmgr/models/volume_snapshot.rb +37 -0
  54. data/lib/dcmgr/node_modules/hva_collector.rb +56 -36
  55. data/lib/dcmgr/node_modules/instance_ha.rb +1 -1
  56. data/lib/dcmgr/node_modules/instance_monitor.rb +70 -0
  57. data/lib/dcmgr/node_modules/service_netfilter.rb +914 -0
  58. data/lib/dcmgr/node_modules/sta_collector.rb +7 -30
  59. data/lib/dcmgr/rack/request_logger.rb +60 -0
  60. data/lib/dcmgr/rack/run_initializer.rb +42 -0
  61. data/lib/dcmgr/rpc/hva_handler.rb +388 -0
  62. data/lib/dcmgr/rubygems.rb +7 -0
  63. data/lib/dcmgr/storage_service.rb +98 -0
  64. data/lib/dcmgr/tags.rb +2 -2
  65. data/lib/dcmgr/version.rb +8 -0
  66. data/lib/ext/time.rb +8 -0
  67. data/lib/sinatra/respond_to.rb +3 -0
  68. data/lib/sinatra/sequel_transaction.rb +20 -5
  69. metadata +133 -100
  70. data/lib/dcmgr/models/physical_host.rb +0 -67
  71. data/lib/dcmgr/web/base.rb +0 -21
data/lib/dcmgr/node_modules/sta_collector.rb
@@ -4,6 +4,7 @@ require 'isono'
 module Dcmgr
   module NodeModules
     class StaCollector < Isono::NodeModules::Base
+      include Dcmgr::Logger
 
       initialize_hook do
         app = Isono::Rack::ObjectMethod.new(myinstance)
@@ -21,49 +22,25 @@ module Dcmgr
       end
 
       def get_volume(volume_id)
-        Models::Volume.lock!
         v = Dcmgr::Models::Volume[volume_id]
         v.merge_pool_data
       end
 
       def get_snapshot(snapshot_id)
-        Models::VolumeSnapshot.lock!
         vs = Dcmgr::Models::VolumeSnapshot[snapshot_id]
         vs.to_hash
       end
 
-      def update_volume(data)
-        Models::Volume.lock!
-        v = Dcmgr::Models::Volume[data[:volume_id]]
-
-        column = case data[:state]
-                 when :creating
-                   [:state, :export_path]
-                 when :available
-                   if !data[:transport_information].nil?
-                     [:state, :transport_information]
-                   else
-                     [:state, :host_device_name, :instance_id, :detached_at]
-                   end
-                 when :attaching
-                   [:state, :host_device_name]
-                 when :attached
-                   [:state, :guest_device_name, :attached_at]
-                 when :detaching
-                   [:state, :guest_device_name]
-                 else
-                   [:state]
-                 end
-
-        v.set_fields(data, column).save
+      def update_volume(volume_id, data)
+        v = Dcmgr::Models::Volume[volume_id]
+        v.set(data).save
         # do not respond model object.
         nil
       end
 
-      def update_snapshot(data)
-        Models::VolumeSnapshot.lock!
-        vs = Dcmgr::Models::VolumeSnapshot[data[:snapshot_id]]
-        vs.set_fields(data, [:state]).save
+      def update_snapshot(snapshot_id, data)
+        vs = Dcmgr::Models::VolumeSnapshot[snapshot_id]
+        vs.set(data).save
        # do not respond model object.
        nil
       end
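
The rewritten collector endpoints take the record id as a separate RPC argument and apply the caller's column hash directly with Sequel's `set`/`save`, instead of digging the id out of the payload and whitelisting columns per state. A caller-side sketch of the new shape, mirroring the `rpc.request` calls that appear later in this diff in `hva_handler.rb` (the `rpc` helper and the column values below are illustrative):

    # Sketch only: `rpc` wraps Isono::NodeModules::RpcChannel, as in hva_handler.rb.
    # The old form packed the id into the hash, e.g. update_volume({:volume_id=>..., :state=>...});
    # the new form passes the id first and only the columns to change second.
    rpc.request('sta-collector', 'update_volume', vol_id, {
                  :state => :attaching,
                  :attached_at => nil,
                  :host_device_name => os_devpath })
    rpc.request('sta-collector', 'update_snapshot', snapshot_id, {:state => :available})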
data/lib/dcmgr/rack/request_logger.rb (new file)
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+module Dcmgr::Rack
+  # Rack middleware for logging each API request.
+  class RequestLogger
+    HTTP_X_VDC_REQUEST_ID='HTTP_X_VDC_REQUEST_ID'.freeze
+    HEADER_X_VDC_REQUEST_ID='X-VDC-Request-ID'.freeze
+
+    def initialize(app, with_header=true)
+      raise TypeError unless app.is_a?(Dcmgr::Endpoints::CoreAPI)
+      @app = app
+      @with_header = with_header
+    end
+
+    def call(env)
+      dup._call(env)
+    end
+
+    def _call(env)
+      @log = Dcmgr::Models::RequestLog.new
+      log_env(env)
+      begin
+        ret = @app.call(env)
+        @log.response_status = ret[0]
+        @log.response_msg = ''
+
+        # inject X-VDC-Request-ID header
+        if @with_header
+          ret[1] = (ret[1] || {}).merge({HEADER_X_VDC_REQUEST_ID=>@log.request_id})
+        end
+        return ret
+      rescue ::Exception => e
+        @log.response_status = 999
+        @log.response_msg = e.message
+        raise e
+      ensure
+        @log.class.db.transaction do
+          @log.save
+        end
+      end
+    end
+
+    private
+    # set common values in Rack env.
+    # @params [Hash] env
+    def log_env(env)
+      #@log.frontend_system_id = env[Dcmgr::Endpoints::RACK_FRONTEND_SYSTEM_ID].to_s
+      if env[Dcmgr::Endpoints::HTTP_X_VDC_ACCOUNT_UUID].nil? || env[Dcmgr::Endpoints::HTTP_X_VDC_ACCOUNT_UUID] == ''
+        @log.account_id = 'nil'
+      else
+        @log.account_id = env[Dcmgr::Endpoints::HTTP_X_VDC_ACCOUNT_UUID]
+      end
+      @log.requester_token = env[Dcmgr::Endpoints::HTTP_X_VDC_REQUESTER_TOKEN]
+      @log.request_method = env['REQUEST_METHOD']
+      @log.api_path = env['PATH_INFO']
+      @log.params = ''
+    end
+
+  end
+end
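
RequestLogger is an ordinary Rack middleware around the core API: it records the account UUID, requester token, HTTP method and path into a `Dcmgr::Models::RequestLog` row, injects an `X-VDC-Request-ID` response header when `with_header` is true, and records status 999 plus the exception message before re-raising when the wrapped app fails. A minimal sketch of wrapping an already-built CoreAPI app (the variable names are illustrative):

    # Sketch only: `core_api` stands for an instantiated Dcmgr::Endpoints::CoreAPI app;
    # any other app class makes the constructor raise TypeError.
    logged_api = Dcmgr::Rack::RequestLogger.new(core_api)   # with_header defaults to true
    status, headers, body = logged_api.call(env)            # env is a normal Rack env hash
    headers['X-VDC-Request-ID']                             # => the request id recorded on the RequestLog row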
data/lib/dcmgr/rack/run_initializer.rb (new file)
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+module Dcmgr
+  module Rack
+    # Rack middleware for running initialization/setup procedure.
+    # Case 1: only when the HTTP request came first time.
+    # Case 2: every time when the HTTP request comes.
+    #
+    # ex.
+    # use InitializeFirstRequest, proc {
+    #   # run setup codes. for example, establish database connection etc..
+    # }
+    #
+    class RunInitializer
+      def initialize(app, run_once, run_every=nil)
+        raise ArgumentError unless run_once.nil? || run_once.is_a?(Proc)
+        raise ArgumentError unless run_every.nil? || run_every.is_a?(Proc)
+        @app = app
+        @run_once_block = run_once
+        @run_every_block = run_every
+      end
+
+      def call(env)
+        def call(env)
+          if @run_every_block
+            @run_every_block.arity == 1 ? @run_every_block.call(env) : @run_every_block.call
+          end
+          @app.call(env)
+        end
+
+        if @run_once_block
+          @run_once_block.arity == 1 ? @run_once_block.call(env) : @run_once_block.call
+        end
+        if @run_every_block
+          @run_every_block.arity == 1 ? @run_every_block.call(env) : @run_every_block.call
+        end
+        @app.call(env)
+      end
+
+    end
+  end
+end
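
Note that the usage comment still refers to the class by an older name, `InitializeFirstRequest`, while the class added here is `Dcmgr::Rack::RunInitializer`. It accepts two optional procs: `run_once` fires only on the first request (the outer `call` redefines `call` on first invocation, so later requests take only the `run_every` path), and `run_every` fires before every request; a proc with arity 1 receives the Rack env. A usage sketch under that reading (the setup blocks shown are illustrative):

    # Sketch only: illustrative setup blocks for a Rack::Builder / config.ru style stack.
    use Dcmgr::Rack::RunInitializer,
        proc {          # run_once: executed on the first request only
          # e.g. establish the database connection
        },
        proc { |env|    # run_every: executed before every request, receives the Rack env
          # per-request setup
        }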
data/lib/dcmgr/rpc/hva_handler.rb (new file)
@@ -0,0 +1,388 @@
+# -*- coding: utf-8 -*-
+require 'isono'
+require 'net/telnet'
+require 'fileutils'
+
+module Dcmgr
+  module Rpc
+    module KvmHelper
+      # Establish telnet connection to KVM monitor console
+      def connect_monitor(port, &blk)
+        begin
+          telnet = ::Net::Telnet.new("Host" => "localhost",
+                                     "Port"=>port.to_s,
+                                     "Prompt" => /\n\(qemu\) \z/,
+                                     "Timeout" => 60,
+                                     "Waittime" => 0.2)
+
+          blk.call(telnet)
+        rescue => e
+          logger.error(e) if self.respond_to?(:logger)
+          raise e
+        ensure
+          telnet.close
+        end
+      end
+    end
+
+    class HvaHandler < EndpointBuilder
+      include Dcmgr::Logger
+      include Dcmgr::Helpers::CliHelper
+      include KvmHelper
+      include Dcmgr::Helpers::NicHelper
+
+      def select_hypervisor
+        @hv = Dcmgr::Drivers::Hypervisor.select_hypervisor(@inst[:instance_spec][:hypervisor])
+      end
+
+      def attach_volume_to_host
+        # check under until the dev file is created.
+        # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
+        get_linux_dev_path
+
+        tryagain do
+          next true if File.exist?(@os_devpath)
+
+          sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_pool][:ipaddr]])
+          sh("iscsiadm -m node -l -T '%s' --portal '%s'",
+             [@vol[:transport_information][:iqn], @vol[:storage_pool][:ipaddr]])
+          sleep 1
+        end
+
+        rpc.request('sta-collector', 'update_volume', @vol_id, {
+                      :state=>:attaching,
+                      :attached_at => nil,
+                      :host_device_name => @os_devpath})
+      end
+
+      def detach_volume_from_host
+        # iscsi logout
+        sh("iscsiadm -m node -T '%s' --logout", [@vol[:transport_information][:iqn]])
+        rpc.request('sta-collector', 'update_volume', @vol_id, {
+                      :state=>:available,
+                      :host_device_name=>nil,
+                      :instance_id=>nil,
+                      :detached_at => Time.now.utc,
+                    })
+        event.publish('hva/volume_detached', :args=>[@inst_id, @vol_id])
+      end
+
+      def terminate_instance
+        @hv.terminate_instance(HvaContext.new(self))
+      end
+
+
+      def update_instance_state(opts, ev)
+        raise "Can't update instance info without setting @inst_id" if @inst_id.nil?
+        rpc.request('hva-collector', 'update_instance', @inst_id, opts)
+        event.publish(ev, :args=>[@inst_id])
+      end
+
+      def update_volume_state(opts, ev)
+        raise "Can't update volume info without setting @vol_id" if @vol_id.nil?
+        rpc.request('sta-collector', 'update_volume', @vol_id, opts)
+        event.publish(ev, :args=>[@vol_id])
+      end
+
+      def check_interface
+        vnic = @inst[:instance_nics].first
+        unless vnic.nil?
+          network_map = rpc.request('hva-collector', 'get_network', @inst[:instance_nics].first[:network_id])
+
+          # physical interface
+          physical_if = find_nic(@node.manifest.config.hv_ifindex)
+          raise "UnknownPhysicalNIC" if physical_if.nil?
+
+          if network_map[:vlan_id] == 0
+            # bridge interface
+            bridge_if = @node.manifest.config.bridge_novlan
+            unless valid_nic?(bridge_if)
+              sh("/usr/sbin/brctl addbr %s", [bridge_if])
+              sh("/usr/sbin/brctl addif %s %s", [bridge_if, physical_if])
+            end
+          else
+            # vlan interface
+            vlan_if = "#{physical_if}.#{network_map[:vlan_id]}"
+            unless valid_nic?(vlan_if)
+              sh("/sbin/vconfig add #{physical_if} #{network_map[:vlan_id]}")
+            end
+
+            # bridge interface
+            bridge_if = "#{@node.manifest.config.bridge_prefix}-#{physical_if}.#{network_map[:vlan_id]}"
+            unless valid_nic?(bridge_if)
+              sh("/usr/sbin/brctl addbr %s", [bridge_if])
+              sh("/usr/sbin/brctl addif %s %s", [bridge_if, vlan_if])
+            end
+          end
+
+          # interface up? down?
+          [ vlan_if, bridge_if ].each do |ifname|
+            if nic_state(ifname) == "down"
+              sh("/sbin/ifconfig #{ifname} 0.0.0.0 up")
+            end
+          end
+          sleep 1
+          bridge_if
+        end
+      end
+
+
+      def get_linux_dev_path
+        # check under until the dev file is created.
+        # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
+        @os_devpath = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
+                                                                   @vol[:transport_information][:iqn],
+                                                                   @vol[:transport_information][:lun]]
+      end
+
+      job :run_local_store, proc {
+        @inst_id = request.args[0]
+        logger.info("Booting #{@inst_id}")
+
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+        raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
+
+        # select hypervisor :kvm, :lxc
+        select_hypervisor
+
+        # create hva context
+        hc = HvaContext.new(self)
+
+        rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
+        # setup vm data folder
+        inst_data_dir = hc.inst_data_dir
+        FileUtils.mkdir(inst_data_dir) unless File.exists?(inst_data_dir)
+        # copy image file
+        img_src = @inst[:image][:source]
+        @os_devpath = File.expand_path("#{@inst[:uuid]}", inst_data_dir)
+        sh("curl --silent -o '#{@os_devpath}' #{img_src[:uri]}")
+        sleep 1
+
+        @bridge_if = check_interface
+        @hv.run_instance(hc)
+        update_instance_state({:state=>:running}, 'hva/instance_started')
+      }, proc {
+        update_instance_state({:state=>:terminated, :terminated_at=>Time.now.utc},
+                              'hva/instance_terminated')
+      }
+
+      job :run_vol_store, proc {
+        @inst_id = request.args[0]
+        @vol_id = request.args[1]
+
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+        @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+        logger.info("Booting #{@inst_id}")
+        raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
+
+        # select hypervisor :kvm, :lxc
+        select_hypervisor
+
+        # create hva context
+        hc = HvaContext.new(self)
+
+        rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
+
+        # setup vm data folder
+        inst_data_dir = hc.inst_data_dir
+        FileUtils.mkdir(inst_data_dir) unless File.exists?(inst_data_dir)
+
+        # create volume from snapshot
+        jobreq.run("zfs-handle.#{@vol[:storage_pool][:node_id]}", "create_volume", @vol_id)
+
+        logger.debug("volume created on #{@vol[:storage_pool][:node_id]}: #{@vol_id}")
+        # reload volume info
+        @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+
+        rpc.request('sta-collector', 'update_volume', @vol_id, {:state=>:attaching, :attached_at=>nil})
+        logger.info("Attaching #{@vol_id} on #{@inst_id}")
+        # check under until the dev file is created.
+        # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
+        get_linux_dev_path
+
+        # attach disk
+        attach_volume_to_host
+
+        # run vm
+        @bridge_if = check_interface
+        @hv.run_instance(HvaContext.new(self))
+        update_instance_state({:state=>:running}, 'hva/instance_started')
+        update_volume_state({:state=>:attached, :attached_at=>Time.now.utc}, 'hva/volume_attached')
+      }, proc {
+        update_instance_state({:state=>:terminated, :terminated_at=>Time.now.utc},
+                              'hva/instance_terminated')
+      }
+
+      job :terminate do
+        @inst_id = request.args[0]
+
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+        raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
+
+        # select hypervisor :kvm, :lxc
+        select_hypervisor
+
+        begin
+          rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:shuttingdown})
+
+          terminate_instance
+
+          unless @inst[:volume].nil?
+            @inst[:volume].each { |volid, v|
+              @vol_id = volid
+              @vol = v
+              # force to continue detaching volumes during termination.
+              detach_volume_from_host rescue logger.error($!)
+            }
+          end
+
+          # cleanup vm data folder
+          FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
+        ensure
+          update_instance_state({:state=>:terminated,:terminated_at=>Time.now.utc},
+                                'hva/instance_terminated')
+        end
+      end
+
+      # just do terminate instance and unmount volumes not to affect
+      # state management.
+      # called from HA at which the faluty instance get cleaned properly.
+      job :cleanup do
+        @inst_id = request.args[0]
+
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+        raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
+
+        begin
+          terminate_instance
+
+          unless @inst[:volume].nil?
+            @inst[:volume].each { |volid, v|
+              @vol_id = volid
+              @vol = v
+              # force to continue detaching volumes during termination.
+              detach_volume_from_host rescue logger.error($!)
+            }
+          end
+        end
+
+      end
+
+      job :attach, proc {
+        @inst_id = request.args[0]
+        @vol_id = request.args[1]
+
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+        @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+        logger.info("Attaching #{@vol_id}")
+        raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'available'
+
+        # select hypervisor :kvm, :lxc
+        select_hypervisor
+
+        rpc.request('sta-collector', 'update_volume', @vol_id, {:state=>:attaching, :attached_at=>nil})
+        # check under until the dev file is created.
+        # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
+        get_linux_dev_path
+
+        # attach disk on host os
+        attach_volume_to_host
+
+        logger.info("Attaching #{@vol_id} on #{@inst_id}")
+
+        # attach disk on guest os
+        pci_devaddr = @hv.attach_volume_to_guest(HvaContext.new(self))
+
+        rpc.request('sta-collector', 'update_volume', @vol_id, {
+                      :state=>:attached,
+                      :attached_at=>Time.now.utc,
+                      :guest_device_name=>pci_devaddr})
+        event.publish('hva/volume_attached', :args=>[@inst_id, @vol_id])
+        logger.info("Attached #{@vol_id} on #{@inst_id}")
+      }
+
+      job :detach do
+        @inst_id = request.args[0]
+        @vol_id = request.args[1]
+
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+        @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
+        logger.info("Detaching #{@vol_id} on #{@inst_id}")
+        raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'attached'
+
+        # select hypervisor :kvm, :lxc
+        select_hypervisor
+
+        rpc.request('sta-collector', 'update_volume', @vol_id, {:state=>:detaching, :detached_at=>nil})
+        # detach disk on guest os
+        @hv.detach_volume_from_guest(HvaContext.new(self))
+
+        # detach disk on host os
+        detach_volume_from_host
+      end
+
+      job :reboot, proc {
+        @inst_id = request.args[0]
+        @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
+
+        # select_hypervisor :kvm, :lxc
+        select_hypervisor
+
+        # check interface
+        @bridge_if = check_interface
+
+        # reboot instance
+        @hv.reboot_instance(HvaContext.new(self))
+      }
+
+      def rpc
+        @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
+      end
+
+      def jobreq
+        @jobreq ||= Isono::NodeModules::JobChannel.new(@node)
+      end
+
+      def event
+        @event ||= Isono::NodeModules::EventChannel.new(@node)
+      end
+    end
+
+    class HvaContext
+
+      def initialize(hvahandler)
+        raise "Invalid Class: #{hvahandler}" unless hvahandler.instance_of?(HvaHandler)
+        @hva = hvahandler
+      end
+
+      def node
+        @hva.instance_variable_get(:@node)
+      end
+
+      def inst_id
+        @hva.instance_variable_get(:@inst_id)
+      end
+
+      def inst
+        @hva.instance_variable_get(:@inst)
+      end
+
+      def os_devpath
+        @hva.instance_variable_get(:@os_devpath)
+      end
+
+      def bridge_if
+        @hva.instance_variable_get(:@bridge_if)
+      end
+
+      def vol
+        @hva.instance_variable_get(:@vol)
+      end
+
+      def inst_data_dir
+        File.expand_path("#{inst_id}", node.manifest.config.vm_data_dir)
+      end
+    end
+
+  end
+end
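
KvmHelper#connect_monitor opens a telnet session to the local QEMU/KVM monitor on the given port, yields it to the block, and closes it afterwards, logging and re-raising any error. A sketch of how a caller could drive the monitor through it (the port variable and the `info status` command are illustrative):

    # Sketch only: `monitor_port` is whatever telnet port the KVM process exposes its monitor on.
    connect_monitor(monitor_port) do |t|
      t.cmd("info status")    # Net::Telnet#cmd sends the line and waits for the "(qemu) " prompt
    end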