wakame-vdc-agents 10.11.0
- data/LICENSE +202 -0
- data/NOTICE +1 -0
- data/Rakefile +142 -0
- data/bin/hva +972 -0
- data/bin/nsa +147 -0
- data/bin/sta +182 -0
- data/config/hva.conf.example +10 -0
- data/config/initializers/isono.rb +43 -0
- data/config/initializers/passenger.rb +6 -0
- data/config/initializers/sequel.rb +21 -0
- data/config/nsa.conf.example +9 -0
- data/config/path_resolver.rb +12 -0
- data/lib/dcmgr.rb +115 -0
- data/lib/dcmgr/endpoints/core_api.rb +1004 -0
- data/lib/dcmgr/endpoints/core_api_mock.rb +816 -0
- data/lib/dcmgr/endpoints/errors.rb +55 -0
- data/lib/dcmgr/endpoints/metadata.rb +129 -0
- data/lib/dcmgr/logger.rb +44 -0
- data/lib/dcmgr/models/account.rb +104 -0
- data/lib/dcmgr/models/account_resource.rb +16 -0
- data/lib/dcmgr/models/base.rb +69 -0
- data/lib/dcmgr/models/base_new.rb +371 -0
- data/lib/dcmgr/models/frontend_system.rb +38 -0
- data/lib/dcmgr/models/host_pool.rb +102 -0
- data/lib/dcmgr/models/image.rb +46 -0
- data/lib/dcmgr/models/instance.rb +255 -0
- data/lib/dcmgr/models/instance_netfilter_group.rb +16 -0
- data/lib/dcmgr/models/instance_nic.rb +68 -0
- data/lib/dcmgr/models/instance_spec.rb +21 -0
- data/lib/dcmgr/models/ip_lease.rb +42 -0
- data/lib/dcmgr/models/netfilter_group.rb +88 -0
- data/lib/dcmgr/models/netfilter_rule.rb +21 -0
- data/lib/dcmgr/models/network.rb +32 -0
- data/lib/dcmgr/models/physical_host.rb +67 -0
- data/lib/dcmgr/models/request_log.rb +25 -0
- data/lib/dcmgr/models/ssh_key_pair.rb +55 -0
- data/lib/dcmgr/models/storage_pool.rb +134 -0
- data/lib/dcmgr/models/tag.rb +126 -0
- data/lib/dcmgr/models/tag_mapping.rb +28 -0
- data/lib/dcmgr/models/volume.rb +130 -0
- data/lib/dcmgr/models/volume_snapshot.rb +47 -0
- data/lib/dcmgr/node_modules/hva_collector.rb +134 -0
- data/lib/dcmgr/node_modules/sta_collector.rb +72 -0
- data/lib/dcmgr/scheduler.rb +12 -0
- data/lib/dcmgr/scheduler/find_last.rb +16 -0
- data/lib/dcmgr/scheduler/find_random.rb +16 -0
- data/lib/dcmgr/stm/instance.rb +25 -0
- data/lib/dcmgr/stm/snapshot_context.rb +33 -0
- data/lib/dcmgr/stm/volume_context.rb +65 -0
- data/lib/dcmgr/web/base.rb +21 -0
- data/lib/sinatra/accept_media_types.rb +128 -0
- data/lib/sinatra/lazy_auth.rb +56 -0
- data/lib/sinatra/rabbit.rb +278 -0
- data/lib/sinatra/respond_to.rb +272 -0
- data/lib/sinatra/sequel_transaction.rb +27 -0
- data/lib/sinatra/static_assets.rb +83 -0
- data/lib/sinatra/url_for.rb +44 -0
- metadata +270 -0
data/bin/nsa
ADDED
@@ -0,0 +1,147 @@
+#!/usr/bin/env ruby
+# -*- coding: utf-8 -*-
+#
+# Naming Service Agent:
+# This agent aims to configure DNS/DHCP daemons or devices to supply
+# IP address and Hostname for Instances.
+
+begin
+  require 'rubygems'
+  require 'bundler'
+  Bundler.setup(:default)
+rescue Exception
+end
+
+require File.expand_path('../../config/path_resolver', __FILE__)
+
+
+require 'eventmachine'
+
+class SuperviseDnsmasq < Isono::NodeModules::Base
+  include Dcmgr::Logger
+
+  config_section do
+    desc "configuration file for dnsmasq dhcp"
+    dhcp_hosts_conf File.expand_path('dnsmasq-dhcp.conf', '/var/tmp/')
+  end
+
+  initialize_hook do
+    if manifest.config.network_name.nil?
+      abort("network_name is not set yet in nsa.conf")
+    end
+
+    opts = sprintf("-k --no-hosts --no-resolv --addn-hosts=%s --dhcp-hostsfile=%s --conf-file=%s",
+                   config_section.dhcp_hosts_conf + ".hosts",
+                   config_section.dhcp_hosts_conf + ".dhcp",
+                   config_section.dhcp_hosts_conf
+                   )
+    cmd = "#{manifest.config.dnsmasq_bin_path} #{opts}"
+
+    @dnsmasq_pid = fork {
+      Process.exec(cmd)
+    }
+    begin
+      if !Process.waitpid(@dnsmasq_pid, Process::WNOHANG).nil?
+        abort("dnsmasq is terminated unexpectedly")
+      end
+    rescue Errno::ECHILD
+      abort("Failed to exec dnsmasq process.")
+    end
+
+    myinstance.refresh_dnsmasq_conf
+
+    event = Isono::NodeModules::EventChannel.new(node)
+
+    event.subscribe('hva/instance_started', '#') do |args|
+      logger.info("refresh on instance_started: #{args.inspect}")
+      myinstance.refresh_dnsmasq_conf
+    end
+
+    event.subscribe('hva/instance_terminated', '#') do |args|
+      logger.info("refresh on instance_terminated: #{args.inspect}")
+      myinstance.refresh_dnsmasq_conf
+    end
+  end
+
+  terminate_hook do
+    system("/bin/kill #{@dnsmasq_pid}")
+  end
+
+
+  def refresh_dnsmasq_conf
+    EM.defer {
+      begin
+        generate_dhcp_conf()
+        system("/bin/kill -HUP #{@dnsmasq_pid}")
+        logger.info("refreshed dnsmasq conf")
+      rescue Exception => e
+        logger.error(e)
+      end
+    }
+  end
+
+  def generate_dhcp_conf
+    rpc = Isono::NodeModules::RpcChannel.new(node)
+    # load entier macaddr,ipaddr pairs for all instances from collector.
+    confdata = rpc.request('hva-collector', 'get_dhcp_conf', manifest.config.network_name)
+
+    require 'erb'
+
+    File.open(config_section.dhcp_hosts_conf, 'w') { |f|
+      f << ERB.new(<<'_EOS_', nil, '-').result(binding)
+#interface=eth0
+server=8.8.8.8
+dhcp-range=<%= confdata[:ipv4_gw] %>,static,<%= confdata[:netmask] %>
+dhcp-option=option:netmask,<%= confdata[:netmask] %>
+dhcp-option=option:router,<%= confdata[:ipv4_gw] %>
+dhcp-option=option:dns-server,<%= confdata[:dns_server] %>
+dhcp-option=option:domain-name,<%= confdata[:domain_name] %>
+#dhcp-option=option:domain-search,<%= confdata[:domain_name] %>
+<%- confdata[:mac2addr].each { |i| -%>
+#dhcp-host=<%= i[:mac_addr] %>,<%= i[:ipaddr] %>
+<%- } -%>
+<%- confdata[:addr2host].each { |i| -%>
+#address=/<%= i[:hostname] %>/<%= i[:ipaddr] %>
+<%- } -%>
+_EOS_
+    }
+
+    File.open(config_section.dhcp_hosts_conf + ".dhcp", 'w') { |f|
+      f << ERB.new(<<'_EOS_', nil, '-').result(binding)
+<%- confdata[:mac2addr].each { |i| -%>
+<%= i[:mac_addr] %>,<%= i[:ipaddr] %>
+<%- } -%>
+_EOS_
+    }
+
+    File.open(config_section.dhcp_hosts_conf + ".hosts", 'w') { |f|
+      f << ERB.new(<<'_EOS_', nil, '-').result(binding)
+<%- confdata[:addr2host].each { |i| -%>
+<%= i[:ipaddr] %> <%= i[:hostname] %>
+<%- } -%>
+_EOS_
+    }
+  end
+end
+
+include Isono::Runner::RpcServer
+
+manifest = DEFAULT_MANIFEST.dup
+manifest.instance_eval do
+  node_name 'nsa'
+  node_instance_id "#{Isono::Util.default_gw_ipaddr}"
+  load_module Isono::NodeModules::NodeHeartbeat
+  load_module SuperviseDnsmasq
+
+  config do |c|
+    c.dnsmasq_bin_path = '/usr/sbin/dnsmasq'
+    c.network_name = nil
+  end
+
+  config_path File.expand_path('config/nsa.conf', app_root)
+  load_config
+end
+
+
+start(manifest) do
+end
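For reference, SuperviseDnsmasq#generate_dhcp_conf feeds a single hash (the reply of the 'get_dhcp_conf' RPC to hva-collector) into three ERB templates. The standalone sketch below is not part of the gem; it renders the ".dhcp" and ".hosts" templates against a hypothetical confdata hash so the expected reply shape and the resulting dnsmasq files are easier to see. All addresses and hostnames are placeholders, and the keyword trim_mode argument is the modern-Ruby equivalent of the positional '-' used in the agent.

require 'erb'

# Hypothetical reply of rpc.request('hva-collector', 'get_dhcp_conf', network_name)
confdata = {
  :ipv4_gw     => '192.168.1.1',
  :netmask     => '255.255.255.0',
  :dns_server  => '192.168.1.1',
  :domain_name => 'vdc.local',
  :mac2addr    => [{:mac_addr => '52:54:00:12:34:56', :ipaddr => '192.168.1.10'}],
  :addr2host   => [{:ipaddr => '192.168.1.10', :hostname => 'i-00000001'}],
}

# Same template as the "--dhcp-hostsfile" (".dhcp") file: one "MAC,IP" line per NIC.
dhcp_hosts = ERB.new(<<'_EOS_', trim_mode: '-').result(binding)
<%- confdata[:mac2addr].each { |i| -%>
<%= i[:mac_addr] %>,<%= i[:ipaddr] %>
<%- } -%>
_EOS_

# Same template as the "--addn-hosts" (".hosts") file: one "IP hostname" line per instance.
addn_hosts = ERB.new(<<'_EOS_', trim_mode: '-').result(binding)
<%- confdata[:addr2host].each { |i| -%>
<%= i[:ipaddr] %> <%= i[:hostname] %>
<%- } -%>
_EOS_

puts dhcp_hosts   # => 52:54:00:12:34:56,192.168.1.10
puts addn_hosts   # => 192.168.1.10 i-00000001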
data/bin/sta
ADDED
@@ -0,0 +1,182 @@
+#!/usr/bin/env ruby
+# -*- coding: utf-8 -*-
+
+begin
+  require 'rubygems'
+  require 'bundler'
+  Bundler.setup(:default)
+rescue Exception
+end
+
+require File.expand_path('../../config/path_resolver', __FILE__)
+
+include Isono::Runner::RpcServer
+
+class ZfsHandler < EndpointBuilder
+  include Dcmgr::Logger
+
+  job :create_volume do
+    volume_id = request.args[0]
+    job = Dcmgr::Stm::VolumeContext.new(volume_id)
+    data = rpc.request('sta-collector', 'get_volume', volume_id)
+    sdata = rpc.request('sta-collector', 'get_snapshot', data[:snapshot_id]) unless data[:snapshot_id].nil?
+    logger.info("creating new volume #{volume_id}")
+    raise "Invalid volume state: #{data[:state]}" unless data[:state].to_s == 'registering'
+    job.stm.state=data[:state].to_sym
+    job.stm.on_create
+
+    vol_path = "#{data[:storage_pool][:export_path]}/#{data[:export_path]}"
+    `zfs list #{File.dirname(vol_path)} > /dev/null 2>&1`
+    if $?.exitstatus != 0
+      # create parent filesystem
+      `zfs create -p #{File.dirname(vol_path)}`
+      logger.info("create parent filesystem: #{File.dirname(vol_path)}")
+    end
+
+    if sdata
+      # create volume from snapshot
+      v = `zfs receive #{vol_path} < #{data[:storage_pool][:snapshot_base_path]}/#{sdata[:account_id]}/#{sdata[:uuid]}.zsnap`
+      if $?.exitstatus != 0
+        raise "volume already exists: #{volume_id}"
+      end
+
+      v = `zfs destroy #{vol_path}@#{sdata[:uuid]}`
+      if $?.exitstatus != 0
+        raise "volume snapshot has not deleted: #{volume_id}@#{sdata[:uuid]}"
+      end
+
+      vl = `zfs list #{vol_path}`
+      if vl.nil? || $?.exitstatus != 0
+        raise "volume has not be created: #{volume_id}"
+      end
+    else
+      # create volume
+      v = `zfs create -p -V #{data[:size]}m #{vol_path}`
+      if $?.exitstatus != 0
+        raise "volume already exists: #{volume_id}"
+      end
+      vl = `zfs list #{vol_path}`
+      if vl.nil? || $?.exitstatus != 0
+        raise "volume has not be created: #{volume_id}"
+      end
+    end
+
+    rpc.request('sta-collector', 'update_volume', job.to_hash(:export_path=>data[:export_path]))
+    logger.info("created new volume: #{volume_id}")
+
+    job.stm.on_register
+    vr = `zfs shareiscsi=on #{data[:storage_pool][:export_path]}/#{data[:uuid]}`
+    if $?.exitstatus != 0
+      raise "failed iscsi target request: #{volume_id}"
+    end
+    il = `iscsitadm list target -v #{data[:storage_pool][:export_path]}/#{data[:uuid]}`
+    if $?.exitstatus != 0
+      raise "iscsi target has not be created #{volume_id}"
+    end
+    il = il.downcase.split("\n").select {|row| row.strip!}
+    # :transport_information => {:iqn => "iqn.1986-03.com.sun:02:787bca42-9639-44e4-f115-f5b06ed31817", :lun => 0}
+    opt = {:iqn => il[0].split(": ").last, :lun=>il[6].split(": ").last.to_i}
+
+    rpc.request('sta-collector', 'update_volume', job.to_hash(:transport_information=>opt))
+    logger.info("registered iscsi target: #{volume_id}")
+  end
+
+  job :delete_volume do
+    volume_id = request.args[0]
+    job = Dcmgr::Stm::VolumeContext.new(volume_id)
+    data = rpc.request('sta-collector', 'get_volume', volume_id)
+    logger.info("deleting volume: #{volume_id}")
+    raise "Invalid volume state: #{data[:state]}" unless data[:state].to_s == 'deregistering'
+    job.stm.state=data[:state].to_sym
+
+    # deregisterd iscsi target
+    job.stm.on_delete
+    vr = `zfs shareiscsi=off #{data[:storage_pool][:export_path]}/#{data[:export_path]}`
+    il = `iscsitadm list target #{data[:storage_pool][:export_path]}/#{data[:export_path]}`
+    unless il.empty?
+      raise "iscsi target has not deleted: #{volume_id} iqn: #{data[:transport_information][:iqn]}"
+    end
+
+    rpc.request('sta-collector', 'update_volume', job.to_hash)
+    logger.info("deregistered iscsi target: #{volume_id} iqn: #{data[:transport_information][:iqn]}")
+
+    # delete volume
+    job.stm.on_delete
+    job.on_delete
+    v = `zfs destroy #{data[:storage_pool][:export_path]}/#{data[:export_path]}`
+    vl = `zfs list #{data[:storage_pool][:export_path]}/#{data[:export_path]}`
+    unless vl.empty?
+      raise "volume has not deleted: #{volume_id}"
+    end
+
+    rpc.request('sta-collector', 'update_volume', job.to_hash)
+    logger.info("deleted volume: #{volume_id}")
+  end
+
+  job :create_snapshot do
+    snapshot_id = request.args[0]
+    job = Dcmgr::Stm::SnapshotContext.new(snapshot_id)
+    sdata = rpc.request('sta-collector', 'get_snapshot', snapshot_id) unless snapshot_id.nil?
+    data = rpc.request('sta-collector', 'get_volume', sdata[:origin_volume_id])
+    logger.info("create new snapshot: #{snapshot_id}")
+    job.stm.state=sdata[:state].to_sym
+    job.stm.on_create
+
+    vol_path = "#{data[:storage_pool][:export_path]}/#{data[:export_path]}"
+    snap_dir = "#{data[:storage_pool][:snapshot_base_path]}/#{sdata[:account_id]}"
+    unless File.exist?(snap_dir)
+      # create a directory to save snapshot
+      `mkdir -p #{snap_dir}`
+      logger.info("create a directory: #{snap_dir}")
+    end
+    snap = `zfs snapshot #{vol_path}@#{sdata[:uuid]}`
+    rpc.request('sta-collector', 'update_snapshot', job.to_hash)
+    logger.info("creating new snapshot: #{snapshot_id}")
+
+    job.stm.on_create
+    snap_send = `zfs send #{vol_path}@#{sdata[:uuid]} > #{snap_dir}/#{sdata[:uuid]}.zsnap`
+    snap_delete = `zfs destroy #{vol_path}@#{sdata[:uuid]}`
+
+    rpc.request('sta-collector', 'update_snapshot', job.to_hash)
+    logger.info("created new snapshot: #{snapshot_id}")
+  end
+
+  job :delete_snapshot do
+    snapshot_id = request.args[0]
+    job = Dcmgr::Stm::SnapshotContext.new(snapshot_id)
+    sdata = rpc.request('sta-collector', 'get_snapshot', snapshot_id) unless snapshot_id.nil?
+    data = rpc.request('sta-collector', 'get_volume', sdata[:origin_volume_id])
+    logger.info("deleting snapshot: #{snapshot_id}")
+    raise "Invalid snapshot state: #{sdata[:state]}" unless sdata[:state].to_s == 'deleting'
+    job.stm.state=sdata[:state].to_sym
+    job.stm.on_delete
+
+    snap_delete = `rm -rf #{data[:storage_pool][:snapshot_base_path]}/#{sdata[:account_id]}/#{sdata[:uuid]}.zsnap`
+    rpc.request('sta-collector', 'update_snapshot', job.to_hash)
+    logger.info("deleted snapshot: #{snapshot_id}")
+  end
+
+  def rpc
+    @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
+  end
+
+  def jobreq
+    @jobreq ||= Isono::NodeModules::JobChannel.new(@node)
+  end
+
+  def event
+    @event ||= Isono::NodeModules::EventChannel.new(@node)
+  end
+end
+
+manifest = Isono::Runner::RpcServer::DEFAULT_MANIFEST.dup
+manifest.instance_eval do
+  node_name 'sta'
+  node_instance_id "#{Isono::Util.default_gw_ipaddr}"
+
+  load_module Isono::NodeModules::NodeHeartbeat
+end
+
+start(manifest) do
+  endpoint "zfs-handle.#{@node.node_id}", ZfsHandler
+end
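ZfsHandler drives zfs and iscsitadm entirely through backtick shell-outs and checks $?.exitstatus after each call. The helper below is not part of the gem; it is a minimal sketch of that pattern, with made-up command strings and messages, for readers following the create_volume and delete_volume jobs above.

# Minimal sketch of the shell-out pattern used throughout ZfsHandler (not gem code).
def sh!(cmd, error_message)
  out = `#{cmd}`                                    # run the command, capture stdout
  raise "#{error_message}: #{cmd}" unless $?.exitstatus == 0
  out
end

# Hypothetical usage mirroring the create_volume job; vol_path is a placeholder
# and running it for real requires a ZFS pool.
# vol_path = 'tank/export/vol-00000001'
# sh!("zfs create -p -V 1024m #{vol_path}", 'volume already exists')
# sh!("zfs list #{vol_path}", 'volume has not been created')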
data/config/initializers/isono.rb
ADDED
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+require 'isono'
+require 'eventmachine'
+
+Signal.trap('EXIT') { EventMachine.stop }
+
+if defined?(PhusionPassenger)
+  if PhusionPassenger::VERSION_STRING =~ /^3\.0\./
+    blk = proc { |forked|
+      if EventMachine.reactor_running?
+        EventMachine.stop
+        Dcmgr.class_eval {
+          @messaging_client = nil
+        }
+      end
+      Thread.new { EventMachine.epoll; EventMachine.run; }
+    }
+  else
+    blk = proc {
+      if EventMachine.reactor_running?
+        EventMachine.stop
+        Dcmgr.class_eval {
+          @messaging_client = nil
+        }
+      end
+      Thread.new { EventMachine.epoll; EventMachine.run; }
+    }
+  end
+  PhusionPassenger.on_event(:starting_worker_process, &blk)
+else
+  EventMachine.stop if EventMachine.reactor_running?
+  Thread.new { EventMachine.epoll; EventMachine.run; }
+end
+
+Dcmgr.class_eval {
+  def self.messaging
+    @messaging_client ||= Isono::MessagingClient.start(conf.amqp_server_uri) do
+      node_name 'dcmgr'
+      node_instance_id "#{Isono::Util.default_gw_ipaddr}:#{Process.pid}"
+    end
+  end
+}
data/config/initializers/sequel.rb
ADDED
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+require 'sequel'
+db = Sequel.connect(Dcmgr.conf.database_url)
+#require 'logger'
+#db.loggers << Logger.new(STDOUT)
+if db.is_a?(Sequel::MySQL::Database)
+  Sequel::MySQL.default_charset = 'utf8'
+  Sequel::MySQL.default_collate = 'utf8_general_ci'
+  Sequel::MySQL.default_engine = 'InnoDB'
+
+  db << "SET AUTOCOMMIT=0"
+end
+
+# Disable TEXT to Sequel::SQL::Blob translation.
+# see the thread: MySQL text turning into blobs
+# http://groups.google.com/group/sequel-talk/browse_thread/thread/d0f4c85abe9b3227/9ceaf291f90111e6
+# lib/sequel/adapters/mysql.rb
+[249, 250, 251, 252].each { |v|
+  Sequel::MySQL::MYSQL_TYPES.delete(v)
+}
data/lib/dcmgr.rb
ADDED
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+module Dcmgr
+  VERSION='10.11.0'
+
+  class << self
+    def conf
+      @conf
+    end
+
+    def configure(config_path=nil, &blk)
+      return self if @conf
+
+      if config_path.is_a?(String)
+        raise "Could not find configration file: #{config_path}" unless File.exists?(config_path)
+
+        require 'configuration'
+        code= <<-__END
+        Configuration('global') do
+          #{File.read(config_path)}
+        end
+        __END
+        @conf = eval(code)
+      else
+        @conf = Configuration.for('global', &blk)
+      end
+
+      self
+    end
+
+    def run_initializers()
+      raise "Complete the configuration prior to run_initializers()." if @conf.nil?
+      initializer_hooks.each { |n|
+        n.call
+      }
+    end
+
+    def initializer_hooks(&blk)
+      @initializer_hooks ||= []
+      if blk
+        @initializer_hooks << blk
+      end
+      @initializer_hooks
+    end
+
+  end
+
+  initializer_hooks {
+    Dcmgr.class_eval {
+      DCMGR_ROOT = ENV['DCMGR_ROOT'] || File.expand_path('../../', __FILE__)
+    }
+  }
+
+  # Add conf/initializers/*.rb loader
+  initializer_hooks {
+    initializers_root = File.expand_path('config/initializers', DCMGR_ROOT)
+
+    if File.directory?(initializers_root)
+      Dir.glob("#{initializers_root}/*.rb") { |f|
+        ::Kernel.load(f)
+      }
+    end
+  }
+
+  autoload :Logger, 'dcmgr/logger'
+
+  module Models
+    autoload :Base, 'dcmgr/models/base'
+
+    CREATE_TABLE_CLASSES=[:Account,:Tag,:TagMapping,:FrontendSystem,
+                          :Image,:HostPool,:RequestLog,:Instance,
+                          :NetfilterGroup, :NetfilterRule,
+                          :StoragePool,:Volume,:VolumeSnapshot,
+                          :InstanceNetfilterGroup,
+                          :InstanceSpec, :InstanceNic, :Network, :IpLease,
+                          :SshKeyPair].freeze
+    autoload :BaseNew, 'dcmgr/models/base_new'
+    autoload :Account, 'dcmgr/models/account'
+    autoload :Tag, 'dcmgr/models/tag'
+    autoload :TagMapping, 'dcmgr/models/tag_mapping'
+    autoload :AccountResource, 'dcmgr/models/account_resource'
+    autoload :Instance, 'dcmgr/models/instance'
+    autoload :Image, 'dcmgr/models/image'
+    autoload :HostPool, 'dcmgr/models/host_pool'
+    autoload :RequestLog, 'dcmgr/models/request_log'
+    autoload :FrontendSystem, 'dcmgr/models/frontend_system'
+    autoload :StoragePool, 'dcmgr/models/storage_pool'
+    autoload :Volume, 'dcmgr/models/volume'
+    autoload :VolumeSnapshot, 'dcmgr/models/volume_snapshot'
+    autoload :NetfilterGroup, 'dcmgr/models/netfilter_group'
+    autoload :NetfilterRule, 'dcmgr/models/netfilter_rule'
+    autoload :InstanceSpec, 'dcmgr/models/instance_spec'
+    autoload :InstanceNic, 'dcmgr/models/instance_nic'
+    autoload :Network, 'dcmgr/models/network'
+    autoload :IpLease, 'dcmgr/models/ip_lease'
+    autoload :InstanceNetfilterGroup, 'dcmgr/models/instance_netfilter_group'
+    autoload :SshKeyPair, 'dcmgr/models/ssh_key_pair'
+  end
+
+  module Endpoints
+    autoload :CoreAPI, 'dcmgr/endpoints/core_api'
+    autoload :Metadata, 'dcmgr/endpoints/metadata'
+  end
+
+  module NodeModules
+    autoload :StaCollector, 'dcmgr/node_modules/sta_collector'
+    autoload :HvaCollector, 'dcmgr/node_modules/hva_collector'
+  end
+
+  module Stm
+    autoload :VolumeContext, 'dcmgr/stm/volume_context'
+    autoload :SnapshotContext, 'dcmgr/stm/snapshot_context'
+    autoload :Instance, 'dcmgr/stm/instance'
+  end
+end
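Dcmgr.configure accepts either a path to a config file (which it wraps in Configuration('global') do ... end and evals) or a block passed straight to Configuration.for('global'); run_initializers then fires the hooks registered above, including the loader that pulls in the config/initializers/*.rb files shown earlier. Below is a minimal sketch of that flow, not from the gem, assuming dcmgr is already on the load path; the setting names are the ones read elsewhere in this diff (database_url by sequel.rb, amqp_server_uri by Dcmgr.messaging) and the values are placeholders.

require 'dcmgr'

# Block form: equivalent to a config file containing the same two lines.
Dcmgr.configure do
  database_url    'mysql://localhost/wakame_dcmgr'  # read by config/initializers/sequel.rb
  amqp_server_uri 'amqp://localhost/'               # read by Dcmgr.messaging (initializers/isono.rb)
end

# Runs every registered initializer_hooks block in order:
# sets DCMGR_ROOT, then loads config/initializers/*.rb.
Dcmgr.run_initializers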