dopv 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +23 -0
- data/.rspec +2 -0
- data/ChangeLog.md +456 -0
- data/Gemfile +6 -0
- data/Gemfile.lock +260 -0
- data/Guardfile +22 -0
- data/LICENSE.txt +177 -0
- data/README.md +214 -0
- data/Rakefile +6 -0
- data/bin/dopv +4 -0
- data/dopv.gemspec +52 -0
- data/lib/dopv.rb +166 -0
- data/lib/dopv/cli.rb +54 -0
- data/lib/dopv/cli/command_add.rb +37 -0
- data/lib/dopv/cli/command_export.rb +26 -0
- data/lib/dopv/cli/command_import.rb +32 -0
- data/lib/dopv/cli/command_list.rb +18 -0
- data/lib/dopv/cli/command_remove.rb +29 -0
- data/lib/dopv/cli/command_run.rb +38 -0
- data/lib/dopv/cli/command_update.rb +35 -0
- data/lib/dopv/cli/command_validate.rb +30 -0
- data/lib/dopv/infrastructure.rb +40 -0
- data/lib/dopv/infrastructure/providers/baremetal.rb +12 -0
- data/lib/dopv/infrastructure/providers/base.rb +422 -0
- data/lib/dopv/infrastructure/providers/openstack.rb +308 -0
- data/lib/dopv/infrastructure/providers/ovirt.rb +228 -0
- data/lib/dopv/infrastructure/providers/vsphere.rb +322 -0
- data/lib/dopv/log.rb +14 -0
- data/lib/dopv/persistent_disk.rb +128 -0
- data/lib/dopv/plan.rb +17 -0
- data/lib/dopv/state_store.rb +87 -0
- data/lib/dopv/version.rb +3 -0
- data/spec/data/hooks/test_hook_script_1 +9 -0
- data/spec/data/hooks/test_hook_script_2 +10 -0
- data/spec/data/plans/test-plan-1.yaml +140 -0
- data/spec/spec_helper.rb +112 -0
- data/spec/unit/dopv/dopv_spec.rb +7 -0
- data/spec/unit/dopv/persistent_disk_spec.rb +38 -0
- data/spec/unit/dopv/plan_spec.rb +34 -0
- data/spec/unit/dopv/version_spec.rb +17 -0
- metadata +401 -0
data/lib/dopv/cli/command_add.rb
@@ -0,0 +1,37 @@
module Dopv
  module Cli

    def self.command_add(base)
      base.class_eval do

        desc 'Add a new plan file to the plan store'
        arg_name 'plan_file'

        command :add do |c|
          c.desc 'Update the plan if it already exists in plan store'
          c.switch [:update, :u], :negatable => false

          c.action do |global_options, options, args|
            help_now!('Add takes exactly one argument, a plan file.') if
              args.empty? || args.length != 1

            plan_file = args[0]

            exit_now!("The plan file #{plan_file} must be a readable file.") unless
              File.file?(plan_file) && File.readable?(plan_file)

            begin
              puts Dopv.add(plan_file)
            rescue DopCommon::PlanExistsError => e
              if options[:update]
                puts Dopv.update_plan(plan_file, {})
              else
                raise "#{e}, please use 'dopv update' first, or use -u|--update flag to add this plan forcibly."
              end
            end
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_export.rb
@@ -0,0 +1,26 @@
module Dopv
  module Cli

    def self.command_export(base)
      base.class_eval do

        desc 'Export the internal data disks state into a local file'
        arg_name 'plan_name data_disks_file'
        command :export do |c|
          c.action do |global_options, options, args|
            help_now!('Export takes exactly two arguments, plan name and data disks file.') if
              args.empty? || args.length != 2

            plan_name, data_disks_file = args
            data_disks_dir = File.dirname(data_disks_file)

            exit_now!("The #{data_disks_dir} must be a directory writable by the process.") unless
              File.directory?(data_disks_dir) && File.writable?(data_disks_dir)

            Dopv.export_state_file(plan_name, data_disks_file)
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_import.rb
@@ -0,0 +1,32 @@
module Dopv
  module Cli

    def self.command_import(base)
      base.class_eval do

        desc 'Import data disks from a file into internal state store of the given plan'
        arg_name 'plan_name data_disks_file'
        command :import do |c|
          c.desc 'Force plan import'
          c.switch [:f, :force], :negatable => false

          c.action do |global_options, options, args|
            help_now!('Import takes exactly two arguments, a plan name and data disks file.') if
              args.empty? || args.length != 2

            plan_name, data_disks_file = args

            exit_now!("The #{data_disks_file} must be a readable file.") unless
              File.file?(data_disks_file) && File.readable?(data_disks_file)

            if !Dopv.export_state(plan_name).empty? && !options[:force]
              exit_now!("The internal plan's state is not empty, please use the '-f|--force' flag to overwrite.")
            end

            Dopv.import_state_file(plan_name, data_disks_file)
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_list.rb
@@ -0,0 +1,18 @@
module Dopv
  module Cli

    def self.command_list(base)
      base.class_eval do

        desc 'List plans stored in the plan store'

        command :list do |c|
          c.action do |global_options, options, args|
            puts Dopv.list
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_remove.rb
@@ -0,0 +1,29 @@
module Dopv
  module Cli

    def self.command_remove(base)
      base.class_eval do

        desc 'Remove existing plan from the plan store'
        arg_name 'plan_name'

        command :remove do |c|
          c.desc 'Keep the DOPi state file'
          c.switch [:k, :keep_dopi_state], :negatable => false

          c.desc 'Remove the DOPv state file (THIS REMOVES THE DISK STATE!)'
          c.switch [:r, :remove_dopv_state], :negatable => false

          c.action do |global_options, options, args|
            help_now!('Remove takes exactly one argument, a plan name.') if
              args.empty? || args.length != 1

            plan_name = args[0]

            Dopv.remove(plan_name, !options[:keep_dopi_state], options[:remove_dopv_state])
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_run.rb
@@ -0,0 +1,38 @@
module Dopv
  module Cli

    def self.command_run(base, action)
      base.class_eval do

        desc "#{action.capitalize} a plan."
        arg_name 'plan_name'

        command action do |c|
          if action == :undeploy
            c.desc 'Remove data disks from the state and cloud provider.'
            c.switch [:rmdisk, :r], :default_value => false
          end

          DopCommon::Cli.node_select_options(c)

          c.action do |global_options, options, args|
            options[:run_for_nodes] = DopCommon::Cli.parse_node_select_options(options)

            help_now!("#{action.capitalize} takes exactly one argument, a plan name.") if
              args.empty? || args.length > 1

            plan_name = args[0]

            begin
              case action
              when :deploy then Dopv.deploy(plan_name, options)
              when :undeploy then Dopv.undeploy(plan_name, options)
              when :refresh then Dopv.refresh(plan_name, options)
              end
            end
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_update.rb
@@ -0,0 +1,35 @@
module Dopv
  module Cli

    def self.command_update(base)
      base.class_eval do

        desc 'Update the plan and/or the plan state for a given plan yaml or plan name.'
        arg_name 'plan_file_or_name'

        command :update do |c|
          c.desc 'Remove the existing disk information and start with a clean state.'
          c.switch [:clear, :c], :default_value => false

          c.desc 'Ignore the update and set the state version to the latest version.'
          c.switch [:ignore, :i], :default_value => false

          c.action do |global_options, options, args|
            help_now!('Update takes exactly one argument, the plan name or file.') if
              args.empty? || args.length != 1

            plan = args[0]

            if Dopv.list.include?(plan)
              Dopv.update_state(plan, options)
            elsif File.file?(plan) && File.readable?(plan)
              Dopv.update_plan(plan, options)
            else
              exit_now!("No such plan '#{plan}' in the store or the plan file doesn't exist or is unreadable.")
            end
          end
        end
      end
    end
  end
end
data/lib/dopv/cli/command_validate.rb
@@ -0,0 +1,30 @@
module Dopv
  module Cli

    def self.command_validate(base)
      base.class_eval do

        desc 'Validate a plan file.'
        arg_name 'plan_file'

        command :validate do |c|
          c.action do |global_options, options, args|
            help_now!('Validate takes exactly one argument, a plan file.') if
              args.empty? || args.length != 1

            plan_file = args[0]

            exit_now!("The #{plan_file} must exist and be a readable file.") unless
              File.file?(plan_file) && File.readable?(plan_file)

            if Dopv.valid?(plan_file)
              puts('Plan is valid.')
            else
              exit_now!('Plan is NOT valid!')
            end
          end
        end
      end
    end
  end
end
data/lib/dopv/infrastructure.rb
@@ -0,0 +1,40 @@
require 'dopv/infrastructure/providers/base'

module Dopv
  module Infrastructure
    TMP = '/tmp'

    PROVIDER_BASE = 'dopv/infrastructure/providers'

    PROVIDER_CLASSES = {
      :ovirt     => 'Ovirt',
      :rhev      => 'Ovirt',
      :openstack => 'OpenStack',
      :vsphere   => 'Vsphere',
      :vmware    => 'Vsphere',
      :baremetal => 'BareMetal'
    }

    def self.load_provider(provider)
      require "#{PROVIDER_BASE}/#{PROVIDER_CLASSES[provider].downcase}"
      klass_name = "Dopv::Infrastructure::#{PROVIDER_CLASSES[provider]}"
      klass_name.split('::').inject(Object) { |res, i| res.const_get(i) }
    end

    def self.bootstrap_node(plan, state_store)
      provider = load_provider(plan.infrastructure.provider)
      provider.bootstrap_node(plan, state_store)
    end

    def self.destroy_node(plan, state_store, destroy_data_volumes=false)
      provider = load_provider(plan.infrastructure.provider)
      provider.destroy_node(plan, state_store, destroy_data_volumes)
    end

    def self.refresh_node(plan, state_store)
      provider = load_provider(plan.infrastructure.provider)
      provider.refresh_node(plan, state_store)
    end

  end
end
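
load_provider maps a provider symbol from the plan to a class name, lazily requires the matching provider file, and resolves the constant with const_get. A minimal self-contained sketch of the same lookup pattern, using hypothetical Example names rather than the real dopv classes:

# Illustration only: symbol-to-class lookup via const_get, as in load_provider.
module Example
  class Ovirt; end
  class Vsphere; end

  CLASSES = { :ovirt => 'Ovirt', :rhev => 'Ovirt', :vsphere => 'Vsphere' }

  def self.lookup(provider)
    "Example::#{CLASSES[provider]}".split('::').inject(Object) { |res, i| res.const_get(i) }
  end
end

puts Example.lookup(:rhev)   # => Example::Ovirt (both :ovirt and :rhev resolve to the same class)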
data/lib/dopv/infrastructure/providers/base.rb
@@ -0,0 +1,422 @@
require 'forwardable'
require 'uri'
require 'fog'
require 'open3'
require 'dop_common/utils'

module Dopv
  module Infrastructure
    class ProviderError < StandardError
      def exit_code
        4
      end
    end

    class Base
      extend Forwardable
      include DopCommon::Utils

      MAX_RETRIES = 5

      attr_reader :data_disks_db
      def_delegators :@plan, :nodename, :fqdn, :hostname, :domainname, :dns
      def_delegators :@plan, :timezone
      def_delegators :@plan, :full_clone?, :image, :cores, :memory, :storage, :flavor
      def_delegators :@plan, :infrastructure, :infrastructure_properties
      def_delegator :@plan, :interfaces, :interfaces_config
      def_delegator :@plan, :data_disks, :volumes_config
      def_delegators :@plan, :credentials
      def_delegators :@plan, :hooks

      def self.bootstrap_node(plan, state_store)
        new(plan, state_store).bootstrap_node
      end

      def self.destroy_node(plan, state_store, destroy_data_volumes=false)
        new(plan, state_store).destroy_node(destroy_data_volumes)
      end

      def self.refresh_node(plan, state_store)
        new(plan, state_store).refresh_node
      end

      def initialize(plan, state_store)
        @compute_provider = nil
        @plan = plan
        @state_store = state_store
        @data_disks_db = Dopv::PersistentDisk::DB.new(state_store, nodename)
      end

      def bootstrap_node
        begin
          unless get_node_instance
            execute_hook(:pre_create_vm, true)
            node_instance = create_node_instance
            add_node_nics(node_instance)
            add_node_data_volumes(node_instance)
            add_node_affinities(node_instance)
            start_node_instance(node_instance)
            execute_hook(:post_create_vm, true)
            record_node_instance(node_instance)
          else
            ::Dopv::log.warn("Node #{nodename}: Already exists.")
            # TODO: Ask Marcel what would be a purpose/use case of this
            execute_hook(:pre_create_vm, false)
            execute_hook(:post_create_vm, false)
          end
        rescue Exception => e
          ::Dopv::log.error("Node #{nodename}: #{e}")
          destroy_node_instance(node_instance)
          raise ProviderError, "Node #{nodename}: #{e}."
        end
      end

      def destroy_node(destroy_data_volumes=false)
        node_instance = get_node_instance
        if node_instance
          execute_hook(:pre_destroy_vm, true)
          destroy_node_instance(node_instance, destroy_data_volumes)
          execute_hook(:post_destroy_vm, true)
          erase_node_instance(node_instance)
        else
          # TODO: Ask Marcel what would be a purpose/use case of this
          execute_hook(:pre_destroy_vm, false)
          execute_hook(:post_destroy_vm, false)
        end
      end

      def refresh_node
        node_instance = get_node_instance
        if node_instance
          record_node_instance(node_instance)
        else
          erase_node_instance(node_instance)
        end
      end

      private

      def provider_username
        @provider_username ||= infrastructure.credentials.username
      end

      def provider_password
        @provider_password ||= infrastructure.credentials.password
      end

      def provider_url
        @provider_url ||= infrastructure.endpoint.to_s
      end

      def provider_host
        @provider_host ||= infrastructure.endpoint.host
      end

      def provider_port
        @provider_port ||= infrastructure.endpoint.port
      end

      def provider_scheme
        @provider_scheme ||= infrastructure.endpoint.scheme
      end

      def provider_ssl?
        provider_scheme == 'https'
      end

      def root_password
        cred = credentials.find { |c| c.type == :username_password && c.username == 'root' } if
          @root_password.nil?
        @root_password ||= cred.nil? ? nil : cred.password
      end

      def root_ssh_pubkeys
        cred = credentials.find_all { |c| c.type == :ssh_key && c.username == 'root' } if
          @root_ssh_pubkeys.nil?
        @root_ssh_pubkeys ||= cred.empty? ? [] : cred.collect { |k| k.public_key }.uniq
      end

      def administrator_password
        cred = credentials.find { |c| c.type == :username_password && c.username == 'Administrator' } if
          @administrator_password.nil?
        @administrator_password ||= cred.nil? ? nil : cred.password
      end

      def administrator_fullname
        'Administrator'
      end

      def keep_ha?
        @keep_ha ||= infrastructure_properties.keep_ha?
      end

      def compute_provider
        Dopv::log.info("Node #{nodename}: Creating compute provider.") unless @compute_provider
        @compute_provider ||= @compute_connection_opts ? ::Fog::Compute.new(@compute_connection_opts) : nil
      end

      def datacenter(filters={})
        @datacenter ||= compute_provider.datacenters(filters).find do |d|
          if d.is_a?(Hash) && d.has_key?(:name)
            d[:name] == infrastructure_properties.datacenter
          elsif d.respond_to?(:name)
            d.name == infrastructure_properties.datacenter
          else
            raise ProviderError, "Unsupported datacenter class #{d.class}"
          end
        end
        raise ProviderError, "No such data center #{infrastructure_properties.datacenter}" unless @datacenter
        @datacenter
      end

      def cluster(filters={})
        @cluster ||= compute_provider.clusters(filters).find { |c| c.name == infrastructure_properties.cluster }
        raise ProviderError, "No such cluster #{infrastructure_properties.cluster}" unless @cluster
        @cluster
      end

      def template(filters={})
        raise ProviderError, "No template defined" unless image
        @template ||= if compute_provider.respond_to?(:templates)
                        compute_provider.templates.all(filters).find { |t| t.name == image }
                      elsif compute_provider.respond_to?(:images)
                        compute_provider.images.all(filters).find { |t| t.name == image }
                      else
                        raise ProviderError, "The provider does not have a template/image collection"
                      end
        raise ProviderError, "No such template #{image}" unless @template
        @template
      end

      def get_node_instance(filters = {})
        retries = 0
        compute_provider.servers.all(filters).find { |n| n.name == nodename }
      rescue => e
        errmsg = "Node #{nodename}: An error occurred while searching for a node: #{e}."
        retries += 1
        if retries <= MAX_RETRIES
          Dopv.log.warn("#{errmsg} Retrying (##{retries}).")
          sleep 1
          retry
        else
          raise ProviderError, "#{errmsg} Bailing out."
        end
      end

      def node_instance_ready?(node_instance)
        node_instance.ready?
      end

      def node_instance_stopped?(node_instance)
        node_instance.stopped?
      end

      def wait_for_task_completion(node_instance)
      end

      def create_node_instance
        Dopv::log.info("Node #{nodename}: Creating node instance.")
        node_instance = compute_provider.servers.create(@node_creation_opts)
        wait_for_task_completion(node_instance)
        node_instance
      end

      def destroy_node_instance(node_instance, destroy_data_volumes=false)
        if node_instance
          stop_node_instance(node_instance)

          volumes = data_disks_db.volumes
          volumes.each do |v|
            if destroy_data_volumes
              ::Dopv::log.warn("Node #{nodename}: Destroying data volume #{v.name}.")
              begin
                destroy_node_volume(node_instance, v)
              rescue
                ::Dopv::log.error("Could not destroy data volume #{v.name}. Please fix manually.")
              end
              erase_node_data_volume(v)
            else
              ::Dopv::log.debug("Node #{nodename}: Detaching data volume #{v.name}.")
              begin
                detach_node_volume(node_instance, v)
              rescue
                ::Dopv::log.warn("Could not detach data volume #{v.name}.")
              end
            end
          end

          ::Dopv::log.warn("Node #{nodename}: Destroying node.")
          node_instance.destroy rescue nil
        end
      end

      def reload_node_instance(node_instance)
        node_instance.reload
      end

      def customize_node_instance(node_instance)
      end

      def start_node_instance(node_instance)
        stop_node_instance(node_instance)
        ::Dopv::log.info("Node #{nodename}: Starting node.")
        customize_node_instance(node_instance)
      end

      def stop_node_instance(node_instance)
        reload_node_instance(node_instance)
        unless node_instance_stopped?(node_instance)
          ::Dopv::log.info("Node #{nodename}: Stopping node.")
          wait_for_task_completion(node_instance)
          node_instance.stop
          reload_node_instance(node_instance)
        end
      end

      def add_node_nic(node_instance, attrs)
        nic = node_instance.interfaces.create(attrs)
        node_instance.interfaces.reload
        nic
      end

      def update_node_nic(node_instance, nic, attrs)
        nic.save(attrs)
        node_instance.interfaces.reload
      end

      def add_node_nics(node_instance)
      end

      def remove_node_nics(node_instance)
        Dopv::log.debug("Node #{nodename}: Removing (possible) network interfaces defined by template.")
        if block_given?
          node_instance.interfaces.each { |nic| yield(node_instance, nic) }
        else
          node_instance.interfaces.each(&:destroy) rescue nil
        end
        node_instance.interfaces.reload
      end

      def add_node_affinity(node_instance, affinity)
      end

      def remove_node_affinity(node_instance, affinity)
      end

      def add_node_volume(node_instance, config)
        node_instance.volumes.create(config)
      end

      def update_node_volume(node_instance, volume, attrs)
        node_instance.update_volume(attrs.merge(:id => volume.id))
        wait_for_task_completion(node_instance)
        node_instance.volumes.reload
        volume
      end

      def destroy_node_volume(node_instance, volume)
      end

      def attach_node_volume(node_instance, volume)
      end

      def detach_node_volume(node_instance, volume)
      end

      def add_node_data_volumes(node_instance)
        ::Dopv::log.info("Node #{nodename}: Adding data volumes.")

        ::Dopv::log.debug("Node #{nodename}: Loading data volumes DB.")
        data_volumes = data_disks_db.volumes

        # Check if persistent disks DB is consistent
        ::Dopv::log.debug("Node #{nodename}: Checking data volumes DB integrity.")
        data_volumes.each do |dv|
          # Disk exists in state DB but not in plan
          unless volumes_config.find { |cv| dv.name == cv.name }
            err_msg = "Inconsistent data volumes DB: Volume #{dv.name} exists in DB but not in plan"
            raise ProviderError, err_msg
          end
        end
        volumes_config.each do |cv|
          # Disk exists in a plan but it is not recorded in the state DB for a
          # given node
          if !data_volumes.empty? && !data_volumes.find { |dv| cv.name == dv.name }
            ::Dopv::log.warn("Node #{nodename}: Data volume #{cv.name} exists in plan but not in DB.")
          end
        end

        # Attach all persistent disks
        data_volumes.each do |dv|
          ::Dopv::log.debug("Node #{nodename}: Attaching data volume #{dv.name} [#{dv.id}].")
          begin
            attach_node_volume(node_instance, dv)
          rescue Exception => e
            err_msg = "An error occurred while attaching data volume #{dv.name}: #{e}"
            raise ProviderError, err_msg
          end
        end

        # Create those disks that do not exist in the persistent disks DB and
        # record them into the DB
        volumes_config.each do |cv|
          unless data_disks_db.volumes.find { |v| v.name == cv.name }
            ::Dopv::log.debug("Node #{nodename}: Creating disk #{cv.name} [#{cv.size.g} G].")
            volume = add_node_volume(node_instance, cv)
            record_node_data_volume(volume) unless volume.nil?
          end
        end
      end

      def record_node_data_volume(volume)
        ::Dopv::log.debug("Node #{nodename}: Recording data volume #{volume[:name]} into data volumes DB.")
        data_disks_db << volume.merge(:node => nodename)
      end

      def erase_node_data_volume(volume)
        ::Dopv::log.debug("Node #{nodename}: Erasing data volume #{volume.name} from data volumes DB.")
        data_disks_db.delete(volume)
      end

      def add_node_affinity(node_instance, name)
      end

      def add_node_affinities(node_instance)
        infrastructure_properties.affinity_groups.each { |a| add_node_affinity(node_instance, a) }
      end

      def execute_hook(hook_name, state_changed = false)
        has_changes = state_changed ? 1 : 0
        hooks.send(hook_name).each do |prog|
          prog_name = File.basename(prog)
          ::Dopv::log.info("Node #{nodename}: Executing #{hook_name}[#{prog_name}].")
          o, e, s = Open3.capture3(sanitize_env, "#{prog} #{nodename} #{has_changes}", :unsetenv_others => true)
          ::Dopv::log.debug("Node #{nodename}: #{hook_name}[#{prog_name}] standard output:\n#{o.chomp}")
          ::Dopv::log.warn("Node #{nodename}: #{hook_name}[#{prog_name}] non-zero exit status #{s.exitstatus}") unless s.success?
          ::Dopv::log.debug("Node #{nodename}: #{hook_name}[#{prog_name}] standard error:\n#{e.chomp}") unless e.chomp.empty?
        end
      end

      def record_node_instance(node_instance)
        @state_store.transaction do
          @state_store[:nodes] ||= {}
          @state_store[:nodes][nodename] ||= {}
          @state_store[:nodes][nodename][:ip_addresses] = get_node_ip_addresses(node_instance)
        end
      end

      def erase_node_instance(node_instance)
        @state_store.transaction do
          if @state_store[:nodes]
            @state_store[:nodes].delete(nodename)
          end
        end
      end

      def get_node_ip_addresses(node_instance)
        []
      end
    end
  end
end
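
Base is effectively a template: the public bootstrap_node/destroy_node/refresh_node workflows call small provider-specific hooks (create_node_instance, attach_node_volume, add_node_nics, and so on) that the concrete ovirt, openstack and vsphere classes override. A minimal self-contained sketch of that structure follows; the Sketch* names are hypothetical and not part of dopv.

# Illustration of the template-method layout used by Base above.
class SketchBase
  # Public workflow: fixed skeleton, delegating the provider-specific steps.
  def bootstrap_node
    if node_exists?
      puts 'node already exists, skipping creation'
    else
      create_node_instance
    end
  end

  private

  # Overridable hooks with safe defaults, as in Base.
  def node_exists?
    false
  end

  def create_node_instance
    raise NotImplementedError, 'concrete providers must implement create_node_instance'
  end
end

class SketchOvirt < SketchBase
  private

  def create_node_instance
    puts 'provider-specific VM creation would happen here'
  end
end

SketchOvirt.new.bootstrap_node   # => provider-specific VM creation would happen here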