foreman_opennebula 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +619 -0
- data/README.md +26 -0
- data/Rakefile +47 -0
- data/app/assets/javascripts/foreman_opennebula/scheduler_hint_filter_selected.js +30 -0
- data/app/assets/javascripts/foreman_opennebula/vmgroup_selected.js +30 -0
- data/app/controllers/foreman_opennebula/hosts_controller.rb +21 -0
- data/app/helpers/concerns/foreman_opennebula/form_helper_extensions.rb +9 -0
- data/app/helpers/concerns/foreman_opennebula/hosts_helper_extensions.rb +20 -0
- data/app/models/concerns/fog_extensions/opennebula/flavor.rb +109 -0
- data/app/models/concerns/fog_extensions/opennebula/interface.rb +11 -0
- data/app/models/concerns/fog_extensions/opennebula/server.rb +135 -0
- data/app/models/concerns/foreman_opennebula/key_pair_compute_resource.rb +80 -0
- data/app/models/foreman_opennebula/opennebula.rb +221 -0
- data/app/views/api/v2/compute_resources/opennebula.json.rabl +1 -0
- data/app/views/compute_resources/form/_opennebula.html.erb +6 -0
- data/app/views/compute_resources/show/_opennebula.html.erb +4 -0
- data/app/views/compute_resources_vms/form/opennebula/_base.html.erb +85 -0
- data/app/views/compute_resources_vms/form/opennebula/_network.html.erb +2 -0
- data/app/views/compute_resources_vms/form/opennebula/_scheduler_hint_data.html.erb +13 -0
- data/app/views/compute_resources_vms/form/opennebula/_vmgroup_role.html.erb +6 -0
- data/app/views/compute_resources_vms/index/_opennebula.html.erb +26 -0
- data/app/views/compute_resources_vms/show/_opennebula.html.erb +47 -0
- data/app/views/images/form/_opennebula.html.erb +4 -0
- data/config/routes.rb +6 -0
- data/lib/foreman_opennebula/engine.rb +59 -0
- data/lib/foreman_opennebula/version.rb +3 -0
- data/lib/foreman_opennebula.rb +4 -0
- data/lib/tasks/foreman_opennebula_tasks.rake +48 -0
- data/locale/Makefile +60 -0
- data/locale/en/foreman_opennebula.po +19 -0
- data/locale/foreman_opennebula.pot +19 -0
- data/locale/gemspec.rb +2 -0
- data/package.json +22 -0
- data/test/factories/foreman_opennebula_factories.rb +5 -0
- data/test/test_plugin_helper.rb +6 -0
- data/test/unit/foreman_opennebula_test.rb +11 -0
- data/webpack/components/MemorySizeInput/MemorySizeInput.css +4 -0
- data/webpack/components/MemorySizeInput/MemorySizeInput.js +83 -0
- data/webpack/components/MemorySizeInput/index.js +3 -0
- data/webpack/global_index.js +4 -0
- data/webpack/index.js +6 -0
- metadata +145 -0
data/Rakefile
ADDED
@@ -0,0 +1,47 @@
|
|
1
|
+
#!/usr/bin/env rake
|
2
|
+
begin
|
3
|
+
require 'bundler/setup'
|
4
|
+
rescue LoadError
|
5
|
+
puts 'You must `gem install bundler` and `bundle install` to run rake tasks'
|
6
|
+
end
|
7
|
+
begin
|
8
|
+
require 'rdoc/task'
|
9
|
+
rescue LoadError
|
10
|
+
require 'rdoc/rdoc'
|
11
|
+
require 'rake/rdoctask'
|
12
|
+
RDoc::Task = Rake::RDocTask
|
13
|
+
end
|
14
|
+
|
15
|
+
RDoc::Task.new(:rdoc) do |rdoc|
|
16
|
+
rdoc.rdoc_dir = 'rdoc'
|
17
|
+
rdoc.title = 'ForemanOpennebula'
|
18
|
+
rdoc.options << '--line-numbers'
|
19
|
+
rdoc.rdoc_files.include('README.rdoc')
|
20
|
+
rdoc.rdoc_files.include('lib/**/*.rb')
|
21
|
+
end
|
22
|
+
|
23
|
+
APP_RAKEFILE = File.expand_path('test/dummy/Rakefile', __dir__)
|
24
|
+
|
25
|
+
Bundler::GemHelper.install_tasks
|
26
|
+
|
27
|
+
require 'rake/testtask'
|
28
|
+
|
29
|
+
Rake::TestTask.new(:test) do |t|
|
30
|
+
t.libs << 'lib'
|
31
|
+
t.libs << 'test'
|
32
|
+
t.pattern = 'test/**/*_test.rb'
|
33
|
+
t.verbose = false
|
34
|
+
end
|
35
|
+
|
36
|
+
task default: :test
|
37
|
+
|
38
|
+
begin
|
39
|
+
require 'rubocop/rake_task'
|
40
|
+
RuboCop::RakeTask.new
|
41
|
+
rescue => _
|
42
|
+
puts 'Rubocop not loaded.'
|
43
|
+
end
|
44
|
+
|
45
|
+
task :default do
|
46
|
+
Rake::Task['rubocop'].execute
|
47
|
+
end
|
@@ -0,0 +1,30 @@
|
|
1
|
+
// Handle a change of the scheduler hint filter <select> on the host form.
// When no filter is chosen the dependent input area is cleared; otherwise
// the current form state is re-POSTed to the server, which renders the
// matching scheduler hint data partial for injection into the page.
function schedulerHintFilterSelected(item) {
  var selectedFilter = $(item).val();

  if (selectedFilter === '') {
    $('#scheduler_hint_data_wrapper').empty();
    return;
  }

  var targetUrl = $(item).attr('data-url');
  // The edit form submits via PATCH; this helper endpoint expects POST,
  // so rewrite the Rails method override before re-submitting.
  var formData = serializeForm().replace('method=patch', 'method=post');

  tfm.tools.showSpinner();
  $.ajax({
    type: 'post',
    url: targetUrl,
    data: formData,
    success: function(result) {
      $('#scheduler_hint_data_wrapper').html(result);
      reloadOnAjaxComplete(item);
    },
    error: function(jqXHR, status, error) {
      $('#scheduler_hint_data_wrapper').html(
        sprintf(
          __('Error loading scheduler hint filters information: %s'),
          error
        )
      );
      // Flag the compute resource tab so the failure is visible.
      $('#compute_resource_tab a').addClass('tab-error');
    },
  });
}
|
@@ -0,0 +1,30 @@
|
|
1
|
+
// Handle a change of the VM group <select> on the host form. When no
// group is chosen the role selector area is cleared; otherwise the
// current form state is re-POSTed to the server, which renders the role
// selector partial for the chosen VM group.
function vmgroupSelected(item) {
  var selectedGroup = $(item).val();

  if (selectedGroup === '') {
    $('#vmgroup_role_wrapper').empty();
    return;
  }

  var targetUrl = $(item).attr('data-url');
  // The edit form submits via PATCH; this helper endpoint expects POST,
  // so rewrite the Rails method override before re-submitting.
  var formData = serializeForm().replace('method=patch', 'method=post');

  tfm.tools.showSpinner();
  $.ajax({
    type: 'post',
    url: targetUrl,
    data: formData,
    success: function(result) {
      $('#vmgroup_role_wrapper').html(result);
      reloadOnAjaxComplete(item);
    },
    error: function(jqXHR, status, error) {
      $('#vmgroup_role_wrapper').html(
        sprintf(
          __('Error loading available roles: %s'),
          error
        )
      );
      // Flag the compute resource tab so the failure is visible.
      $('#compute_resource_tab a').addClass('tab-error');
    },
  });
}
|
@@ -0,0 +1,21 @@
|
|
1
|
+
module ForemanOpennebula
  # AJAX endpoints used by the host form to re-render OpenNebula-specific
  # partials when the user changes the VM group or scheduler hint filter
  # selects (see vmgroup_selected.js / scheduler_hint_filter_selected.js).
  class HostsController < ::HostsController
    before_action :ajax_request, :only => [:vmgroup_selected, :scheduler_hint_filter_selected]

    # Renders the VM group role selector for the group chosen in the form.
    # Returns 404 when no host parameters were submitted.
    def vmgroup_selected
      return not_found unless params[:host]
      # refresh_host comes from core HostsController — presumably rebuilds
      # @host (and @organization/@location) from params; confirm in core.
      refresh_host
      Taxonomy.as_taxonomy @organization, @location do
        render :partial => 'compute_resources_vms/form/opennebula/vmgroup_role'
      end
    end

    # Renders the scheduler hint data input matching the selected filter.
    # Returns 404 when no host parameters were submitted.
    def scheduler_hint_filter_selected
      return not_found unless params[:host]
      refresh_host
      Taxonomy.as_taxonomy @organization, @location do
        render :partial => 'compute_resources_vms/form/opennebula/scheduler_hint_data'
      end
    end
  end
end
|
@@ -0,0 +1,20 @@
|
|
1
|
+
module ForemanOpennebula
  # Host form helper extension: decides whether a provider-specific form
  # partial should be rendered for a given compute resource.
  module HostsHelperExtensions
    extend ActiveSupport::Concern

    included do
      # Returns true when "_#{partial}.html.erb" exists for the compute
      # resource's provider under compute_resources_vms/form/ in any view
      # path. The OpenNebula 'network' partial is deliberately suppressed
      # on the compute_attributes controller.
      def provider_partial_exist?(compute_resource, partial)
        return false unless compute_resource

        provider_dir = compute_resource.provider.downcase

        suppressed = controller_name == 'compute_attributes' &&
                     provider_dir == 'opennebula' && partial == 'network'
        return false if suppressed

        partial_file = "_#{partial}.html.erb"
        ActionController::Base.view_paths.any? do |view_path|
          candidate = File.join(view_path, 'compute_resources_vms', 'form', provider_dir, partial_file)
          File.exist?(candidate)
        end
      end
    end
  end
end
|
@@ -0,0 +1,109 @@
|
|
1
|
+
module FogExtensions
  module OpenNebula
    # Extensions mixed into Fog's OpenNebula flavor model: extra template
    # attributes and their serialization into OpenNebula template syntax.
    module Flavor
      extend ActiveSupport::Concern

      included do
        attribute :cpu_model
        attribute :nic_default
        attribute :pci
        attribute :vmgroup
        attribute :template_id

        # Full OpenNebula template string for this flavor, assembled from
        # all get_* fragments (each fragment is '' when unset).
        def to_s
          '' + get_cpu \
             + get_vcpu \
             + get_memory \
             + get_disk \
             + get_nic \
             + get_os \
             + get_graphics \
             + get_pci \
             + get_raw \
             + get_sched_requirements \
             + get_sched_ds_requirements \
             + get_sched_rank \
             + get_sched_ds_rank \
             + get_context \
             + get_user_variables \
             + get_cpu_model \
             + get_nic_default \
             + get_vmgroup \
             + get_template_id
        end

        def get_cpu_model
          template_fragment('CPU_MODEL', attributes[:cpu_model])
        end

        def get_nic_default
          template_fragment('NIC_DEFAULT', attributes[:nic_default])
        end

        def get_vmgroup
          template_fragment('VMGROUP', attributes[:vmgroup])
        end

        def get_template_id
          template_fragment('TEMPLATE_ID', attributes[:template_id])
        end

        # One NIC=[MODEL=...,NETWORK_ID=...,IP=...] line per configured NIC
        # that has a vnet; '' when no NICs are configured.
        def get_nic
          return '' if nic.nil?

          ret = ''
          if nic.is_a? Array
            nic.each do |n|
              next if n.vnet.nil?
              val = [%(MODEL="#{n.model}"), %(NETWORK_ID="#{n.vnet.id}")]
              val << %(IP="#{n.ip}") if n.ip.present?
              ret += %(NIC=[#{val.join(',')}]\n)
            end
          end
          ret
        end

        # One PCI=... line per PCI device; accepts a single value or an array.
        def get_pci
          return '' unless attributes[:pci]

          pci_list = attributes[:pci].is_a?(Array) ? attributes[:pci] : [attributes[:pci]]
          pci_list.map { |pci| template_fragment('PCI', pci) }.join
        end

        # Render "KEY=value\n" in OpenNebula template syntax. Ruby hashes
        # stringify with {...} and =>, which OpenNebula writes as [...] and
        # plain '=', so braces are swapped for brackets and '>' dropped.
        # Returns '' when value is nil/false. Previously this sequence was
        # copy-pasted into every get_* method using the bang variants
        # (tr!/delete!), which return nil on no-op; the non-bang forms here
        # are safe to chain.
        private def template_fragment(key, value)
          return '' unless value

          "#{key}=#{value}\n".tr('{', '[').tr('}', ']').delete('>')
        end
      end
    end
  end
end
|
@@ -0,0 +1,135 @@
|
|
1
|
+
module FogExtensions
  module OpenNebula
    # Extensions mixed into Fog's OpenNebula server (VM) model. Readers
    # below prefer live data from onevm_object (the OpenNebula XML-RPC VM
    # object, addressed by XPath) and fall back to the form-backed ivars
    # set through attr_writer for VMs that do not exist yet.
    module Server
      extend ActiveSupport::Concern
      include ActionView::Helpers::NumberHelper

      # Writers backing the new-VM form attributes (used until the VM exists).
      attr_writer :template_id, :image_id, :disk_size, :vmgroup_id,
                  :vmgroup_role, :scheduler_hint_filter, :scheduler_hint_data

      included do
        def cpu
          onevm_object.present? ? onevm_object['TEMPLATE/CPU'] : attributes[:cpu]
        end

        def vcpu
          onevm_object.present? ? onevm_object['TEMPLATE/VCPU'] : attributes[:vcpu]
        end

        # Power the VM on (resume from poweroff/suspend/stop).
        def start
          onevm_object.resume
        end

        # Hard power-off (the `true` argument forces it).
        def stop
          onevm_object.poweroff(true)
        end
      end

      def reboot
        onevm_object.reboot
        true
      end

      # Hard reboot (reset); `reboot(true)` forces it.
      def reset
        onevm_object.reboot(true)
        true
      end

      def template_id
        onevm_object.present? ? onevm_object['TEMPLATE/TEMPLATE_ID'] : @template_id
      end

      def vmgroup_id
        onevm_object.present? ? onevm_object['TEMPLATE/VMGROUP/VMGROUP_ID'] : @vmgroup_id
      end

      def vmgroup_role
        onevm_object.present? ? onevm_object['TEMPLATE/VMGROUP/ROLE'] : @vmgroup_role
      end

      # Raw SCHED_REQUIREMENTS expression from the VM's user template,
      # or nil when the VM does not exist yet.
      def sched_requirements
        return unless onevm_object
        onevm_object['USER_TEMPLATE/SCHED_REQUIREMENTS']
      end

      # Classify the scheduling requirement into the filter types the UI
      # offers: a bare "CLUSTER_ID = n" is 'Cluster', a bare "ID = n" is
      # 'Host', anything else counts as a 'Raw' expression.
      def scheduler_hint_filter
        if sched_requirements
          case sched_requirements
          when /^CLUSTER_ID = \d+$/
            'Cluster'
          when /^ID = \d+$/
            'Host'
          else
            'Raw'
          end
        else
          @scheduler_hint_filter
        end
      end

      # Value belonging to scheduler_hint_filter: the full expression for
      # 'Raw', otherwise just the first number (the cluster/host id).
      def scheduler_hint_data
        if sched_requirements
          scheduler_hint_filter == 'Raw' ? sched_requirements : sched_requirements[/\d+/]
        else
          @scheduler_hint_data
        end
      end

      # DISK entries of the live VM as OpenStructs with lowercased keys
      # (the [..].flatten.compact handles none/one/many), or nil when the
      # VM does not exist yet.
      def disks
        return if onevm_object.nil?
        [onevm_object.to_hash['VM']['TEMPLATE']['DISK']].flatten.compact.map do |disk|
          OpenStruct.new(disk.transform_keys(&:downcase))
        end
      end

      def image_id
        disks.try(:first).try(:image_id) || @image_id
      end

      def disk_size
        disks.try(:first).try(:size) || @disk_size
      end

      # NIC entries of the live VM as OpenStructs with lowercased keys.
      # NOTE(review): unlike #disks there is no nil guard for onevm_object.
      def interfaces
        [onevm_object.to_hash['VM']['TEMPLATE']['NIC']].flatten.compact.map do |nic|
          OpenStruct.new(nic.transform_keys(&:downcase))
        end
      end

      # Find the fog NIC matching a Foreman interface by its vnet id.
      def select_nic(fog_nics, nic)
        fog_nics.detect { |fn| fn.network_id == nic.compute_attributes['vnet'] }
      end

      # Hypervisor hostname from the VM's most recent history record.
      def host
        onevm_object['HISTORY_RECORDS/HISTORY[last()]/HOSTNAME']
      end

      def sched_message
        onevm_object['USER_TEMPLATE/SCHED_MESSAGE']
      end

      # Console settings from the GRAPHICS template section, with the type
      # value and all keys lowercased and the keys symbolized.
      def display
        graphics = onevm_object.to_hash['VM']['TEMPLATE']['GRAPHICS']
        graphics['TYPE'].downcase!
        graphics.transform_keys(&:downcase).symbolize_keys
      end

      # State 8 is powered off (hence the predicate name); the numeric
      # status comes from the Fog model.
      def poweroff?
        (status == 8)
      end

      def to_s
        name
      end

      # One-line human-readable sizing summary; memory and disk_size are
      # treated as megabyte counts before humanizing.
      def vm_description
        _('%{cpu} CPU, %{vcpu} VCPU, %{memory} memory and %{disk} disk') % {
          :cpu => cpu,
          :vcpu => vcpu,
          :memory => number_to_human_size(memory.to_i.megabytes),
          :disk => number_to_human_size(disk_size.to_i.megabytes)
        }
      end
    end
  end
end
|
@@ -0,0 +1,80 @@
|
|
1
|
+
module ForemanOpennebula
  # Key pair support for the OpenNebula compute resource. OpenNebula has
  # no dedicated key pair API, so the public key lives in the
  # SSH_PUBLIC_KEY attribute of the OpenNebula user's template while the
  # private key is persisted in Foreman's KeyPair table.
  module KeyPairComputeResource
    extend ActiveSupport::Concern

    included do
      prepend KeyPairCapabilities
      has_one :key_pair, :foreign_key => :compute_resource_id, :dependent => :destroy
      after_create :setup_key_pair
      after_destroy :destroy_key_pair
    end

    # Public keys present on the compute resource: an array holding the
    # configured OpenNebula user's SSH_PUBLIC_KEY, or nil when it is
    # missing or not a valid SSH public key.
    def key_pairs
      opennebula_user = available_users.detect { |u| u.name == user }
      public_key = opennebula_user['TEMPLATE/SSH_PUBLIC_KEY']
      return unless public_key.present? && SSHKey.valid_ssh_public_key?(public_key)
      [public_key]
    end

    # Key pairs for the UI: each remote key is labelled with the active
    # key's name when its fingerprint matches, 'unknown' otherwise.
    def get_compute_key_pairs
      return [] unless capabilities.include?(:key_pair)
      active_key = key_pair
      return [] if key_pairs.nil? || active_key.nil?
      akey = SSHKey.new(active_key.secret)
      key_pairs.map do |key|
        key_fingerprint = SSHKey.fingerprint(key)
        key_name = key_fingerprint == akey.md5_fingerprint ? active_key.name : 'unknown'
        ComputeResourceKeyPair.new(key_name, key_fingerprint, active_key.name, active_key.id)
      end
    end

    # Replace the existing key pair with a freshly generated one.
    def recreate
      destroy_key_pair
      setup_key_pair
    end

    # Remove SSH_PUBLIC_KEY from the OpenNebula user's template and delete
    # the stored KeyPair record. Failures are logged at :warn, never raised.
    def delete_key_from_resource(remote_key_pair = key_pair.name)
      logger.info "removing key from compute resource #{name} "\
                  "(#{provider_friendly_name}): #{remote_key_pair}"
      opennebula_user = available_users.detect { |u| u.name == user }
      template_hash = opennebula_user.to_hash['USER']['TEMPLATE']
      template_hash.delete('SSH_PUBLIC_KEY')
      # OpenNebula expects the template as KEY="value" lines.
      template_str = template_hash.map { |k, v| "#{k}=\"#{v}\"" }.join("\n")
      opennebula_user.update(template_str)
      KeyPair.destroy_by :compute_resource_id => id
    rescue => e
      Foreman::Logging.exception(
        "Failed to delete key pair from #{provider_friendly_name}: #{name}, you "\
        "might need to cleanup manually: #{e}",
        e,
        :level => :warn
      )
    end

    private

    # Generate a key pair, publish the public half to the OpenNebula
    # user's template and persist the private half as a Foreman KeyPair.
    # On failure any partial state is rolled back and the error re-raised.
    def setup_key_pair
      key = SSHKey.generate(comment: "foreman-#{id}#{Foreman.uuid}")
      opennebula_user = available_users.detect { |u| u.name == user }
      template_hash = opennebula_user.to_hash['USER']['TEMPLATE']
      template_hash['SSH_PUBLIC_KEY'] = key.ssh_public_key
      template_str = template_hash.map { |k, v| "#{k}=\"#{v}\"" }.join("\n")
      opennebula_user.update(template_str)
      KeyPair.create! :name => key.comment, :compute_resource_id => id, :secret => key.private_key
    rescue => e
      Foreman::Logging.exception('Failed to generate key pair', e)
      destroy_key_pair
      raise
    end

    def destroy_key_pair
      return unless key_pair.present?
      delete_key_from_resource
      # If the key pair could not be removed, it will be logged.
      # Returning 'true' allows this method to not halt the deletion
      # of the Compute Resource even if the key pair could not be
      # deleted for some reason (permissions, not found, etc...)
      true
    end
  end
end
|