foreman-architect 0.1.0
- data/bin/architect +147 -0
- data/bin/foreman-vm +50 -0
- data/bin/worker.rb +101 -0
- data/lib/architect.rb +49 -0
- data/lib/architect/builder/physical.rb +19 -0
- data/lib/architect/builder/virtual.rb +27 -0
- data/lib/architect/config.rb +64 -0
- data/lib/architect/designer.rb +73 -0
- data/lib/architect/log.rb +28 -0
- data/lib/architect/plan.rb +41 -0
- data/lib/architect/plugin.rb +67 -0
- data/lib/architect/plugin/hello_world.rb +46 -0
- data/lib/architect/plugin/ldap_netgroup.rb +114 -0
- data/lib/architect/plugin_manager.rb +64 -0
- data/lib/architect/report.rb +67 -0
- data/lib/architect/version.rb +3 -0
- data/lib/foreman_vm.rb +409 -0
- data/lib/foreman_vm/allocator.rb +49 -0
- data/lib/foreman_vm/buildspec.rb +48 -0
- data/lib/foreman_vm/cluster.rb +83 -0
- data/lib/foreman_vm/config.rb +55 -0
- data/lib/foreman_vm/console.rb +83 -0
- data/lib/foreman_vm/domain.rb +192 -0
- data/lib/foreman_vm/foreman_api.rb +78 -0
- data/lib/foreman_vm/getopt.rb +151 -0
- data/lib/foreman_vm/hypervisor.rb +96 -0
- data/lib/foreman_vm/storage_pool.rb +104 -0
- data/lib/foreman_vm/util.rb +18 -0
- data/lib/foreman_vm/volume.rb +70 -0
- data/lib/foreman_vm/workqueue.rb +58 -0
- data/test/architect/architect_test.rb +24 -0
- data/test/architect/product_service.yaml +33 -0
- data/test/architect/tc_builder_physical.rb +13 -0
- data/test/architect/tc_config.rb +20 -0
- data/test/architect/tc_log.rb +13 -0
- data/test/architect/tc_plugin_ldap_netgroup.rb +39 -0
- data/test/architect/tc_plugin_manager.rb +27 -0
- data/test/tc_allocator.rb +61 -0
- data/test/tc_buildspec.rb +45 -0
- data/test/tc_cluster.rb +20 -0
- data/test/tc_config.rb +12 -0
- data/test/tc_foreman_api.rb +20 -0
- data/test/tc_foremanvm.rb +20 -0
- data/test/tc_hypervisor.rb +37 -0
- data/test/tc_main.rb +19 -0
- data/test/tc_storage_pool.rb +28 -0
- data/test/tc_volume.rb +22 -0
- data/test/tc_workqueue.rb +35 -0
- data/test/ts_all.rb +13 -0
- metadata +226 -0
data/lib/foreman_vm/buildspec.rb
@@ -0,0 +1,48 @@
+module ForemanAP
+  # A build specification
+  class BuildSpec
+
+    # Short hostname
+    attr_accessor :name
+    # DNS domain name
+    attr_accessor :domain
+    # Number of CPUs
+    attr_accessor :cpus
+    # Amount of memory
+    attr_accessor :memory
+    # Amount of disk space. Multiple disks can be separated with a comma.
+    attr_accessor :disk_capacity
+    # The disk format; either raw or qcow2
+    attr_accessor :disk_format
+    # The libvirt storage pool
+    attr_accessor :storage_pool
+    # Network interface
+    attr_accessor :network_interface
+
+    # Generate output suitable for feeding into the Foreman API
+    def to_foreman_api
+      rec = {}
+      rec['compute_attributes'] = {}
+      rec['compute_attributes']['volumes_attributes'] = disk_capacity_to_api
+      rec
+    end
+
+    private
+
+    # Get the Foreman API equivalent for disk capacity
+    def disk_capacity_to_api
+      res = {}
+      disks = @disk_capacity
+      volume_id = 0
+      disks.split(',').each do |disk_size|
+        res[volume_id.to_s] = {
+          'capacity' => disk_size,
+          'pool_name' => @storage_pool,
+          'format_type' => @disk_format,
+        }
+        volume_id += 1
+      end
+      res
+    end
+  end
+end
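A minimal usage sketch for BuildSpec (the guest name, sizes, and pool below are hypothetical): populate the accessors, then to_foreman_api emits the nested volumes_attributes hash, one entry per comma-separated disk.

    require 'foreman_vm/buildspec'

    spec = ForemanAP::BuildSpec.new
    spec.name          = 'web01'          # hypothetical guest
    spec.domain        = 'example.com'
    spec.cpus          = 2
    spec.memory        = 4 * 1024**3      # 4 GiB, in bytes
    spec.disk_capacity = '20G,100G'       # two disks, comma-separated
    spec.disk_format   = 'qcow2'
    spec.storage_pool  = 'default'

    spec.to_foreman_api
    # => {"compute_attributes"=>{"volumes_attributes"=>
    #      {"0"=>{"capacity"=>"20G",  "pool_name"=>"default", "format_type"=>"qcow2"},
    #       "1"=>{"capacity"=>"100G", "pool_name"=>"default", "format_type"=>"qcow2"}}}}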
data/lib/foreman_vm/cluster.rb
@@ -0,0 +1,83 @@
+module ForemanAP
+  # A cluster of hypervisors.
+  class Cluster
+
+    # Return a handle to the guest domain
+    # [+fqdn+] the FQDN of the guest
+    def guest(fqdn)
+      host = member(find(fqdn)) or raise 'Guest not found'
+      host.guest(fqdn)
+    end
+
+    # Return the name of the best hypervisor
+    # [+name+] The name of the guest to be added
+    # [+memory+] The total size of the guest, in bytes
+    def best_fit(name, memory)
+      alloc = ForemanAP::Allocator.new
+      @members.each do |hostname|
+        host = member(hostname)
+        alloc.add_host(host.hostname, host.free_memory, host.domains)
+      end
+      alloc.add_guest(name, memory)
+    end
+
+    # DEPRECATED - avoid using these
+    attr_accessor :user, :password
+
+    # A list of the names of all members of the cluster.
+    attr_reader :members
+
+    # Return a list of all Hypervisor objects in the cluster
+    # TODO: replace #members with this function
+    def members2
+      @hv.values
+    end
+
+    # The name of the hypervisor that contains a given virtual machine.
+    # [+vm+] The name of the virtual machine.
+    def find(vm)
+      @members.each do |host|
+        #puts host
+        #puts @hv[host].free_memory
+        #puts @hv[host].domains.join("\n")
+        if @hv[host].domains.include? vm
+          return host
+        end
+      end
+      return nil
+    end
+
+    # A handle to the ForemanAP::Hypervisor object for a member of the cluster.
+    # [+name+] The name of the hypervisor.
+    def member(name)
+      unless @hv.include? name
+        raise ArgumentError, "hypervisor #{name} is not defined"
+      end
+      @hv[name]
+    end
+
+    # Migrate a virtual machine from one host to another (FIXME - UNIMPLEMENTED)
+    # [+guest+] The name of the guest
+    # [+destination+] The target hypervisor
+    def migrate(guest, destination)
+      # TODO: the equivalent of this:
+      # virsh migrate $vm qemu+ssh://${target}-san.brontolabs.local/system --verbose --persistent --undefinesource --live --timeout 60
+      raise 'STUB'
+    end
+
+    # Create an object.
+    # [+members+] A list of names of hypervisors to be members.
+    # [+user+] The libvirtd username.
+    # [+password+] The libvirtd password.
+    def initialize(members, user, password)
+      @members = members
+      @hv = {}
+      @user = user
+      @password = password
+      @members.each do |fqdn|
+        uri = 'qemu+tcp://' + fqdn + '/system'
+        @hv[fqdn] = Hypervisor.new(uri, @user, @password)
+      end
+    end
+  end
+end
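A sketch of driving Cluster, assuming libvirtd is reachable over qemu+tcp on each member; the hostnames and credentials below are hypothetical. Note that initialize opens a connection to every member up front.

    require 'foreman_vm/cluster'

    members = ['hv1.example.com', 'hv2.example.com']   # hypothetical hypervisors
    cluster = ForemanAP::Cluster.new(members, 'admin', 'secret')

    # Which member currently runs this guest? (returns nil if none does)
    cluster.find('web01.example.com')

    # Ask the allocator which member best fits a new 4 GiB guest.
    cluster.best_fit('web02.example.com', 4 * 1024**3)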
data/lib/foreman_vm/config.rb
@@ -0,0 +1,55 @@
+module ForemanAP
+  # Parse the configuration file and provide configuration variables.
+  #
+  class Config
+
+    require 'yaml'
+
+    # The user account to login to Foreman as.
+    attr_reader :foreman_user
+    # The password to login to Foreman with.
+    attr_reader :foreman_password
+    # The URI of the Foreman server.
+    attr_reader :foreman_uri
+    # The user account to login to libvirtd as.
+    attr_reader :libvirt_user
+    # The password to login to libvirtd with.
+    attr_reader :libvirt_password
+    # DEPRECATED -- do not use
+    attr_reader :foreground
+    # DEPRECATED -- do not use
+    attr_reader :reap_buried_jobs
+    # A list of all hypervisors in the cluster.
+    attr_reader :hypervisors
+    # The name of the shared storage pool to use on all hypervisors.
+    attr_reader :storage_pool
+    # The FQDN of the GlusterFS server
+    attr_reader :glusterfs_server
+    # The email address of the support team to contact if something goes wrong.
+    attr_reader :support_contact_email
+
+    # Create an object
+    # [+conffile+] the path to the configuration file, or a Hash of settings.
+    def initialize(conffile = nil)
+      if conffile.nil?
+        confdir = File.dirname(__FILE__) + '/../../conf'
+        conffile = confdir + '/worker.yaml'
+      end
+      config = {
+        :foreground => true,
+        :reap_buried_jobs => true,
+      }
+      case conffile
+      when String
+        config.merge!(YAML.load_file(conffile))
+      when Hash
+        config.merge! conffile
+      else
+        raise ArgumentError
+      end
+
+      config.each { |k,v| instance_variable_set("@#{k}", v) }
+      config
+    end
+  end
+end
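Config#initialize accepts either a YAML file path or a pre-built Hash (convenient in tests); every key becomes an instance variable surfaced through the attr_readers, with foreground and reap_buried_jobs defaulting to true. A sketch with hypothetical values:

    require 'foreman_vm/config'

    config = ForemanAP::Config.new(
      :foreman_uri      => 'https://foreman.example.com',   # hypothetical server
      :foreman_user     => 'api-user',
      :foreman_password => 'secret',
      :hypervisors      => ['hv1.example.com', 'hv2.example.com'],
      :storage_pool     => 'gvol'
    )

    config.foreman_uri   # => "https://foreman.example.com"
    config.foreground    # => true (merged default; DEPRECATED)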
data/lib/foreman_vm/console.rb
@@ -0,0 +1,83 @@
+module ForemanAP
+
+  # Functions related to viewing the virtual machine console
+  #
+  class ConsoleViewer
+
+    require 'cgi'
+    require 'pty'
+    require 'timeout'
+
+    # If true, output will be formatted for HTML display.
+    attr_accessor :html
+
+    # Create an object.
+    # [+cluster+] a ForemanAP::Cluster object.
+    #
+    def initialize(cluster)
+      @cluster = cluster
+      @html = false
+      @autoclose = nil
+    end
+
+    # Specify a pattern that will cause the console to be automatically
+    # closed when it is found in the output.
+    #
+    # Example: / login:/
+    #
+    def autoclose=(pattern)
+      raise 'Regular expression expected' unless pattern.class == Regexp
+      @autoclose = pattern
+    end
+
+    # Attach to the serial console of the virtual machine.
+    #
+    def attach(guest)
+      host = @cluster.find(guest)
+      puts "Connecting to the serial console of #{guest} via #{host}... "
+      print '<pre>' if @html
+      ENV['LIBVIRT_AUTH_FILE'] = File.dirname(__FILE__) + '/../../conf/auth.conf'
+      begin
+        PTY.spawn("virsh -c qemu+tcp://#{host}/system console #{guest}") do |stdin, stdout, pid|
+          begin
+            # Regularly try to flush the output in a different thread.
+            # This allows us to detect when the client hangs up even if
+            # the main thread is blocked trying to read from the VM console.
+            if @html
+              t = Thread.new {
+                while true
+                  $stdout.flush or raise 'client has disconnected'
+                  sleep(5)
+                end
+              }
+              t.abort_on_exception = true
+            end
+
+            stdout.write ""
+            stdout.flush
+
+            stdin.each do |line|
+              if @html
+                # TODO: translate ANSI colors into HTML colors
+                print CGI::escapeHTML(line).chomp
+              else
+                print line
+              end
+              $stdout.flush or exit(0)
+              if @autoclose and line =~ @autoclose
+                puts "(the console was automatically closed)"
+                exit 0
+              end
+            end
+          rescue Errno::EIO
+            puts "Errno::EIO error, but this probably just means " +
+                 "that the process has finished giving output"
+          end
+        end
+      rescue PTY::ChildExited
+        puts "The child process exited!"
+      end
+      print '</pre>' if @html
+    end
+  end
+end
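A sketch of attaching to a guest's serial console, with hypothetical hosts and credentials. Since attach can call exit(0) when the autoclose pattern matches or the client disconnects, it is best run in a dedicated process:

    require 'foreman_vm/cluster'
    require 'foreman_vm/console'

    cluster = ForemanAP::Cluster.new(['hv1.example.com'], 'admin', 'secret')
    console = ForemanAP::ConsoleViewer.new(cluster)
    console.autoclose = / login:/   # hang up once the guest shows a login prompt
    console.attach('web01.example.com')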
data/lib/foreman_vm/domain.rb
@@ -0,0 +1,192 @@
+module ForemanAP
+  # A virtual machine or container, i.e. a "domain" within libvirt
+  #
+  class Domain
+
+    require 'rexml/document'
+
+    # Return the hostname of the domain
+    def name
+      @dom.name
+    end
+
+    # Return the amount of memory allocated to the domain, in bytes
+    def memory
+      # Convert from KiB to bytes
+      @dom.info.max_mem.to_i * 1024
+    end
+
+    # Return the number of vCPUs allocated to the domain
+    def vcpu_count
+      @dom.info.nr_virt_cpu
+    end
+
+    # Add a storage volume that uses libgfapi.
+    #
+    # [+volume_name+] The name of the volume.
+    # [+host_name+] The FQDN or IP address of the GlusterFS server.
+    # [+device_id+] The position the disk appears on the bus, starting at one.
+    def add_libgfapi_volume(volume_name, host_name, device_id)
+      # Determine the target disk name.
+      target_dev = 'vd' + ('a'..'z').to_a[device_id - 1]
+
+      disk_xml = REXML::Document.new "
+      <disk type='network' device='disk'>
+        <driver name='qemu' type='raw' cache='none'/>
+        <source protocol='gluster' name='#{volume_name}'>
+          <host name='#{host_name}' port='0'/>
+        </source>
+        <target dev='#{target_dev}' bus='virtio'/>
+        <alias name='virtio-disk#{device_id}'/>
+      </disk>
+      "
+
+      # Modify the domain XML to insert the disk
+      domain_xml = REXML::Document.new(@dom.xml_desc)
+      #puts 'OLD: ' + domain_xml.to_s
+      domain_xml.elements.each('domain/devices') do |ele|
+        ele.add_element(disk_xml.root)
+      end
+      #puts 'NEW: ' + domain_xml.to_s
+
+      @dom.undefine
+      @dom = @conn.define_domain_xml(domain_xml.to_s)
+    end
+
+    # Start (power on) the domain
+    def start
+      @dom.create
+    end
+
+    # Create an object.
+    # [+conn+] A connection to libvirtd on a hypervisor
+    # [+name+] The name of the domain
+    # [+foreman_api+] A handle to a ForemanAP::ForemanAPI object.
+    def initialize(conn, name, foreman_api)
+      @conn = conn
+      @dom = conn.lookup_domain_by_name(name)
+      @foreman_api = foreman_api
+    end
+  end
+end
+
+#--
+###### LEGACY STUFF BELOW HERE
+
+class ForemanVM
+
+  def snapshot_list
+    virsh("snapshot-list #{self.fqdn}")
+  end
+
+  # Create a snapshot of the virtual machine
+  #
+  def snapshot_create
+    raise 'the VM must be powered down before taking a snapshot' \
+      if domstate == 'running'
+    raise 'a snapshot already exists' if snapshot_list =~ /shutoff/
+    virsh("snapshot-create #{fqdn}")
+  end
+
+  # Revert a virtual machine back to a snapshot
+  #
+  def snapshot_revert
+    raise 'a snapshot does not exist' unless snapshot_list =~ /shutoff/
+    stop
+    virsh("snapshot-revert #{fqdn} --current")
+  end
+
+  # Delete a virtual machine snapshot
+  #
+  def snapshot_delete
+    virsh("snapshot-delete #{fqdn} --current")
+  end
+
+  # Power off the virtual machine
+  #
+  def stop
+    if domstate != 'shut off'
+      virsh("destroy #{self.fqdn}")
+    end
+  end
+
+  # Get the state of the domain
+  #
+  def domstate
+    virsh("domstate #{self.fqdn}").chomp.chomp
+  end
+
+  # Power on the virtual machine
+  #
+  def start
+    if domstate != 'running'
+      virsh("start #{self.fqdn}")
+    end
+  end
+
+  def dumpxml
+    puts virsh("dumpxml --security-info #{self.fqdn}")
+  end
+
+  # Modify the VM definition to stop using libgfapi
+  #
+  def disable_libgfapi
+
+    self.stop
+
+    require 'rexml/document'
+    doc = REXML::Document.new(virsh("dumpxml --security-info #{self.fqdn}"))
+
+    # Convert the libgfapi disk back into a file-backed disk
+    doc.elements.each('domain/devices/disk') do |ele|
+      ele.attributes['type'] = 'file'
+    end
+    doc.elements.each('domain/devices/disk') do |ele|
+      ele.delete_element('source')
+      ele.add_element('source', {'file'=>"/gvol/images/#{self.fqdn}-disk1", 'protocol' => 'gluster'})
+    end
+
+    virsh("undefine #{self.fqdn}")
+    virsh("define /dev/stdin >/dev/null 2>&1", doc.to_s)
+  end
+
+  # Modify the VM definition to use libgfapi
+  # +glusterfs_server+ the FQDN of the GlusterFS server
+  def enable_libgfapi(glusterfs_server)
+    require 'rexml/document'
+    doc = REXML::Document.new(virsh("dumpxml --security-info #{self.fqdn}"))
+
+    raise 'cannot enable libgfapi while the VM is running' if domstate == 'running'
+
+    # When cloning or copying, no need to boot from the network
+    if @buildspec['_clone'] or @buildspec['_copy']
+      doc.delete_element "/domain/os/boot[@dev='network']"
+    end
+
+    # Set cache=none just in case
+    # Set the disk type, just in case
+    doc.elements.each('domain/devices/disk/driver') do |ele|
+      ele.attributes['cache'] = 'none'
+      ele.attributes['type'] = @buildspec['disk_format']
+    end
+
+    # Convert the file-backed disk into a libgfapi disk
+    doc.elements.each('domain/devices/disk') do |ele|
+      ele.attributes['type'] = 'network'
+    end
+    diskcount = 1 # XXX-KLUDGE: we should actually look at the disk filename
+    doc.elements.each('domain/devices/disk') do |ele|
+      ele.delete_element('source')
+      ele.add_element('source', {'name'=>"gvol/images/#{self.fqdn}-disk#{diskcount}", 'protocol' => 'gluster'})
+      diskcount += 1
+    end
+    doc.elements.each('domain/devices/disk/source') do |ele|
+      ele.add_element('host', {'name'=>glusterfs_server, 'transport'=>'tcp', 'port'=>'0'})
+    end
+
+    virsh("undefine #{self.fqdn}")
+    virsh("define /dev/stdin >/dev/null 2>&1", doc.to_s)
+  end
+end
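A sketch of Domain against the ruby-libvirt bindings; the endpoint and names are hypothetical, and nil is passed for the Foreman handle since none of the methods shown here use it. Note that add_libgfapi_volume undefines and redefines the domain XML, so use it on a stopped guest:

    require 'libvirt'
    require 'foreman_vm/domain'

    conn = Libvirt::open('qemu+tcp://hv1.example.com/system')   # hypothetical host
    dom  = ForemanAP::Domain.new(conn, 'web01.example.com', nil)

    dom.memory       # bytes (libvirt reports max_mem in KiB; scaled by 1024)
    dom.vcpu_count   # number of vCPUs

    # Attach a second GlusterFS-backed disk as /dev/vdb, then power on.
    dom.add_libgfapi_volume('gvol/images/web01.example.com-disk2',
                            'gluster.example.com', 2)
    dom.start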