foreman-architect 0.1.0

Files changed (50)
  1. data/bin/architect +147 -0
  2. data/bin/foreman-vm +50 -0
  3. data/bin/worker.rb +101 -0
  4. data/lib/architect.rb +49 -0
  5. data/lib/architect/builder/physical.rb +19 -0
  6. data/lib/architect/builder/virtual.rb +27 -0
  7. data/lib/architect/config.rb +64 -0
  8. data/lib/architect/designer.rb +73 -0
  9. data/lib/architect/log.rb +28 -0
  10. data/lib/architect/plan.rb +41 -0
  11. data/lib/architect/plugin.rb +67 -0
  12. data/lib/architect/plugin/hello_world.rb +46 -0
  13. data/lib/architect/plugin/ldap_netgroup.rb +114 -0
  14. data/lib/architect/plugin_manager.rb +64 -0
  15. data/lib/architect/report.rb +67 -0
  16. data/lib/architect/version.rb +3 -0
  17. data/lib/foreman_vm.rb +409 -0
  18. data/lib/foreman_vm/allocator.rb +49 -0
  19. data/lib/foreman_vm/buildspec.rb +48 -0
  20. data/lib/foreman_vm/cluster.rb +83 -0
  21. data/lib/foreman_vm/config.rb +55 -0
  22. data/lib/foreman_vm/console.rb +83 -0
  23. data/lib/foreman_vm/domain.rb +192 -0
  24. data/lib/foreman_vm/foreman_api.rb +78 -0
  25. data/lib/foreman_vm/getopt.rb +151 -0
  26. data/lib/foreman_vm/hypervisor.rb +96 -0
  27. data/lib/foreman_vm/storage_pool.rb +104 -0
  28. data/lib/foreman_vm/util.rb +18 -0
  29. data/lib/foreman_vm/volume.rb +70 -0
  30. data/lib/foreman_vm/workqueue.rb +58 -0
  31. data/test/architect/architect_test.rb +24 -0
  32. data/test/architect/product_service.yaml +33 -0
  33. data/test/architect/tc_builder_physical.rb +13 -0
  34. data/test/architect/tc_config.rb +20 -0
  35. data/test/architect/tc_log.rb +13 -0
  36. data/test/architect/tc_plugin_ldap_netgroup.rb +39 -0
  37. data/test/architect/tc_plugin_manager.rb +27 -0
  38. data/test/tc_allocator.rb +61 -0
  39. data/test/tc_buildspec.rb +45 -0
  40. data/test/tc_cluster.rb +20 -0
  41. data/test/tc_config.rb +12 -0
  42. data/test/tc_foreman_api.rb +20 -0
  43. data/test/tc_foremanvm.rb +20 -0
  44. data/test/tc_hypervisor.rb +37 -0
  45. data/test/tc_main.rb +19 -0
  46. data/test/tc_storage_pool.rb +28 -0
  47. data/test/tc_volume.rb +22 -0
  48. data/test/tc_workqueue.rb +35 -0
  49. data/test/ts_all.rb +13 -0
  50. metadata +226 -0
data/lib/architect/version.rb
@@ -0,0 +1,3 @@
+ module ForemanArchitect
+   VERSION = "0.1.0"
+ end
data/lib/foreman_vm.rb
@@ -0,0 +1,409 @@
+ #
+ # Manage a cluster of KVM hosts using Foreman
+ #
+ # Author: Mark Heily <mark.heily@bronto.com>
+ #
+ class ForemanVM
+
+   require 'foreman_vm/allocator'
+   require 'foreman_vm/buildspec'
+   require 'foreman_vm/cluster'
+   require 'foreman_vm/console'
+   require 'foreman_vm/config'
+   require 'foreman_vm/domain'
+   require 'foreman_vm/foreman_api'
+   require 'foreman_vm/getopt'
+   require 'foreman_vm/hypervisor'
+   require 'foreman_vm/workqueue'
+   require 'foreman_vm/util'
+
+
+   require 'bundler/setup'
+   require 'libvirt'
+   require 'logger'
+   require 'resolv'
+
+   attr_reader :action, :config
+
+   attr_accessor :log, :cluster, :workqueue, :console
+
+   # Check if a VM with a given +hostname+ exists.
+   def vm_exists?(hostname)
+     @cluster.find(hostname).nil? ? false : true
+   end
+
+   def compute_resource=(txt)
+     # KLUDGE: use shortnames because Foreman does
+     @buildspec['compute_resource'] = txt.gsub(/\..*/, '')
+   end
+
+   # Get the FQDN of the VM
+   #
+   def fqdn
+     @buildspec['name'] + '.' + @buildspec['domain']
+   end
+
+   # Get the hypervisor that hosts the VM
+   #
+   def hypervisor
+     @buildspec['compute_resource'] ||= @cluster.find(fqdn)
+   end
+
+   # Set the VM hostname
+   #
+   def name=(arg)
+     if arg =~ /(.*?)\.(.*)/
+       @hostname = $1
+       @buildspec['name'] = $1
+       @buildspec['domain'] = $2
+     else
+       @hostname = arg
+       @buildspec['name'] = arg
+     end
+   end
+
+   # Update the build specification
+   #
+   def buildspec=(spec)
+     @buildspec.merge! spec
+   end
+
+   # Ask the user for their Foreman password
+   #
+   def ask_password
+     printf 'Enter your Foreman password: '
+     system "stty -echo"
+     @password = STDIN.gets.chomp
+     system "stty echo"
+   end
+
+   # Run a virsh command
+   #
+   def virsh(command, xml = nil)
+
+     # KLUDGE: virsh requires the FQDN of the hypervisor
+     # while foreman uses shortname
+     hypervisor_fqdn = hypervisor
+     unless hypervisor_fqdn =~ /\./
+       hypervisor_fqdn += '.' + `dnsdomainname`.chomp
+     end
+
+     ENV['LIBVIRT_AUTH_FILE'] = File.dirname(__FILE__) + '/../conf/auth.conf'
+     buf = "virsh -c qemu+tcp://#{hypervisor_fqdn}/system " + command
+     @log.info "running virsh #{command} on #{hypervisor_fqdn}"
+     if xml.nil?
+       res = `#{buf}`
+       raise "virsh command returned #{$?}: #{buf}" if $? != 0
+     else
+       f = IO.popen(buf, 'w')
+       f.puts xml
+       f.close
+       # XXX-FIXME error check
+       res = '(FIXME -- NEED TO CAPTURE STDOUT)'
+     end
+     return res
+   end
+
+
+   # Destroy a virtual machine
+   #
+   def delete
+     # Check if it uses libgfapi. If so, we need to disable it.
+     # if self.dumpxml =~ /protocol='gluster'/
+     #   self.stop
+     #   self.disable_libgfapi
+     # end
+
+     # Call 'virsh destroy' to kill the VM
+     begin
+       @foreman_api.request(:delete, "/hosts/#{self.fqdn}", {'id' => self.fqdn})
+     rescue
+       # Try again, to work around a bug where the first deletion fails.
+       @foreman_api.request(:delete, "/hosts/#{self.fqdn}", {'id' => self.fqdn})
+
+       # When the bug hits, the volume is left behind; force its deletion.
+       #
+       # Horrible kludge: hardcoded something that will allow the delete to work
+       gvol = @cluster.member(@cluster.members[0]).storage_pool('gvol')
+       gvol.refresh
+       gvol.volume("#{self.fqdn}-disk1").delete
+     end
+   end
+
+   # View the list of deferred jobs
+   # (TODO: stop leaking Beanstalkd details)
+   def job_status
+     'job status: ' + @workqueue.jobs
+   end
+
+   # Submit a deferred job
+   #
+   def defer(action)
+     @workqueue.enqueue({
+       'user' => @user,
+       'action' => action,
+       'buildspec' => @buildspec,
+       'api_version' => 1,
+     })
+   end
+
+   # Rebuild a virtual machine
+   #
+   def rebuild
+     # Determine the hypervisor
+     @buildspec['compute_resource'] = @cluster.find(fqdn)
+
+     refresh_storage_pool(@buildspec)
+
+     # Destroy the puppet certificate
+     # XXX-KLUDGE
+     system "curl -X DELETE http://util-stage-001.brontolabs.local:8443/puppet/ca/#{fqdn}"
+
+     # TODO: if spec['_copy']...
+     if @buildspec['_clone']
+       virsh "destroy #{self.fqdn}" if domstate == 'running'
+       @buildspec['disk_format'] = 'qcow2'
+       enable_libgfapi @config.glusterfs_server
+       clone_volume
+       start()
+     else
+       # Build via Kickstart:
+       #
+
+       # Destroy the puppet certificate, and enable build at the next boot
+       @foreman_api.request(:put, "/hosts/#{fqdn}", { 'host' => {'build' => '1' }})
+
+       # Call 'virsh destroy' to kill the VM, then power it back on
+       stop
+       sleep 3
+       start
+     end
+   end
+
+   # Create storage and attach it to the virtual machine
+   #
+   def create_storage
+     pool = @config.storage_pool
+     host = @cluster.member(@cluster.find(fqdn))
+     disk_number = 0
+     @buildspec['disk_capacity'].split(',').each do |disk_size|
+       disk_number += 1
+       capacity = normalize_memory(disk_size)
+       puts "create #{fqdn} - #{capacity}"
+       basedir = '/gvol/images' # XXX-HARDCODED
+       path = basedir + '/' + fqdn + '-disk' + disk_number.to_s
+       @cluster.guest(fqdn).add_libgfapi_volume(
+         "gvol/images/#{fqdn}-disk#{disk_number.to_s}",
+         @config.glusterfs_server,
+         disk_number
+       )
+       host.storage_pool(pool).create_volume(path, capacity)
+     end
+   end
+
+   # Build a new virtual machine
+   #
+   def create
+     spec = @buildspec
+
+     # If no compute resource is given, select the one with the most
+     # available memory.
+     spec['compute_resource'] ||= @cluster.best_fit(spec['name'], normalize_memory(spec['memory'])).gsub(/\..*/, '')
+
+     if spec['_clone'] == true
+       # FIXME: does not belong here
+       spec['disk_format'] = 'qcow2'
+       spec['storage_pool'] = 'gvol'
+     end
+
+     refresh_storage_pool(spec)
+
+     rec = {
+       'domain_id' => @foreman_api.get_id('domains', spec['domain']),
+       'subnet_id' => @foreman_api.get_id('subnets', spec['subnet']),
+       'name' => spec['name'],
+       'build' => "true",
+       'enabled' => "true",
+
+       # XXX-FIXME: hardcoded, should not use this..
+       #'compute_profile_id' => '5',
+
+       'compute_resource_id' => @foreman_api.get_id('compute_resources', spec['compute_resource']),
+       'environment_id' => @foreman_api.get_id('environments', spec['environment']),
+       'managed' => true,
+       'hostgroup_id' => @foreman_api.get_id('hostgroups', spec['hostgroup'], 'title'),
+       'provision_method' => spec['provision_method'],
+       'compute_attributes' => {
+         'memory' => normalize_memory(spec['memory']),
+         'image_id' => spec['image_id'],
+         'nics_attributes' => {
+           '0' => {
+             'bridge' => spec['network_interface'],
+             'model' => 'virtio',
+             'type' => 'bridge',
+           }
+         },
+         'interfaces_attributes' => {
+           '0' => {
+             'bridge' => spec['network_interface'],
+             'model' => 'virtio',
+             'type' => 'bridge'
+           },
+         },
+         'cpus' => spec['cpus'],
+         'start' => '0',
+         'volumes_attributes' => {
+           '0' => {
+             'capacity' => spec['disk_capacity'],
+             'pool_name' => spec['storage_pool'],
+             'format_type' => spec['disk_format'],
+           }
+         }
+       }
+     }
+     if spec['organization']
+       rec['organization_id'] = @foreman_api.get_id('organizations', spec['organization'], 'title')
+     end
+     if spec['owner']
+       rec['owner_id'] = @foreman_api.get_id('users', spec['owner'], 'login')
+     end
+     if spec['provision_method'] == 'image'
+       rec['image_id'] = 3
+       rec['image_name'] = 'centos6-generic'
+       rec['compute_attributes']['image_id'] = spec['image_id']
+     end
+     if spec['_clone'] or spec['_copy']
+       rec['build'] = false
+     end
+
+     # configure the volumes
+     # TODO: use a BuildSpec object for everything.
+     spec2 = ForemanAP::BuildSpec.new
+     spec2.disk_capacity = spec['disk_capacity']
+     spec2.storage_pool = spec['storage_pool']
+     spec2.disk_format = spec['disk_format']
+     ### XXX-TESTING:
+     rec['compute_attributes']['volumes_attributes'] = {}
+     ### rec['compute_attributes']['volumes_attributes'] = spec2.to_foreman_api['compute_attributes']['volumes_attributes']
+     # pp rec
+     # raise 'FIXME'
+
+     @foreman_api.request(:post, "/hosts", rec)
+
+     raise 'FIXME - not implemented' if spec['_clone'] == true or spec['_copy'] == true
+
+     # Create volumes and attach them to the VM
+     create_storage
+
+     # DEADWOOD:
+     ##### if spec['_clone'] == true
+     #####   clone_volume
+     ##### elsif spec['_copy'] == true
+     #####   copy_volume
+     ##### else
+     #####   # crude way to fix the permissions
+     #####   wipe_volume
+     ##### end
+     ##### enable_libgfapi if spec['_libgfapi']
+
+     # FIXME: implement this
+     # raise 'Duplicate IP address' if ip_address_in_use? $GET_THE_ADDRESS_HERE
+
+     @cluster.guest(fqdn).start
+
+     # Attach to the console
+     if spec['console']
+       guest = spec['name'] + '.' + spec['domain']
+       host = spec['compute_resource'] + '.' + spec['domain']
+       console_attach(host, guest)
+     end
+   end
+
+   def initialize
+     @log = Logger.new(STDERR)
+     @action = nil
+     @config = ForemanAP::Config.new
+     @cluster = ForemanAP::Cluster.new(
+       @config.hypervisors,
+       @config.libvirt_user,
+       @config.libvirt_password)
+     # FIXME: reenable this: @workqueue = ForemanAP::Workqueue.new('foreman-vm')
+     @console = ForemanAP::ConsoleViewer.new(@cluster)
+
+
+     # TODO: transition to using @config.foreman_user everywhere
+     # instead of @user/@password
+     #
+     if @config.foreman_user
+       @user = @config.foreman_user
+       @password = @config.foreman_password
+     else
+       @user = ENV['USER']
+       @password = nil
+     end
+     @foreman_api = ForemanAP::ForemanAPI.new(@config.foreman_uri, @user, @password)
+
+     # Build specifications
+     @buildspec = {
+       'cpus' => '1',            # Number of vCPUs
+       'memory' => '536870912',  # Memory, in bytes (default: 512MB)
+       'disk_capacity' => '20G',
+       'disk_format' => 'raw',
+       'storage_pool' => 'vm-corp-004',
+       'domain' => `dnsdomainname`.chomp,
+       'network_interface' => 'vnet0.201',
+       'provision_method' => 'build',
+       'owner' => 'nil',
+       # 'image_id' => '/srv/images/centos6-generic-template.qcow2',
+       'console' => false,
+
+       # Hidden for testing purposes
+       '_clone' => false,
+       '_copy' => false,
+       '_libgfapi' => true,
+       '_disk_backing_file' => '/var/lib/libvirt/images/centos6-dude-template.qcow2',
+     }
+   end
+
+   private
+
+   # Allow memory to be specified with a G/M/K suffix
+   # Returns the number of bytes.
+   def normalize_memory(s)
+     if s =~ /G$/
+       ($`.to_i * (1024**3)).to_s
+     elsif s =~ /M$/
+       ($`.to_i * (1024**2)).to_s
+     elsif s =~ /K$/
+       ($`.to_i * 1024).to_s
+     else
+       s
+     end
+   end
+
+   # Refresh a storage pool to detect changes made by other hypervisors
+   def refresh_storage_pool(spec)
+     fqdn = spec['compute_resource']
+     fqdn += '.' + spec['domain'] unless fqdn =~ /\./
+     @log.debug "refreshing the #{@config.storage_pool} pool on #{fqdn}"
+     @cluster.member(fqdn).storage_pool(@config.storage_pool).refresh
+   end
+
+   # Returns true if an IP address is already in use.
+   # Verify that Foreman did not allocate an IP address that is currently in use.
+   # This is some extra sanity checking that should be handled in Foreman, but
+   # it has failed to detect conflicts in the past.
+   def ip_address_in_use?(ipaddr)
+     raise ArgumentError if ipaddr =~ /[^0-9.]/
+     has_ping = system "ping -c 3 -W5 #{ipaddr} > /dev/null"
+     begin
+       name = Resolv.new.getname ipaddr
+       has_dns = true
+     rescue Resolv::ResolvError
+       has_dns = false
+     end
+     # puts "has_ping=#{has_ping.to_s} has_dns=#{has_dns.to_s}"
+     return (has_ping or has_dns)
+   end
+
+ end
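
For orientation, the class above is driven by setting a name and a buildspec and then calling one of the high-level actions. The snippet below is a minimal sketch of that call sequence, not code shipped in the gem: the hostname, memory size, and hostgroup are made-up values, and it assumes a working conf/ setup pointing at a real Foreman server and libvirt cluster.

    require 'foreman_vm'

    vm = ForemanVM.new
    vm.name      = 'web-001.example.com'    # split into 'name' and 'domain' by name=
    vm.buildspec = {
      'memory'    => '2G',                  # normalize_memory converts this to bytes
      'hostgroup' => 'base/webserver',      # hypothetical hostgroup title
    }
    vm.create unless vm.vm_exists?('web-001.example.com')
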
data/lib/foreman_vm/allocator.rb
@@ -0,0 +1,49 @@
+
+ module ForemanAP
+   # Assigns guests to the correct hypervisor.
+   class Allocator
+     def initialize
+       @host = []
+     end
+
+     # Add information about a hypervisor
+     #
+     # [+name+] the name of the host
+     # [+free_memory+] how much free memory, in bytes
+     # [+guests+] a list of the names of each VM on the host
+     #
+     def add_host(name, free_memory, guests)
+       @host.push({
+         :name => name,
+         :free_memory => free_memory,
+         :guests => guests
+       })
+     end
+
+     # Find the best hypervisor that meets the allocation policy
+     #
+     # [+name+] the name of the guest
+     # [+memory+] the amount of memory the guest needs, in bytes
+     #
+     # Returns the name of the most suitable hypervisor.
+     # If no hypervisor is suitable, it returns nil.
+     def add_guest(name, memory)
+       # Sort hosts by most free memory.
+       host_tmp = @host.sort_by { |x| -x[:free_memory] }
+       # Drop hosts that lack enough free memory for the guest.
+       host_tmp.delete_if { |x| x[:free_memory] < memory.to_i }
+       # If the guest already exists on any host, return nil.
+       @host.each { |x| return nil if x[:guests].include?(name) }
+       # Prefer hosts not already running a guest with the same name prefix; fall back to the best host if none qualify.
+       pre = name.gsub(/[0-9]/, '')
+       suitable = host_tmp.dup.delete_if { |x| x[:guests].grep(/^#{pre}/).any? }
+       if suitable.any?
+         return suitable[0][:name]
+       elsif host_tmp.any?
+         return host_tmp[0][:name]
+       else
+         return nil
+       end
+     end
+   end
+ end
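
The allocation policy above is easiest to see with concrete numbers. The following is a hypothetical walk-through, not part of the gem; the host names, memory figures, and guest names are invented for illustration.

    require 'foreman_vm/allocator'

    alloc = ForemanAP::Allocator.new
    alloc.add_host('kvm-001', 64 * 1024**3, ['web-001', 'db-001'])  # most free memory
    alloc.add_host('kvm-002', 32 * 1024**3, ['db-002'])

    # 'web-003' shares the 'web-' name prefix with a guest on kvm-001, so the
    # anti-affinity rule steers it to kvm-002 despite the smaller memory headroom.
    alloc.add_guest('web-003', 4 * 1024**3)   #=> "kvm-002"

    # A guest that already exists on any host yields nil.
    alloc.add_guest('db-002', 1 * 1024**3)    #=> nil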