foreman-architect 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. data/bin/architect +147 -0
  2. data/bin/foreman-vm +50 -0
  3. data/bin/worker.rb +101 -0
  4. data/lib/architect.rb +49 -0
  5. data/lib/architect/builder/physical.rb +19 -0
  6. data/lib/architect/builder/virtual.rb +27 -0
  7. data/lib/architect/config.rb +64 -0
  8. data/lib/architect/designer.rb +73 -0
  9. data/lib/architect/log.rb +28 -0
  10. data/lib/architect/plan.rb +41 -0
  11. data/lib/architect/plugin.rb +67 -0
  12. data/lib/architect/plugin/hello_world.rb +46 -0
  13. data/lib/architect/plugin/ldap_netgroup.rb +114 -0
  14. data/lib/architect/plugin_manager.rb +64 -0
  15. data/lib/architect/report.rb +67 -0
  16. data/lib/architect/version.rb +3 -0
  17. data/lib/foreman_vm.rb +409 -0
  18. data/lib/foreman_vm/allocator.rb +49 -0
  19. data/lib/foreman_vm/buildspec.rb +48 -0
  20. data/lib/foreman_vm/cluster.rb +83 -0
  21. data/lib/foreman_vm/config.rb +55 -0
  22. data/lib/foreman_vm/console.rb +83 -0
  23. data/lib/foreman_vm/domain.rb +192 -0
  24. data/lib/foreman_vm/foreman_api.rb +78 -0
  25. data/lib/foreman_vm/getopt.rb +151 -0
  26. data/lib/foreman_vm/hypervisor.rb +96 -0
  27. data/lib/foreman_vm/storage_pool.rb +104 -0
  28. data/lib/foreman_vm/util.rb +18 -0
  29. data/lib/foreman_vm/volume.rb +70 -0
  30. data/lib/foreman_vm/workqueue.rb +58 -0
  31. data/test/architect/architect_test.rb +24 -0
  32. data/test/architect/product_service.yaml +33 -0
  33. data/test/architect/tc_builder_physical.rb +13 -0
  34. data/test/architect/tc_config.rb +20 -0
  35. data/test/architect/tc_log.rb +13 -0
  36. data/test/architect/tc_plugin_ldap_netgroup.rb +39 -0
  37. data/test/architect/tc_plugin_manager.rb +27 -0
  38. data/test/tc_allocator.rb +61 -0
  39. data/test/tc_buildspec.rb +45 -0
  40. data/test/tc_cluster.rb +20 -0
  41. data/test/tc_config.rb +12 -0
  42. data/test/tc_foreman_api.rb +20 -0
  43. data/test/tc_foremanvm.rb +20 -0
  44. data/test/tc_hypervisor.rb +37 -0
  45. data/test/tc_main.rb +19 -0
  46. data/test/tc_storage_pool.rb +28 -0
  47. data/test/tc_volume.rb +22 -0
  48. data/test/tc_workqueue.rb +35 -0
  49. data/test/ts_all.rb +13 -0
  50. metadata +226 -0
data/lib/foreman_vm/volume.rb
@@ -0,0 +1,70 @@
+ module ForemanAP
+   # A disk volume attached to a virtual machine.
+   class Volume
+     # The format of the volume. Currently, only :raw is supported.
+     def format
+       case @vol.info.type
+       when 0
+         :raw
+       else
+         raise "unknown volume type: #{@vol.info.type}"
+       end
+     end
+
+     # Delete the volume.
+     def delete
+       @vol.delete
+       true
+     end
+
+     def initialize(pool, name)
+       @pool = pool
+       @name = name
+       @vol = pool.lookup_volume_by_name(name)
+     end
+   end
+ end
+
+ #--
+ ## Legacy code below here
+
+ class ForemanVM
+   # Delete the disk volume associated with the VM
+   #
+   def delete_volume
+     virsh "vol-delete #{self.fqdn}-disk1 --pool #{@buildspec['storage_pool']}"
+   end
+
+   # Wipe an existing disk volume to fix the permissions
+   # This is sadly needed to get the uid:gid to be qemu:qemu
+   #
+   def wipe_volume
+     delete_volume
+
+     # BUG: We would like to do this, but it creates the file owned by root:root
+     #virsh "vol-create-as --pool #{@buildspec['storage_pool']} --name #{fqdn()}-disk1 --capacity 30G --format qcow2 --backing-vol #{@buildspec['_disk_backing_file']} --backing-vol-format qcow2"
+     #
+     # WORKAROUND: use an XML volume definition to set the owner/group
+     #
+     xml = "<volume>
+       <name>#{fqdn}-disk1</name>
+       <key>/gvol/images/#{fqdn}-disk1</key>
+       <source>
+       </source>
+       <capacity unit='bytes'>32212254720</capacity>
+       <allocation unit='bytes'>197120</allocation>
+       <target>
+         <path>/gvol/images/#{fqdn}-disk1</path>
+         <format type='raw'/>
+         <permissions>
+           <mode>0660</mode>
+           <owner>107</owner>
+           <group>107</group>
+         </permissions>
+       </target>
+     </volume>
+     "
+     @log.debug "creating volume: #{xml}"
+     virsh("vol-create --pool gvol --file /dev/stdin >/dev/null", xml)
+   end
+ end
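For orientation, here is a minimal usage sketch of the ForemanAP::Volume class added above. It assumes the ruby-libvirt gem, a reachable hypervisor, and illustrative connection, pool, and volume names; the require path follows the convention used by the gem's test files.

    require 'libvirt'
    require 'foreman_vm/volume'

    # Connect to a hypervisor and look up the storage pool (URI and names are examples).
    conn = Libvirt::open('qemu+tcp://hypervisor1.example.com/system')
    pool = conn.lookup_storage_pool_by_name('gvol')

    # Wrap an existing libvirt volume, inspect it, and delete it.
    vol = ForemanAP::Volume.new(pool, 'web-stg-001-disk1')
    puts vol.format   # => :raw
    vol.delete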
data/lib/foreman_vm/workqueue.rb
@@ -0,0 +1,58 @@
+ module ForemanAP
+
+   # Allow jobs to be placed on a workqueue and processed later.
+   class Workqueue
+
+     require 'rubygems'
+     require 'beaneater'
+     require 'json'
+     require 'pp'
+
+     def initialize(tube_name = 'foreman-vm')
+       @tube_name = tube_name
+       @beanstalk = Beaneater::Pool.new(['localhost:11300'])
+       @tube = @beanstalk.tubes[tube_name]
+     end
+
+     # Add an item to the queue
+     def enqueue(item)
+       @tube.put JSON.pretty_generate(item)
+     end
+
+     # Remove a job from the queue, and return the body
+     def dequeue
+       job = @tube.reserve
+       result = JSON.parse(job.body)
+       job.delete
+       result
+     end
+
+     # Remove all jobs from the queue
+     def clear
+       @tube.clear
+       true
+     end
+
+     # Return information about the jobs in the queue
+     # (TODO: try to avoid leaking beanstalkd details)
+     def jobs
+       buf = "job stats:\n\n"
+       buf += @tube.stats.inspect + "\n\n\n"
+       buf
+     end
+
+     # Process all jobs
+     def process_all_jobs
+       @beanstalk.jobs.register(@tube_name) do |job|
+         yield JSON.parse(job.body)
+       end
+
+       @beanstalk.jobs.process!
+     end
+
+     # Check if there is a worker waiting for jobs
+     def worker?
+       @tube.stats.current_watching > 0
+     end
+   end
+ end
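A short sketch of how this workqueue might be driven from the producer and worker sides. It assumes a beanstalkd daemon listening on localhost:11300 (as hard-coded above) and an illustrative job payload; the require path follows the test files' convention.

    require 'foreman_vm/workqueue'

    queue = ForemanAP::Workqueue.new('foreman-vm')

    # Producer: enqueue a build request as JSON.
    queue.enqueue('action' => 'create', 'hostname' => 'web-stg-001')

    # Worker: block and handle jobs as they arrive.
    queue.process_all_jobs do |job|
      puts "processing #{job['action']} for #{job['hostname']}"
    end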
data/test/architect/architect_test.rb
@@ -0,0 +1,24 @@
+ #!/usr/bin/env ruby
+
+ require 'tempfile'
+ require 'minitest/autorun'
+ require 'minitest'
+ require 'yaml'
+
+ class ArchitectTest < Minitest::Test
+   require 'architect'
+
+   protected
+
+   def architect(config = nil)
+     if config.nil?
+       Architect.new
+     else
+       f = Tempfile.new('architect-test-config.yaml')
+       f.write(config.to_yaml)
+       f.close
+       File.chmod(0600, f.path)
+       Architect.new(conffile: f.path)
+     end
+   end
+ end
data/test/architect/product_service.yaml
@@ -0,0 +1,33 @@
+ ---
+ defaults:
+   environment: staging
+   hostgroup: staging/generic
+   domain: brontolabs.local
+   subnet: staging_200
+   network_interface: vnet0.200
+   cpus: 1
+   memory: 2G
+   disk_capacity: 20G,20G
+   storage_pool: gvol
+   owner: mark.heily
+ instances:
+   - product-stg-001:
+       memory: 2G
+   - product-stg-002:
+       memory: 2G
+   - productapi-stg-001:
+       memory: 2G
+       network_interface: vnet0.202
+       subnet: dmz_202
+   - productapi-stg-002:
+       memory: 2G
+       network_interface: vnet0.202
+       subnet: dmz_202
+   - productdb-stg-001:
+       memory: 4G
+   - productsearch-stg-001:
+       memory: 4G
+       disk_capacity: 20G,40G
+   - productsearch-stg-002:
+       memory: 4G
+       disk_capacity: 20G,40G
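The per-instance keys in this spec appear to override the shared defaults block. The sketch below shows one way such a plan could be resolved, assuming Architect layers each instance's overrides on top of the defaults; the helper name is hypothetical and not part of the gem.

    require 'yaml'

    # Hypothetical helper: expand each instance entry into a full spec by
    # merging its overrides over the shared defaults.
    def resolve_instances(plan)
      plan['instances'].map do |entry|
        name, overrides = entry.first
        [name, plan['defaults'].merge(overrides || {})]
      end.to_h
    end

    plan  = YAML.load_file('product_service.yaml')
    specs = resolve_instances(plan)
    specs['productapi-stg-001']['subnet']   # => "dmz_202" (overridden)
    specs['product-stg-001']['subnet']      # => "staging_200" (inherited)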
data/test/architect/tc_builder_physical.rb
@@ -0,0 +1,13 @@
+ #!/usr/bin/env ruby
+ #
+ # Tests for the PhysicalMachineBuilder class
+ #
+
+ require_relative 'architect_test'
+
+ class TestPhysicalMachineBuilder < ArchitectTest
+   #def test_exists?
+   #  spec = { instance_type: 'physical' }
+   #  architect.builder(spec).exists? spec.keys[0]
+   #end
+ end
data/test/architect/tc_config.rb
@@ -0,0 +1,20 @@
+ #!/usr/bin/env ruby
+
+ require_relative 'architect_test'
+
+ class TestConfig < ArchitectTest
+
+   def test_initialize_from_hash
+     cfg = Architect::Config.new({ 'domain' => 'foo' })
+     assert_equal 'foo', cfg.domain
+   end
+
+   def test_initialize_from_file
+     f = Tempfile.new('test_register.yaml')
+     f.write({ 'domain' => 'foo', 'plugins' => { 'hello_world' => {} } }.to_yaml)
+     f.close
+     File.chmod(0600, f.path)
+     cfg = Architect::Config.new(f.path)
+     assert_equal 'foo', cfg.domain
+   end
+ end
data/test/architect/tc_log.rb
@@ -0,0 +1,13 @@
+ #!/usr/bin/env ruby
+
+ require_relative 'architect_test'
+
+ class TestLog < ArchitectTest
+
+   def test_logger
+     Architect::Log.info 'hi'
+     Architect::Log.warn 'this is a warning'
+     Architect::Log.debug 'this is a debug'
+   end
+
+ end
data/test/architect/tc_plugin_ldap_netgroup.rb
@@ -0,0 +1,39 @@
+ #!/usr/bin/env ruby
+
+ require 'minitest/autorun'
+
+ class TestLDAPNetGroupPlugin < MiniTest::Test
+   require 'architect/plugin/ldap_netgroup'
+
+   def test_name
+     assert_equal 'ldap_netgroup', plugin.name
+   end
+
+   def test_configure
+     @plugin = LDAPNetgroupPlugin.new
+     plugin.configure({
+       host: 'localhost',
+       port: '10389',
+       bind_dn: 'cn=foreman-vm',
+       bind_password: 'password123',
+       base_dn: 'ou=Netgroup,dc=example,dc=com',
+       nis_domain: 'example.com',
+     })
+   end
+
+   def setup
+     @plugin = LDAPNetgroupPlugin.new
+     plugin.configure({
+       host: nil,
+       port: nil,
+       bind_dn: nil,
+       bind_password: nil,
+       base_dn: nil,
+       nis_domain: nil,
+     })
+   end
+
+   private
+
+   attr_accessor :plugin
+ end
data/test/architect/tc_plugin_manager.rb
@@ -0,0 +1,27 @@
+ #!/usr/bin/env ruby
+
+ require_relative 'architect_test'
+
+ class TestPluginManager < ArchitectTest
+   require 'architect/plugin_manager'
+
+   MOCK_CONFIG = {
+     'hello_world' => {
+       hello: 'bar',
+       quiet: 'true',
+     }
+   }
+
+   def test_initialize
+     pm = Architect::PluginManager.new(MOCK_CONFIG)
+     refute_nil pm
+     refute_nil pm.plugins.hello_world
+   end
+
+   # Test if the plugin can be configured
+   def test_configure
+     pm = Architect::PluginManager.new(MOCK_CONFIG)
+     pm.plugins.hello_world.configure(MOCK_CONFIG['hello_world'])
+     assert_equal 'bar', pm.plugins.hello_world.config.hello
+   end
+ end
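These tests imply that PluginManager builds one plugin object per configured name, exposes it via plugins.<name>, and that each plugin accepts a hash through #configure and exposes the result as #config. A minimal sketch of a plugin satisfying that implied contract (an illustration only, not the shipped hello_world plugin):

    require 'ostruct'

    # A plugin as the tests exercise it: it has a name, takes a configuration
    # hash via #configure, and exposes the result through #config.
    class HelloWorldPlugin
      attr_reader :config

      def name
        'hello_world'
      end

      def configure(options)
        @config = OpenStruct.new(options)   # so config.hello returns 'bar'
      end
    end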
data/test/tc_allocator.rb
@@ -0,0 +1,61 @@
+ #!/usr/bin/env ruby
+
+ require 'minitest/autorun'
+ require 'foreman_vm/allocator'
+
+ class TestCluster < Minitest::Test
+
+   # A mockup of the class we want to write
+   #
+   # Goals:
+   # * Allocate new VMs on the hosts with the most free memory
+   # * Do not allocate all instances of a VM type
+   #   on the same hypervisor. Example: if there is only one hypervisor
+   #   with a VM named 'web1', do not create a VM named 'web2' there.
+   #
+   # * (Stretch goal) Try to spread out VMs across multiple hosts where possible
+   #
+
+   # Try to add a host
+   def test_add_host
+     alloc = ForemanAP::Allocator.new
+     assert alloc.add_host('host1', 1024, ['guest1', 'guest2'])
+   end
+
+   # Try to add a guest
+   def test_alloc_guest
+     alloc = ForemanAP::Allocator.new
+     assert(alloc.add_host('host1', 1024, ['guest1', 'guest2']))
+     assert(alloc.add_guest('guest3', 512))
+   end
+
+   # Try to add a guest that exceeds the host's free memory
+   def test_unable_to_add_guest
+     alloc = ForemanAP::Allocator.new
+     assert(alloc.add_host('host1', 1024, ['guest1', 'guest2']))
+     assert_nil(alloc.add_guest('guest3', 4096))
+   end
+
+   # Verify the guest is placed on the host with the most free memory
+   def test_free_memory
+     alloc = ForemanAP::Allocator.new
+     assert(alloc.add_host('host1', 2048, ['a1', 'a2']))
+     assert(alloc.add_host('host2', 1024, ['a1', 'a2']))
+     assert_equal('host1', alloc.add_guest('b1', 512))
+   end
+
+   # Try to add a guest that already exists
+   def test_duplicate_guest
+     alloc = ForemanAP::Allocator.new
+     assert(alloc.add_host('host1', 2048, ['guest1', 'guest2']))
+     assert_nil(alloc.add_guest('guest2', 512))
+   end
+
+   # Ensure VM instances of a similar type are spread across different hypervisors
+   def test_affinity
+     alloc = ForemanAP::Allocator.new
+     assert(alloc.add_host('host1', 2048, ['guest1', 'guest2']))
+     assert(alloc.add_host('host2', 4096, ['guest1', 'web1']))
+     assert_equal('host1', alloc.add_guest('web2', 512))
+   end
+ end
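For reference, a minimal sketch of an allocator that satisfies the placement rules these tests pin down: reject hosts without enough free memory, refuse duplicate guest names, prefer hosts that do not already run a guest with the same base name (so 'web2' avoids the host running 'web1'), and otherwise pick the host with the most free memory. This is an illustration of the expected behaviour, not necessarily how ForemanAP::Allocator is implemented.

    class SimpleAllocator
      def initialize
        @hosts = {}   # host name => { free: memory, guests: [guest names] }
      end

      def add_host(name, free_memory, guests)
        @hosts[name] = { free: free_memory, guests: guests.dup }
        true
      end

      # Pick a host for the new guest and return its name, or nil if none fits.
      def add_guest(name, memory)
        # Never create the same guest twice.
        return nil if @hosts.any? { |_, h| h[:guests].include?(name) }

        # Hard constraint: the host must have enough free memory.
        feasible = @hosts.select { |_, h| h[:free] >= memory }
        return nil if feasible.empty?

        # Soft constraint: avoid hosts already running a guest with the same
        # base name, falling back to any feasible host if that leaves none.
        base = name.sub(/\d+$/, '')
        preferred = feasible.reject do |_, h|
          h[:guests].any? { |g| g.sub(/\d+$/, '') == base }
        end
        pool = preferred.empty? ? feasible : preferred

        host, info = pool.max_by { |_, h| h[:free] }
        info[:guests] << name
        info[:free]   -= memory
        host
      end
    end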
data/test/tc_buildspec.rb
@@ -0,0 +1,45 @@
+ #!/usr/bin/env ruby
+
+ require 'minitest/autorun'
+
+ class TestBuildspec < MiniTest::Test
+   require 'foreman_vm/buildspec'
+
+   def test_initialize
+     refute ForemanAP::BuildSpec.new.nil?
+   end
+
+   def test_assignment
+     spec = ForemanAP::BuildSpec.new
+     assert_equal('hostname', spec.name = 'hostname')
+     assert_equal(1, spec.cpus = 1)
+     assert_equal('4G', spec.memory = '4G')
+     assert_equal('20G,40G', spec.disk_capacity = '20G,40G')
+     assert_equal('raw', spec.disk_format = 'raw')
+     assert_equal('default', spec.storage_pool = 'default')
+     assert_equal('acme.com', spec.domain = 'acme.com')
+     assert_equal('vnet0.101', spec.network_interface = 'vnet0.101')
+   end
+
+   #def test_fancy
+   #  spec = ForemanAP::BuildSpec.new
+   #  spec.specify do
+   #    hostname 'foo'
+   #  end
+   #  assert_equal('foo', spec.name)
+   #end
+
+   # Test the generation of the 'volumes_attributes' Foreman API section
+   def test_volumes_attributes
+     spec = ForemanAP::BuildSpec.new
+     spec.disk_format = 'raw'
+     spec.disk_capacity = '20G,40G'
+     spec.storage_pool = 'default'
+     assert_equal({
+       'volumes_attributes' => {
+         '1' => { 'format_type' => 'raw', 'pool_name' => 'default', 'capacity' => '40G' },
+         '0' => { 'format_type' => 'raw', 'pool_name' => 'default', 'capacity' => '20G' }
+       }
+     }, spec.to_foreman_api)
+   end
+ end
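The expected hash in test_volumes_attributes suggests that BuildSpec expands the comma-separated disk_capacity list into one volumes_attributes entry per disk, keyed by index. A rough sketch of that expansion (an illustration of the behaviour the test describes, not the shipped implementation):

    # Build the 'volumes_attributes' section of a Foreman API request from a
    # comma-separated capacity list such as '20G,40G'.
    def volumes_attributes(disk_capacity, disk_format, storage_pool)
      attrs = {}
      disk_capacity.split(',').each_with_index do |capacity, i|
        attrs[i.to_s] = {
          'format_type' => disk_format,
          'pool_name'   => storage_pool,
          'capacity'    => capacity,
        }
      end
      { 'volumes_attributes' => attrs }
    end

    volumes_attributes('20G,40G', 'raw', 'default')
    # => {"volumes_attributes"=>{"0"=>{..."capacity"=>"20G"}, "1"=>{..."capacity"=>"40G"}}}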
data/test/tc_cluster.rb
@@ -0,0 +1,20 @@
+ #!/usr/bin/env ruby
+
+ require "minitest/autorun"
+
+ class TestCluster < MiniTest::Test
+   require 'foreman_vm'
+
+   def setup
+     @config = ForemanAP::Config.new
+     @cluster = nil
+   end
+
+   def test_initialize
+     @cluster = ForemanAP::Cluster.new(
+       @config.hypervisors,
+       @config.libvirt_user,
+       @config.libvirt_password)
+     refute_nil @cluster
+   end
+ end
data/test/tc_config.rb
@@ -0,0 +1,12 @@
+ #!/usr/bin/env ruby
+
+ require 'minitest/autorun'
+
+ class TestConfig < MiniTest::Test
+   require 'foreman_vm'
+
+   def test_initialize
+     @config = ForemanAP::Config.new
+     refute_nil(@config)
+   end
+ end