beaker 2.27.0 → 2.28.0

Files changed (98)
  1. checksums.yaml +8 -8
  2. data/HISTORY.md +378 -2
  3. data/README.md +1 -3
  4. data/acceptance/config/acceptance-options.rb +3 -0
  5. data/acceptance/fixtures/files/failing_shell_script.txt +3 -0
  6. data/acceptance/fixtures/files/retry_script.txt +14 -0
  7. data/acceptance/fixtures/files/shell_script_with_output.txt +3 -0
  8. data/acceptance/fixtures/files/simple_text_file.txt +3 -0
  9. data/acceptance/fixtures/files/sles-11-x86_64.repo +5 -0
  10. data/acceptance/lib/helpers/test_helper.rb +111 -0
  11. data/acceptance/tests/base/dsl/helpers/configuration_test.rb +17 -0
  12. data/acceptance/tests/base/dsl/helpers/host_helpers/add_system32_hosts_entry_test.rb +50 -0
  13. data/acceptance/tests/base/dsl/helpers/host_helpers/backup_the_file_test.rb +64 -0
  14. data/acceptance/tests/base/dsl/helpers/host_helpers/check_for_package_test.rb +100 -0
  15. data/acceptance/tests/base/dsl/helpers/host_helpers/create_remote_file_test.rb +201 -0
  16. data/acceptance/tests/base/dsl/helpers/host_helpers/create_tmpdir_on_test.rb +68 -0
  17. data/acceptance/tests/base/dsl/helpers/host_helpers/curl_on_test.rb +48 -0
  18. data/acceptance/tests/base/dsl/helpers/host_helpers/curl_with_retries_test.rb +28 -0
  19. data/acceptance/tests/base/dsl/helpers/host_helpers/deploy_package_repo_test.rb +142 -0
  20. data/acceptance/tests/base/dsl/helpers/host_helpers/echo_on_test.rb +13 -0
  21. data/acceptance/tests/base/dsl/helpers/host_helpers/install_package_test.rb +67 -0
  22. data/acceptance/tests/base/dsl/helpers/host_helpers/on_test.rb +109 -0
  23. data/acceptance/tests/base/dsl/helpers/host_helpers/retry_on_test.rb +42 -0
  24. data/acceptance/tests/base/dsl/helpers/host_helpers/rsync_to_test.rb +143 -0
  25. data/acceptance/tests/base/dsl/helpers/host_helpers/run_cron_on_test.rb +242 -0
  26. data/acceptance/tests/base/dsl/helpers/host_helpers/run_script_on_test.rb +69 -0
  27. data/acceptance/tests/base/dsl/helpers/host_helpers/run_script_test.rb +53 -0
  28. data/acceptance/tests/base/dsl/helpers/host_helpers/scp_from_test.rb +53 -0
  29. data/acceptance/tests/base/dsl/helpers/host_helpers/scp_to_test.rb +54 -0
  30. data/acceptance/tests/base/dsl/helpers/host_helpers/shell_test.rb +51 -0
  31. data/acceptance/tests/base/dsl/helpers/host_helpers/upgrade_package_test.rb +92 -0
  32. data/acceptance/tests/base/dsl/install_utils/clone_git_repo_on_test.rb +47 -0
  33. data/acceptance/tests/base/{host.rb → host_test.rb} +2 -0
  34. data/acceptance/tests/hypervisor/{communication.rb → communication_test.rb} +0 -0
  35. data/acceptance/tests/load_path_bootstrap.rb +10 -0
  36. data/acceptance/tests/puppet/{install_smoke.rb → install_smoke_test.rb} +0 -0
  37. data/beaker.gemspec +1 -1
  38. data/docs/Access-the-Live-Test-Console-with-Pry.md +305 -0
  39. data/docs/Argument-Processing-and-Precedence.md +325 -0
  40. data/docs/Beaker-Installation.md +59 -0
  41. data/docs/Beaker-Owners-and-Reviewers.md +16 -0
  42. data/docs/Beaker-Test-Tagging.md +76 -0
  43. data/docs/Beaker-with-Masterless-Puppet.md +42 -0
  44. data/docs/Creating-A-Test-Environment.md +91 -0
  45. data/docs/Docker-Support.md +129 -0
  46. data/docs/EC2-Support.md +77 -0
  47. data/docs/Example-Vagrant-Hosts-Files.md +66 -0
  48. data/docs/File-a-Beaker-Bug.md +3 -0
  49. data/docs/Google-Compute-Engine-Support.md +41 -0
  50. data/docs/How-To-Beaker.md +7 -0
  51. data/docs/How-To-Use-User-Password-Authentication-with-Beaker.md +53 -0
  52. data/docs/How-to-Write-a-Beaker-Test-for-a-Module.md +145 -0
  53. data/docs/Lets-Write-a-Test.md +73 -0
  54. data/docs/Openstack-Support.md +32 -0
  55. data/docs/Overview.md +31 -0
  56. data/docs/README.md +43 -0
  57. data/docs/Roles-What-Are-They.md +44 -0
  58. data/docs/Shared-Options-for-Executing-Beaker-Commands.md +43 -0
  59. data/docs/Solaris-Support.md +10 -0
  60. data/docs/The-Beaker-DSL.md +283 -0
  61. data/docs/The-Command-Line.md +24 -0
  62. data/docs/Types,-Puppet-4,-and-the-All-In-One-Agent.md +13 -0
  63. data/docs/VMWare-Fusion-Support.md +36 -0
  64. data/docs/Vagrant-Support.md +47 -0
  65. data/docs/beaker-vs.-beaker-rspec.md +87 -0
  66. data/docs/hosts/README.md +8 -0
  67. data/docs/hosts/eos.md +30 -0
  68. data/docs/hypervisors/README.md +8 -0
  69. data/docs/hypervisors/aws.md +143 -0
  70. data/docs/vSphere-Support.md +46 -0
  71. data/lib/beaker/dsl/helpers/host_helpers.rb +2 -4
  72. data/lib/beaker/dsl/helpers/puppet_helpers.rb +45 -11
  73. data/lib/beaker/dsl/install_utils/foss_utils.rb +20 -23
  74. data/lib/beaker/dsl/install_utils/module_utils.rb +4 -0
  75. data/lib/beaker/dsl/install_utils/pe_utils.rb +2 -6
  76. data/lib/beaker/dsl/patterns.rb +1 -1
  77. data/lib/beaker/host.rb +3 -0
  78. data/lib/beaker/host/eos.rb +56 -0
  79. data/lib/beaker/host/mac/pkg.rb +1 -1
  80. data/lib/beaker/host/unix/pkg.rb +36 -0
  81. data/lib/beaker/hypervisor/aws_sdk.rb +1 -1
  82. data/lib/beaker/hypervisor/vagrant.rb +3 -3
  83. data/lib/beaker/logger.rb +1 -0
  84. data/lib/beaker/options/presets.rb +5 -0
  85. data/lib/beaker/shared/host_manager.rb +7 -4
  86. data/lib/beaker/version.rb +1 -1
  87. data/spec/beaker/dsl/helpers/puppet_helpers_spec.rb +87 -0
  88. data/spec/beaker/dsl/install_utils/foss_utils_spec.rb +53 -20
  89. data/spec/beaker/dsl/install_utils/module_utils_spec.rb +1 -1
  90. data/spec/beaker/dsl/install_utils/pe_utils_spec.rb +7 -14
  91. data/spec/beaker/host/eos_spec.rb +65 -0
  92. data/spec/beaker/host/unix/pkg_spec.rb +42 -0
  93. data/spec/beaker/hypervisor/aws_sdk_spec.rb +17 -56
  94. data/spec/beaker/hypervisor/vagrant_spec.rb +1 -1
  95. data/spec/beaker/logger_spec.rb +1 -1
  96. data/spec/mocks.rb +0 -13
  97. metadata +76 -6
  98. data/acceptance/tests/dsl/install_utils/clone_git_repo_on.rb +0 -50
data/docs/hosts/README.md
@@ -0,0 +1,8 @@
+ # The Hosts Directory
+
+ This directory contains docs explaining any peculiarities or details of a particular
+ OS's host implementation.
+
+ If you don't see a file here for an OS, then it's either not yet documented
+ (feel free to help us out here!), or it should conform to our normal host
+ abstraction assumptions.
data/docs/hosts/eos.md
@@ -0,0 +1,30 @@
+ # EOS - Arista
+
+ EOS is the network device OS from Arista. You can get more details from their
+ [product page](https://www.arista.com/en/products/eos).
+
+ # Supported Hypervisors
+
+ EOS has only been developed and tested as a
+ [vmpooler](https://github.com/puppetlabs/vmpooler) host.
+
+ This doesn't mean that it can't be used in another hypervisor, but that
+ Beaker doesn't specifically deal with the details of that hypervisor in creating
+ EOS hosts, if there is anything specific to EOS that will need to be done in
+ provisioning steps.
+
+ # Supported Installation Methods
+
+ ## Puppet Enterprise
+
+ `install_pe` should "just work".
+
+ ## Open Source
+
+ In order to install a puppet-agent against an EOS host, you'll have to use the
+ [`install_puppet_agent_dev_repo_on`](blob/master/lib/beaker/dsl/install_utils/foss_utils.rb#L1085)
+ method.
+
+ It reaches out to the EOS-specific host code for any information that it needs.
+ You can check out [these methods](blob/master/lib/beaker/host/eos.rb) if you
+ need more information about this.
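As a rough sketch of what that call might look like in a test (not taken from this release; it assumes an EOS host carrying the `agent` role, and the version and collection values are placeholders):

    # install a dev puppet-agent build on the EOS host; the agent version and
    # collection below are placeholder values
    eos_host = agents.first
    install_puppet_agent_dev_repo_on(eos_host,
                                     :puppet_agent_version => '1.2.0',
                                     :puppet_collection    => 'PC1')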
data/docs/hypervisors/README.md
@@ -0,0 +1,8 @@
+ # The Hypervisors Directory
+
+ This directory contains docs explaining any peculiarities or details of a particular
+ hypervisor's implementation.
+
+ If you don't see a file here for a hypervisor, then it's either not yet documented
+ (feel free to help us out here!), or it should conform to our normal hypervisor
+ assumptions.
data/docs/hypervisors/aws.md
@@ -0,0 +1,143 @@
+ # Amazon Web Services - Elastic Compute Cloud (EC2)
+
+ EC2 is a "web service that provides resizable compute capacity in the cloud."
+
+ [EC2 site](https://aws.amazon.com/ec2/).
+
+ # Getting Started
+
+ ### Requirements
+
+ - Get EC2 access from your IT dept, particularly your `aws_access_key_id` & `aws_secret_access_key`.
+ - Put these values into your [~/.fog file](http://fog.io/about/getting_started.html).
+
+ ### Setup Amazon Image Config
+
+ The Amazon Image Config file in Beaker is the file that specifies which Amazon
+ Machine Image (AMI) should be used for a host and which EC2 region that host
+ should be generated into.
+
+ The text in this file follows this form:
+
+     AMI:
+       <host-vmname-value>:
+         :image:
+           :<type>: <ami-id>
+           :<type>: <ami-id>
+         :region: <region-id>
+       <host-vmname-value>:
+       ...
+
+ The `host-vmname-value` is an ID used to reference one of these particular AMI
+ definitions. It is applied to a host via the `vmname` key in the hosts file.
+
+ The `type` variable is an arbitrary key that you can use to specify the different
+ types of that host platform that you can be testing with. Note that this value
+ will be grabbed automatically via the value for the host's `snapshot` key.
+ For example, we tend to use `:pe` and `:foss` for these values.
+
+ The `ami-id` variable is the AMI ID as specified by Amazon. You can see the AMI
+ ID pattern in EC2's
+ [Find a Linux AMI](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html)
+ page, particularly in the "using the Images page" section's step 7. For some
+ examples of AMI IDs, check out their
+ [Amazon Linux AMI page](https://aws.amazon.com/amazon-linux-ami/).
+
+ The `region-id` variable represents the EC2 region ID from AWS. For reference,
+ check out EC2's
+ [Regions and Availability Zones page](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html).
+ An example of a region ID is `eu-west-1` for the Ireland data center.
+
+ This file is by default located at `config/image_templates/ec2.yaml`. This is a
+ relative path from Beaker's execution location, and can be overridden using the
+ `:ec2_yaml` key in a CONFIG section of a host file if required.
+
+ ### Create a Hosts File to Use
+
+ An EC2 hosts file looks like a typical hosts file, except that there are a
+ number of required properties that need to be added to every host in order for
+ the AWS hypervisor to provision hosts properly. They come in this form:
+
+     <hostname>:
+       <usual stuff, roles, etc>
+       vmname: <host-vmname-value>
+       hypervisor: ec2
+       snapshot: <type>
+       amisize: <ami-size>
+
+ The `host-vmname-value` references the ID created in the Amazon Image Config file
+ above. If not provided, Beaker will try to name an AMI Config using the host's
+ platform string.
+
+ The `type` references the type variable in the Amazon Image Config file as well,
+ so this key picks out the particular AMI ID from the set available for this type
+ of host platform.
+
+ The `ami-size` variable refers to
+ [instance types](https://aws.amazon.com/ec2/instance-types/) by their model name.
+ Some examples of these values are "m3.large", "c4.xlarge", and "r3.8xlarge". The
+ default value used by Beaker if this key is not provided is "m1.small".
+
+ # AWS Keys
+
+ For any particular Beaker run, a new EC2 ssh key with a name of the form
+
+     Beaker-<username>-<sanitized_hostname>-<aws_keyname_modifier>-<readable_timestamp>
+
+ will be created at the beginning of the run, & cleaned up at the end of the run.
+
+ Everything up to `aws_keyname_modifier` will be the same if run from the same
+ user on the same machine no matter when it's run. This means that if you're
+ running from a CI system, all of these values will usually be the same, depending
+ on your setup.
+
+ `aws_keyname_modifier` will by default be a 10 digit random number string.
+ `readable_timestamp`'s most fine grained unit is nanoseconds. Between the two of
+ these, every Beaker run will generate a unique ssh key name.
+
+ These keys are deleted automatically as a part of the cleanup process at the end
+ of a Beaker run.
+
+ # Zombie Killing
+
+ If an EC2 host stays around after a Beaker run, we refer to it as a zombie :).
+ Normal Beaker execution should not create zombies, but a common use case that
+ can result in zombies is using the `--preserve-hosts` options.
+
+ If you would like to be sure that you're not running up your EC2 bill via any
+ leftover preserved hosts in your EC2 system, we recommend creating a zombie
+ killing Beaker job.
+
+ To setup a zombie killing job, you'll need a Beaker test that kills all the
+ zombies (referred to later as `kill.rb`):
+
+     ec2 = AwsSdk.new( [], options )
+     ec2.kill_zombies( 0 )
+
+ Refer to the
+ [Rubydoc for the `kill_zombies` method](http://www.rubydoc.info/github/puppetlabs/beaker/Beaker/AwsSdk#kill_zombies-instance_method)
+ to learn more about its parameters. Running this should be as simple as this:
+
+     # beaker --tests kill.rb
+
+ Note that the second argument is tested as a regex against key names, so you
+ could use the key pattern described above to wipe out machines that match a
+ pattern such as "Beaker-johnsmith", and it will catch all keys for the "johnsmith"
+ user.
+
+ ### How Do I Find Out My Key Prefix?
+
+ In order to find out your key pattern as used by Beaker, just kick off a Beaker
+ run to generate an EC2 host. When you do this, you should see lines that look
+ like so:
+
+     aws-sdk: Launch instance
+     aws-sdk: Ensure key pair exists, create if not
+     [AWS EC2 200 0.142666 0 retries] describe_key_pairs(:filters=>[{:name=>"key-name",:values=>["Beaker-johnsmith-Johns-Ubuntu-2-local"]}])
+
+ The values string in that line is what you're looking for.
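Putting the two pieces together, a hedged variant of the `kill.rb` above that only reaps instances matching your own key prefix (the "Beaker-johnsmith" prefix is illustrative, not a value from this release):

    # the second argument is matched as a regex against EC2 key names, so this
    # only kills zombies whose keys start with the example prefix below
    ec2 = AwsSdk.new( [], options )
    ec2.kill_zombies( 0, "Beaker-johnsmith" )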
data/docs/vSphere-Support.md
@@ -0,0 +1,46 @@
+ The harness can use vms and snapshots that live within vSphere as well.
+ To do this create a `~/.fog` file with your vSphere credentials:
+
+     ### example .fog file ###
+
+     :default:
+       :vsphere_server: 'vsphere.example.com'
+       :vsphere_username: 'joe'
+       :vsphere_password: 'MyP@$$w0rd'
+       :vmpooler_token: 'randomtokentext'
+
+ The vmpooler_token can be used with https://github.com/puppetlabs/vmpooler. Users with Puppet Labs credentials can follow directions for getting and using tokens at https://confluence.puppetlabs.com/display/QE/Generating+and+using+vmpooler+tokens.
+
+ These follow the conventions used by Cloud Provisioner and Fog.
+
+ There are two possible `hypervisor` hypervisor-types to use for vSphere testing, `vsphere` and `vcloud`.
+
+ ### `hypervisor: vsphere`
+ This option locates an existing static VM, optionally reverts it to a pre-existing snapshot, and runs tests on it.
+
+ ### `hypervisor: vcloud`
+ This option clones a new VM from a pre-existing template, runs tests on the newly-provisioned clone, then deletes the clone once testing completes.
+
+ The `vcloud` option requires a slightly-modified test configuration file, specifying both the target template as well as three additional parameters in the 'CONFIG' section ('datastore', 'resourcepool', and 'folder').
+
+     #### example vcloud hosts file ###
+     HOSTS:
+       master-vm:
+         roles:
+           - master
+           - agent
+           - dashboard
+         platform: ubuntu-10.04-amd64
+         template: ubuntu-1004-x86_64
+         hypervisor: vcloud
+       agent-vm:
+         roles:
+           - agent
+         platform: ubuntu-10.04-i386
+         template: ubuntu-1004-i386
+         hypervisor: vcloud
+     CONFIG:
+       consoleport: 443
+       datastore: instance0
+       resourcepool: Delivery/Quality Assurance/FOSS/Dynamic
+       folder: delivery/Quality Assurance/FOSS/Dynamic
data/lib/beaker/dsl/helpers/host_helpers.rb
@@ -396,7 +396,6 @@ module Beaker
         on host, "curl --tlsv1 %s" % cmd, opts, &block
       end
 
-
       def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1)
         opts = {
           :desired_exit_codes => desired_exit_codes,
@@ -423,6 +422,8 @@ module Beaker
       # @option opts [Float] :retry_interval (1) number of seconds
       #   that we'll wait between tries
       # @option opts [Boolean] :verbose (false)
+      #
+      # @return [Result] Result object of the last command execution
       def retry_on(host, command, opts = {}, &block)
         option_exit_codes = opts[:desired_exit_codes]
         option_max_retries = opts[:max_retries].to_i
@@ -529,7 +530,6 @@ module Beaker
       # @param [String] val The string to 'echo' on the host(s)
      # @return [String, Array<String> The echo'ed value(s) returned by the host(s)
       def echo_on hosts, val
-        #val = val.gsub(/"/, "\"").gsub(/\(/, "\(")
         block_on hosts do |host|
           if host.is_powershell?
             host.exec(Command.new("echo #{val}")).stdout.chomp
@@ -538,8 +538,6 @@ module Beaker
           end
         end
       end
-
-
     end
   end
 end
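Since `retry_on` now documents that it returns the `Result` of the last command execution, a hedged usage sketch (the command string and option values below are illustrative, not from this release):

    # retry the illustrative check up to 10 times, 5 seconds apart, then inspect
    # the Result object returned from the final execution
    result = retry_on(host, "test -e /var/run/my-service.pid",
                      :max_retries    => 10,
                      :retry_interval => 5)
    logger.debug("final exit code: #{result.exit_code}")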
data/lib/beaker/dsl/helpers/puppet_helpers.rb
@@ -300,8 +300,18 @@ module Beaker
         new_conf
       end
 
+      # Restarts the named puppet service
+      #
+      # @param [Host] host Host the service runs on
+      # @param [String] service Name of the service to restart
+      # @param [Fixnum] curl_retries Number of times to retry the restart command
+      # @param [Fixnum] port Port to check status at
+      #
+      # @return [Result] Result of last status check
       # @!visibility private
-      def bounce_service host, service, curl_retries = 120
+      def bounce_service host, service, curl_retries = nil, port = nil
+        curl_retries = 120 if curl_retries.nil?
+        port = options[:puppetserver_port] if port.nil?
         if host.graceful_restarts?
           apachectl_path = host.is_pe? ? "#{host['puppetsbindir']}/apache2ctl" : 'apache2ctl'
           host.exec(Command.new("#{apachectl_path} graceful"))
@@ -309,7 +319,7 @@ module Beaker
           host.exec puppet_resource('service', service, 'ensure=stopped')
           host.exec puppet_resource('service', service, 'ensure=running')
         end
-        curl_with_retries(" #{service} ", host, "https://localhost:8140", [35, 60], curl_retries)
+        curl_with_retries(" #{service} ", host, "https://localhost:#{port}", [35, 60], curl_retries)
       end
 
       # Runs 'puppet apply' on a remote host, piping manifest through stdin
@@ -382,8 +392,8 @@ module Beaker
       #   by the caller; this can be used for additional
       #   validation, etc.
       #
-      # @return [Array<Result>, Result] An array of results, or a result object.
-      #   Check {#run_block_on} for more details on this.
+      # @return [Array<Result>, Result, nil] An array of results, a result object,
+      #   or nil. Check {#run_block_on} for more details on this.
       def apply_manifest_on(host, manifest, opts = {}, &block)
         block_on host do | host |
           on_options = {}
@@ -596,21 +606,45 @@ module Beaker
         stub_forge_on(default, forge_host)
       end
 
-      def sleep_until_puppetdb_started(host)
-        curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
+      # Waits until a successful curl check has happened against puppetdb
+      #
+      # @param [Host] host Host puppetdb is on
+      # @param [Fixnum] nonssl_port Port to make the HTTP status check over
+      # @param [Fixnum] ssl_port Port to make the HTTPS status check over
+      #
+      # @return [Result] Result of the last HTTPS status check
+      def sleep_until_puppetdb_started(host, nonssl_port = nil, ssl_port = nil)
+        nonssl_port = options[:puppetdb_port_nonssl] if nonssl_port.nil?
+        ssl_port = options[:puppetdb_port_ssl] if ssl_port.nil?
+        curl_with_retries("start puppetdb", host, "http://localhost:#{nonssl_port}", 0, 120)
         curl_with_retries("start puppetdb (ssl)",
-                          host, "https://#{host.node_name}:8081", [35, 60])
+                          host, "https://#{host.node_name}:#{ssl_port}", [35, 60])
       end
 
-      def sleep_until_puppetserver_started(host)
+      # Waits until a successful curl check has happened against puppetserver
+      #
+      # @param [Host] host Host puppetserver is on
+      # @param [Fixnum] port Port to make the HTTPS status check over
+      #
+      # @return [Result] Result of the last HTTPS status check
+      def sleep_until_puppetserver_started(host, port = nil)
+        port = options[:puppetserver_port] if port.nil?
         curl_with_retries("start puppetserver (ssl)",
-                          host, "https://#{host.node_name}:8140", [35, 60])
+                          host, "https://#{host.node_name}:#{port}", [35, 60])
       end
 
-      def sleep_until_nc_started(host)
+      # Waits until a successful curl check has happaned against node classifier
+      #
+      # @param [Host] host Host node classifier is on
+      # @param [Fixnum] port Port to make the HTTPS status check over
+      #
+      # @return [Result] Result of the last HTTPS status check
+      def sleep_until_nc_started(host, port = nil)
+        port = options[:nodeclassifier_port] if port.nil?
         curl_with_retries("start nodeclassifier (ssl)",
-                          host, "https://#{host.node_name}:4433", [35, 60])
+                          host, "https://#{host.node_name}:#{port}", [35, 60])
       end
+
       #stops the puppet agent running on the host
       # @param [Host, Array<Host>, String, Symbol] agent One or more hosts to act upon,
       # or a role (String or Symbol) that identifies one or more hosts.
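These wait helpers now take optional port arguments that fall back to values from the global options hash (`options[:puppetserver_port]`, `options[:puppetdb_port_nonssl]`, `options[:puppetdb_port_ssl]`). A minimal sketch of calling them with explicit ports from a test; the values shown are just the stock defaults that appear in the removed lines:

    # passing ports explicitly overrides the options-hash defaults
    sleep_until_puppetserver_started(master, 8140)
    sleep_until_puppetdb_started(database, 8080, 8081)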
data/lib/beaker/dsl/install_utils/foss_utils.rb
@@ -289,7 +289,7 @@ module Beaker
           host[:version] = opts[:version]
 
           # Certain install paths may not create the config dirs/files needed
-          on host, "mkdir -p #{host['puppetpath']}" unless host[:type] =~ /aio/
+          host.mkdir_p host['puppetpath'] unless host[:type] =~ /aio/
           on host, "echo '' >> #{host.puppet['hiera_config']}"
         end
       end
@@ -592,7 +592,7 @@ module Beaker
         block_on hosts do |host|
           link = "#{opts[:win_download_url]}/#{host['dist']}.msi"
           if not link_exists?( link )
-            raise "Puppet #{version} at #{link} does not exist!"
+            raise "Puppet MSI at #{link} does not exist!"
           end
 
 
@@ -861,11 +861,10 @@ module Beaker
           case variant
           when /^(fedora|el|centos)$/
             variant = (($1 == 'centos') ? 'el' : $1)
+            remote = "%s/puppetlabs-release%s-%s-%s.noarch.rpm" % [opts[:release_yum_repo_url],
+                                                                   repo_name, variant, version]
 
-            rpm = "puppetlabs-release%s-%s-%s.noarch.rpm" % [repo_name, variant, version]
-            remote = URI.join( opts[:release_yum_repo_url], rpm )
-
-            on host, "rpm --replacepkgs -ivh #{remote}"
+            host.install_package_with_rpm(remote, '--replacepkgs', {:package_proxy => opts[:package_proxy]})
 
           when /^(debian|ubuntu|cumulus)$/
             deb = "puppetlabs-release%s-%s.deb" % [repo_name, codename]
@@ -1107,6 +1106,10 @@ module Beaker
           onhost_copy_base = opts[:copy_dir_external]
 
           case variant
+          when /^eos/
+            release_path_end, release_file = host.get_puppet_agent_package_info(
+              opts[:puppet_collection], opts[:puppet_agent_version] )
+            release_path << release_path_end
           when /^(fedora|el|centos|sles)$/
             variant = ((variant == 'centos') ? 'el' : variant)
             release_path << "#{variant}/#{version}/#{opts[:puppet_collection]}/#{arch}"
@@ -1193,29 +1196,22 @@ module Beaker
             raise "No repository installation step for #{variant} yet..."
           end
 
-          onhost_copied_file = File.join(onhost_copy_base, release_file)
-          fetch_http_file( release_path, release_file, copy_dir_local)
-          scp_to host, File.join(copy_dir_local, release_file), onhost_copy_base
+          if host['platform'] =~ /eos/
+            host.get_remote_file( "#{release_path}/#{release_file}" )
+          else
+            onhost_copied_file = File.join(onhost_copy_base, release_file)
+            fetch_http_file( release_path, release_file, copy_dir_local)
+            scp_to host, File.join(copy_dir_local, release_file), onhost_copy_base
+          end
 
           case variant
+          when /^eos/
+            host.install_from_file( release_file )
          when /^(fedora|el|centos|sles)$/
            on host, "rpm -ivh #{onhost_copied_file}"
          when /^(aix)$/
            # NOTE: AIX does not support repo management. This block assumes
            # that the desired rpm has been mirrored to the 'repos' location.
-           #
-           # NOTE: tar is a dependency for puppet packages on AIX. So,
-           # we install it prior to the 'repo' file.
-           tar_pkg_path = "ftp://ftp.software.ibm.com/aix/freeSoftware/aixtoolbox/RPMS/ppc/tar"
-           if version == "5.3" then
-             tar_pkg_file = "tar-1.14-2.aix5.1.ppc.rpm"
-           else
-             tar_pkg_file = "tar-1.22-1.aix6.1.ppc.rpm"
-           end
-           fetch_http_file( tar_pkg_path, tar_pkg_file, copy_dir_local)
-           scp_to host, File.join(copy_dir_local, tar_pkg_file), onhost_copy_base
-           onhost_copied_tar_file = File.join(onhost_copy_base, tar_pkg_file)
-           on host, "rpm -ivh #{onhost_copied_tar_file}"
 
            # install the repo file
            on host, "rpm -ivh #{onhost_copied_file}"
@@ -1403,9 +1399,10 @@ NOASK
         cmdline_args = ''
         # query packages
         case host[:platform]
+        when /cumulus/
+          pkgs = on(host, "dpkg-query -l | awk '{print $2}' | grep -E '(^pe-|puppet)'", :acceptable_exit_codes => [0,1]).stdout.chomp.split(/\n+/)
         when /aix/
           pkgs = on(host, "rpm -qa | grep -E '(^pe-|puppet)'", :acceptable_exit_codes => [0,1]).stdout.chomp.split(/\n+/)
-          pkgs.concat on(host, "rpm -q tar", :acceptable_exit_codes => [0,1]).stdout.chomp.split(/\n+/)
         when /solaris-10/
           cmdline_args = '-a noask'
           pkgs = on(host, "pkginfo | egrep '(^pe-|puppet)' | cut -f2 -d ' '", :acceptable_exit_codes => [0,1]).stdout.chomp.split(/\n+/)