beaker 3.14.0 → 3.15.0

checksums.yaml CHANGED
@@ -1,15 +1,15 @@
  ---
  !binary "U0hBMQ==":
  metadata.gz: !binary |-
- NjhmMGI0YmEzZmIzMGM1MjM3ZjU5MzZiZWE0OGNmMzYyNTc4MmJjZA==
+ NjRlOGQzYjVmMjI1MDFhYTQzZTRhNzZhOTM0Njg1MzI1ZGY1NmU4Nw==
  data.tar.gz: !binary |-
- Y2IzYjA2YjBiZGQ1MmI4MGFlNDgxNDM0NDFlMTlmY2Q5ZTEyOWM5MA==
+ MzUxODYzYWY0ZWQ0YjM5OWU5ODQzNTY4OGFlOWU4YzlhYTQ3NzM2OQ==
  SHA512:
  metadata.gz: !binary |-
- OTRlZThiZGY3ODViOWNiZmI0Y2VmZWEwYzRlOGUwZDVkZDQ4ZTdlYTc3NmI3
- NWQ3NzYyYjU3MDA1YTkwYmViYzk1MWZkNGY0NWYyNjM1NGI0ZDU2YzU4ZWJm
- ZGMxMDJlNDc5ZWRmMDZjZmU4NjU0OWI2NWRiNjg2Njk0ZDg1N2M=
+ OTY1NmExNTVjODU2OGU1Zjg2Yjk4N2NmYmY2ODBjODNiYjIxZWExZjNjYzM2
+ NDJlNmIwYTcyNTIzOGU3YjUxMTJiODIyOGNjNGYzYTE5NDVlZDBlNzc2NjRj
+ Yjc5NGFkMzNiMDAxOTY2OGNlZWFiMGVhMTgyYjczMzM0YzljN2I=
  data.tar.gz: !binary |-
- OGNjYjc4NTY1N2ExYTQ4NzlkYjE3NjJkMmYzM2NhZTI0NDc0MWE1MWYwZmFh
- ODE5OTUwMjA3Mzk0YTJhYWUyZTg3NDkwYTJhYTgyOWYxNjM1Yzk0OTUwZTJk
- ODhiZDA2YzRkN2ZlOTU5ZmZlOGVkMmU0OGJjYzQ2NTQ2OTE2YzI=
+ N2QzYTg1NjZiOTAxYWY4YjQ3ODdiYjkyNjViN2Q5ZDg2OTA0MmNmMTRhNWVl
+ YTk1MTAwZTBmZTlhMzU2Mzk0NjU5OWUwZjU0YjYyODdjMmE4YWZhNDlmOGYz
+ MzczMjQ5YjVmZWIwMGJmN2RmYTcyNTE3ZjY0YjJlZTViNzY1ZDA=
@@ -56,9 +56,9 @@ hosts.each do |host|
  step "#{host} Selected architecture #{arch}"

  revision = if arch == 'x64'
- '2.0.0-x64'
+ '2.1.x-x64'
  else
- '1.9.3-x86'
+ '2.1.x-x86'
  end

  step "#{host} Install ruby from git using revision #{revision}"
@@ -1,9 +1,11 @@
  test_name 'External Resources Test' do
  step 'Verify EPEL resources are up and available' do
  def epel_url_test(el_version)
- url = "#{options[:epel_url]}/epel-release-latest-#{el_version}.noarch.rpm"
+ url_base = options[:epel_url]
+ url_base = options[:epel_url_archive] if el_version == 5
+ url = "#{url_base}/epel-release-latest-#{el_version}.noarch.rpm"
  curl_headers_result = default.exec(Command.new("curl -I #{url}"))
- assert_match(/200 OK/, curl_headers_result.stdout, "EPEL #{el_version} should be reachable")
+ assert_match(/200 OK/, curl_headers_result.stdout, "EPEL #{el_version} should be reachable at #{url}")
  end

  step 'Verify el_version numbers 5,6,7 are found on the epel resource' do
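Editorial note, not part of the gem diff: the change above (and the matching change to add_el_extras later in this diff) sends EL 5 hosts to the Fedora archive mirror. A hedged sketch of how the two defaults added in this release combine with that per-version selection:

```ruby
# Editorial sketch only; the constants mirror the epel_url / epel_url_archive
# defaults added to beaker's presets in this release.
EPEL_URL         = 'http://dl.fedoraproject.org/pub/epel'
EPEL_URL_ARCHIVE = 'http://archive.fedoraproject.org/pub/archive/epel'

def epel_release_url(el_version)
  base = (el_version == 5) ? EPEL_URL_ARCHIVE : EPEL_URL
  "#{base}/epel-release-latest-#{el_version}.noarch.rpm"
end

epel_release_url(5) # => ".../pub/archive/epel/epel-release-latest-5.noarch.rpm"
epel_release_url(7) # => ".../pub/epel/epel-release-latest-7.noarch.rpm"
```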
@@ -18,7 +18,24 @@ An example of a `.fog` file with just the vmpooler details is below:
  :default:
  :vmpooler_token: 'randomtokentext'
  ```
+ # Additional Disks
+ Using the vmpooler API, Beaker enables you to attach additional storage disks in the host configuration file. The disks are added at the time the VM is created. Logic for using the disk must go into your tests.
+
+ Simply add the `disks` key and a list containing the sizes(in GB) of the disks you want to create and attach to that host.
+ For example, to create 2 disks sized 8GB and 16GB to example-box:
+
+ ```yaml
+ example-box:
+ disks:
+ - 8
+ - 16
+ roles:
+ - satellite
+ platform: el-7-x86_64
+ hypervisor: vmpooler
+ template: redhat-7-x86_64
+ ```

  Users with Puppet credentials can follow our instructions for getting & using
  vmpooler tokens in our
- [internal documentation](https://confluence.puppetlabs.com/pages/viewpage.action?spaceKey=SRE&title=Generating+and+using+vmpooler+tokens).
+ [internal documentation](https://confluence.puppetlabs.com/pages/viewpage.action?spaceKey=SRE&title=Generating+and+using+vmpooler+tokens).
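Editorial note, not part of the gem diff: the documentation above says "logic for using the disk must go into your tests". A minimal hedged sketch of such a test, assuming the `example-box` entry above (role `satellite`, disks of 8 and 16 GB) and a Linux guest where `lsblk` reports whole-gigabyte sizes:

```ruby
# Editorial sketch only; role name and lsblk output format are assumptions.
test_name 'extra vmpooler disks are attached' do
  host = find_only_one 'satellite'
  step 'list block devices and look for the requested sizes' do
    result = on host, 'lsblk -d -o NAME,SIZE'
    assert_match(/\b8G\b/,  result.stdout, 'expected the 8GB disk to be visible')
    assert_match(/\b16G\b/, result.stdout, 'expected the 16GB disk to be visible')
  end
end
```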
@@ -10,6 +10,8 @@ To do this create a `~/.fog` file with your vSphere credentials:

  These follow the conventions used by Cloud Provisioner and Fog.

+ >Note: Your fog credential file location may be specified in the 'CONFIG' section using the 'dot_fog' setting
+
  There are two possible `hypervisor` hypervisor-types to use for vSphere testing, `vsphere` and `vcloud`.

  ### `hypervisor: vsphere`
@@ -18,7 +20,7 @@ This option locates an existing static VM, optionally reverts it to a pre-existi
  ### `hypervisor: vcloud`
  This option clones a new VM from a pre-existing template, runs tests on the newly-provisioned clone, then deletes the clone once testing completes.

- The `vcloud` option requires a slightly-modified test configuration file, specifying both the target template as well as three additional parameters in the 'CONFIG' section ('datastore', 'resourcepool', and 'folder').
+ The `vcloud` option requires a slightly-modified test configuration file, specifying both the target template as well as three additional parameters in the 'CONFIG' section ('datastore', 'datacenter', and 'folder'). Optionally, a resourcepool may be specified via the 'resourcepool' setting in the 'CONFIG' section. Template can be expressed in the 'HOSTS' section, or you can set the template to be used via the `BEAKER_vcloud_template` environment variable.

  #### example vcloud hosts file ###
  HOSTS:
@@ -38,6 +40,7 @@ The `vcloud` option requires a slightly-modified test configuration file, specif
  hypervisor: vcloud
  CONFIG:
  consoleport: 443
+ datacenter: testdc
  datastore: instance0
  resourcepool: Delivery/Quality Assurance/FOSS/Dynamic
  folder: delivery/Quality Assurance/FOSS/Dynamic
@@ -73,12 +73,13 @@ module Beaker
  env_string = host.environment_string( env )
  prepend_commands = host.prepend_commands( cmd, pc, :cmd_exe => @cmdexe )
  if host[:platform] =~ /cisco/ && host[:user] != 'root'
- append_command = '"'
+ append_command = '"'
  cmd = cmd.gsub('"') { '\\"' }
  end

  # This will cause things like `puppet -t -v agent` which is maybe bad.
- cmd_line_array = [env_string, prepend_commands, cmd, options_string, args_string, append_command]
+ cmd_line_array = [env_string, prepend_commands, cmd, options_string, args_string]
+ cmd_line_array << append_command unless (cmd =~ /ntpdate/ && host[:platform] =~ /cisco_nexus/)
  cmd_line_array.compact.reject( &:empty? ).join( ' ' )
  end

@@ -70,8 +70,12 @@ module Cisco
  # @return [String] Command string as needed for this host
  def prepend_commands(command = '', user_pc = '', opts = {})
  return user_pc unless command.index('vsh').nil?
+ if self[:platform] =~ /cisco_nexus/
+ return user_pc unless command.index('ntpdate').nil?
+ end

- prepend_cmds = ''
+ prepend_cmds = 'source /etc/profile;'
+ prepend_cmds << " sudo sh -c \"" if self[:user] != 'root'
  if self[:vrf]
  prepend_cmds << "ip netns exec #{self[:vrf]} "
  end
@@ -93,8 +97,7 @@ module Cisco
  # will ensure the environment is correctly set for the
  # given host.
  def environment_string env
- prestring = 'source /etc/profile;'
- prestring << " sudo sh -c \"" if self[:user] != 'root'
+ prestring = ''
  return prestring if env.empty?
  env_array = self.environment_variable_string_pair_array( env )
  environment_string = env_array.join(' ')
@@ -53,6 +53,9 @@ module Beaker
  case
  when host['platform'] =~ /sles-/
  ntp_command = "sntp #{ntp_server}"
+ when host['platform'] =~ /cisco_nexus/
+ ntp_server = host.exec(Command.new("getent hosts #{NTPSERVER} | head -n1 |cut -d \" \" -f1"), :acceptable_exit_codes => [0]).stdout
+ ntp_command = "sudo -E sh -c 'export DCOS_CONTEXT=2;/isan/bin/ntpdate -u -t 20 #{ntp_server}'"
  else
  ntp_command = "ntpdate -u -t 20 #{ntp_server}"
  end
@@ -252,9 +255,11 @@ module Beaker
  when el_based?(host) && ['5','6','7'].include?(host['platform'].version)
  result = host.exec(Command.new('rpm -qa | grep epel-release'), :acceptable_exit_codes => [0,1])
  if result.exit_code == 1
- host.exec(Command.new("rpm -i#{debug_opt} #{opts[:epel_url]}/epel-release-latest-#{host['platform'].version}.noarch.rpm"))
+ url_base = opts[:epel_url]
+ url_base = opts[:epel_url_archive] if host['platform'].version == '5'
+ host.exec(Command.new("rpm -i#{debug_opt} #{url_base}/epel-release-latest-#{host['platform'].version}.noarch.rpm"))
  #update /etc/yum.repos.d/epel.repo for new baseurl
- host.exec(Command.new("sed -i -e 's;#baseurl.*$;baseurl=#{Regexp.escape("#{opts[:epel_url]}/#{host['platform'].version}")}/\$basearch;' /etc/yum.repos.d/epel.repo"))
+ host.exec(Command.new("sed -i -e 's;#baseurl.*$;baseurl=#{Regexp.escape("#{url_base}/#{host['platform'].version}")}/\$basearch;' /etc/yum.repos.d/epel.repo"))
  #remove mirrorlist
  host.exec(Command.new("sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo"))
  host.exec(Command.new('yum clean all && yum makecache'))
@@ -35,29 +35,29 @@ module Beaker
  raise 'You must specify an Openstack tenant (:openstack_tenant) for OpenStack instances!' unless @options[:openstack_tenant]
  raise 'You must specify an Openstack network (:openstack_network) for OpenStack instances!' unless @options[:openstack_network]

- optionhash = {}
- optionhash[:provider] = :openstack
- optionhash[:openstack_api_key] = @options[:openstack_api_key]
- optionhash[:openstack_username] = @options[:openstack_username]
- optionhash[:openstack_auth_url] = @options[:openstack_auth_url]
- optionhash[:openstack_tenant] = @options[:openstack_tenant]
- optionhash[:openstack_region] = @options[:openstack_region] if @options[:openstack_region]
+ # Common keystone authentication credentials
+ @credentials = {
+ :provider => :openstack,
+ :openstack_auth_url => @options[:openstack_auth_url],
+ :openstack_api_key => @options[:openstack_api_key],
+ :openstack_username => @options[:openstack_username],
+ :openstack_tenant => @options[:openstack_tenant],
+ :openstack_region => @options[:openstack_region],
+ }
+
+ # Keystone version 3 requires users and projects to be scoped
+ if @credentials[:openstack_auth_url].include?('/v3/')
+ @credentials[:openstack_user_domain] = @options[:openstack_user_domain] || 'Default'
+ @credentials[:openstack_project_domain] = @options[:openstack_project_domain] || 'Default'
+ end

- @compute_client ||= Fog::Compute.new(optionhash)
+ @compute_client ||= Fog::Compute.new(@credentials)

  if not @compute_client
  raise "Unable to create OpenStack Compute instance (api key: #{@options[:openstack_api_key]}, username: #{@options[:openstack_username]}, auth_url: #{@options[:openstack_auth_url]}, tenant: #{@options[:openstack_tenant]})"
  end

- networkoptionhash = {}
- networkoptionhash[:provider] = :openstack
- networkoptionhash[:openstack_api_key] = @options[:openstack_api_key]
- networkoptionhash[:openstack_username] = @options[:openstack_username]
- networkoptionhash[:openstack_auth_url] = @options[:openstack_auth_url]
- networkoptionhash[:openstack_tenant] = @options[:openstack_tenant]
- networkoptionhash[:openstack_region] = @options[:openstack_region] if @options[:openstack_region]
-
- @network_client ||= Fog::Network.new(networkoptionhash)
+ @network_client ||= Fog::Network.new(@credentials)

  if not @network_client
  raise "Unable to create OpenStack Network instance (api_key: #{@options[:openstack_api_key]}, username: #{@options[:openstack_username]}, auth_url: #{@options[:openstack_auth_url]}, tenant: #{@options[:openstack_tenant]})"
@@ -104,15 +104,7 @@ module Beaker
  # Create a volume client on request
  # @return [Fog::OpenStack::Volume] OpenStack volume client
  def volume_client_create
- options = {
- :provider => :openstack,
- :openstack_api_key => @options[:openstack_api_key],
- :openstack_username => @options[:openstack_username],
- :openstack_auth_url => @options[:openstack_auth_url],
- :openstack_tenant => @options[:openstack_tenant],
- :openstack_region => @options[:openstack_region],
- }
- @volume_client ||= Fog::Volume.new(options)
+ @volume_client ||= Fog::Volume.new(@credentials)
  unless @volume_client
  raise "Unable to create OpenStack Volume instance"\
  " (api_key: #{@options[:openstack_api_key]},"\
@@ -220,8 +212,9 @@ module Beaker

  @hosts.each do |host|
  ip = get_ip
- host[:vmhostname] = ip.ip.gsub('.','-') + '.rfc1918.puppetlabs.net'
- host[:keyname] = key_name(host)
+ hostname = ip.ip.gsub('.','-')
+ host[:vmhostname] = hostname + '.rfc1918.puppetlabs.net'
+ create_or_associate_keypair(host, hostname)
  @logger.debug "Provisioning #{host.name} (#{host[:vmhostname]})"
  options = {
  :flavor_ref => flavor(host[:flavor]).id,
@@ -327,12 +320,11 @@ module Beaker
  #OpenStack keypairs
  #
  #@param [Host] host The OpenStack host to provision
- #@return [String] key_name
  #@api private
- def key_name(host)
+ def create_or_associate_keypair(host, keyname)
  if @options[:openstack_keyname]
  @logger.debug "Adding optional key_name #{@options[:openstack_keyname]} to #{host.name} (#{host[:vmhostname]})"
- @options[:openstack_keyname]
+ keyname = @options[:openstack_keyname]
  else
  @logger.debug "Generate a new rsa key"

@@ -355,11 +347,12 @@
  type = key.ssh_type
  data = [ key.to_blob ].pack('m0')
- @logger.debug "Creating Openstack keypair for public key '#{type} #{data}'"
- @compute_client.create_key_pair host[:vmhostname], "#{type} #{data}"
+ @logger.debug "Creating Openstack keypair '#{keyname}' for public key '#{type} #{data}'"
+ @compute_client.create_key_pair keyname, "#{type} #{data}"
  host['ssh'][:key_data] = [ key.to_pem ]
- host[:vmhostname]
  end
+
+ host[:keyname] = keyname
  end
  end
  end
@@ -10,6 +10,7 @@ module Beaker

  raise 'You must specify a datastore for vCloud instances!' unless @options['datastore']
  raise 'You must specify a folder for vCloud instances!' unless @options['folder']
+ raise 'You must specify a datacenter for vCloud instances!' unless @options['datacenter']
  @vsphere_credentials = VsphereHelper.load_config(@options[:dot_fog])
  end

@@ -50,6 +51,16 @@ module Beaker
  end
  end

+ # Directly borrowed from openstack hypervisor
+ def enable_root(host)
+ if host['user'] != 'root'
+ copy_ssh_to_root(host, @options)
+ enable_root_login(host, @options)
+ host['user'] = 'root'
+ host.close
+ end
+ end
+
  def create_clone_spec host
  # Add VM annotation
  configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
@@ -76,8 +87,8 @@

  # Put the VM in the specified folder and resource pool
  relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
- :datastore => @vsphere_helper.find_datastore(@options['datastore']),
- :pool => @options['resourcepool'] ? @vsphere_helper.find_pool(@options['resourcepool']) : nil,
+ :datastore => @vsphere_helper.find_datastore(@options['datacenter'],@options['datastore']),
+ :pool => @options['resourcepool'] ? @vsphere_helper.find_pool(@options['datacenter'],@options['resourcepool']) : nil,
  :diskMoveType => :moveChildMostDiskBacking
  )

@@ -95,7 +106,6 @@ module Beaker
  def provision
  connect_to_vsphere
  begin
- vsphere_vms = {}

  try = 1
  attempts = @options[:timeout].to_i / 5
@@ -109,6 +119,12 @@ module Beaker
  h['vmhostname'] = generate_host_name
  end

+ if h['template'].nil? and defined?(ENV['BEAKER_vcloud_template'])
+ h['template'] = ENV['BEAKER_vcloud_template']
+ end
+
+ raise "Missing template configuration for #{h}. Set template in nodeset or set ENV[BEAKER_vcloud_template]" unless h['template']
+
  if h['template'] =~ /\//
  templatefolders = h['template'].split('/')
  h['template'] = templatefolders.pop
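Editorial note, not part of the gem diff: a hedged sketch of the template fallback added above, in isolation. The host hash and template name are placeholders.

```ruby
# Editorial sketch only.
host = { 'vmhostname' => 'vm-01' }            # nodeset entry with no template
ENV['BEAKER_vcloud_template'] ||= 'redhat-7-x86_64'

host['template'] = ENV['BEAKER_vcloud_template'] if host['template'].nil?
raise "Missing template configuration for #{host}" unless host['template']
```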
@@ -119,7 +135,7 @@ module Beaker
  vm = {}

  if templatefolders
- vm[h['template']] = @vsphere_helper.find_folder(templatefolders.join('/')).find(h['template'])
+ vm[h['template']] = @vsphere_helper.find_folder(@options['datacenter'],templatefolders.join('/')).find(h['template'])
  else
  vm = @vsphere_helper.find_vms(h['template'])
  end
@@ -131,8 +147,9 @@ module Beaker
  spec = create_clone_spec(h)

  # Deploy from specified template
- tasks << vm[h['template']].CloneVM_Task( :folder => @vsphere_helper.find_folder(@options['folder']), :name => h['vmhostname'], :spec => spec )
+ tasks << vm[h['template']].CloneVM_Task( :folder => @vsphere_helper.find_folder(@options['datacenter'],@options['folder']), :name => h['vmhostname'], :spec => spec )
  end
+
  try = (Time.now - start) / 5
  @vsphere_helper.wait_for_tasks(tasks, try, attempts)
  @logger.notify 'Spent %.2f seconds deploying VMs' % (Time.now - start)
@@ -147,15 +164,22 @@ module Beaker

  try = (Time.now - start) / 5
  duration = run_and_report_duration do
- @hosts.each_with_index do |h, i|
- wait_for_dns_resolution(h, try, attempts)
+ @hosts.each do |host|
+ repeat_fibonacci_style_for 8 do
+ @vsphere_helper.find_vms(host['vmhostname'])[host['vmhostname']].summary.guest.ipAddress != nil
+ end
+ host[:ip] = @vsphere_helper.find_vms(host['vmhostname'])[host['vmhostname']].summary.guest.ipAddress
+ enable_root(host)
  end
  end
+
  @logger.notify "Spent %.2f seconds waiting for DNS resolution" % duration
+

  rescue => e
  @vsphere_helper.close
  report_and_raise(@logger, e, "Vcloud.provision")
+
  end

  def cleanup
@@ -215,6 +215,33 @@ module Beaker
  end

  @logger.notify 'Spent %.2f seconds tagging VMs' % (Time.now - start)
+
+ # add additional disks to vm
+ @logger.debug 'Looking for disks to add...'
+
+ @hosts.each do |h|
+ hostname = h['vmhostname'].split(".")[0]
+
+ if h['disks']
+ @logger.debug "Found disks for #{hostname}!"
+ disks = h['disks']
+
+ disks.each_with_index do |disk_size, index|
+ start = Time.now
+
+ add_disk(hostname, disk_size)
+
+ done = wait_for_disk(hostname, disk_size, index)
+ if done
+ @logger.notify "Spent %.2f seconds adding disk #{index}. " % (Time.now - start)
+ else
+ raise "Could not verify disk was added after %.2f seconds" % (Time.now - start)
+ end
+ end
+ else
+ @logger.debug "No disks to add for #{hostname}"
+ end
+ end
  end

  def cleanup
@@ -246,5 +273,82 @@ module Beaker
  @logger.notify "Spent %.2f seconds cleaning up" % (Time.now - start)
  end

+ def add_disk(hostname, disk_size)
+ @logger.notify "Requesting an additional disk of size #{disk_size}GB for #{hostname}"
+
+ if !disk_size.to_s.match /[0123456789]/ || size <= '0'
+ raise NameError.new "Disk size must be an integer greater than zero!"
+ end
+
+ begin
+ uri = URI.parse(@options[:pooling_api] + '/api/v1/vm/' + hostname + '/disk/' + disk_size.to_s)
+
+ http = Net::HTTP.new(uri.host, uri.port)
+ request = Net::HTTP::Post.new(uri.request_uri)
+ request['X-AUTH-TOKEN'] = @credentials[:vmpooler_token]
+
+ response = http.request(request)
+
+ parsed = parse_response(response)
+
+ raise "Response from #{hostname} indicates disk was not added" if !parsed['ok']
+
+ rescue NameError, RuntimeError, Errno::EINVAL, Errno::ECONNRESET, EOFError,
+ Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError, *SSH_EXCEPTIONS => e
+ report_and_raise(@logger, e, 'Vmpooler.add_disk')
+ end
+ end
+
+ def parse_response(response)
+ parsed_response = JSON.parse(response.body)
+ end
+
+ def disk_added?(host, disk_size, index)
+ if host['disk'].nil?
+ false
+ else
+ host['disk'][index] == "+#{disk_size}gb"
+ end
+ end
+
+ def get_vm(hostname)
+ begin
+ uri = URI.parse(@options[:pooling_api] + '/vm/' + hostname)
+
+ http = Net::HTTP.new(uri.host, uri.port)
+ request = Net::HTTP::Get.new(uri.request_uri)
+
+ response = http.request(request)
+ rescue RuntimeError, Errno::EINVAL, Errno::ECONNRESET, EOFError,
+ Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError, *SSH_EXCEPTIONS => e
+ @logger.notify "Failed to connect to vmpooler while getting VM information!"
+ end
+ end
+
+ def wait_for_disk(hostname, disk_size, index)
+ response = get_vm(hostname)
+ parsed = parse_response(response)
+
+ @logger.notify "Waiting for disk"
+
+ attempts = 0
+
+ while (!disk_added?(parsed[hostname], disk_size, index) && attempts < 20)
+ sleep 10
+ begin
+ response = get_vm(hostname)
+ parsed = parse_response(response)
+ rescue RuntimeError, Errno::EINVAL, Errno::ECONNRESET, EOFError,
+ Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError, *SSH_EXCEPTIONS => e
+ report_and_raise(@logger, e, "Vmpooler.wait_for_disk")
+ end
+ print "."
+ attempts += 1
+ end
+
+ puts " "
+
+ disk_added?(parsed[hostname], disk_size, index)
+ end
  end
  end
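Editorial note, not part of the gem diff: the polling above depends on how the vmpooler API reports attached disks, namely an ordered array of `'+<size>gb'` strings on the VM record (the same shape appears in the spec fixtures later in this diff). A tiny sketch of the comparison `disk_added?` performs:

```ruby
# Editorial sketch only; vm_record stands in for the parsed API response.
vm_record = { 'disk' => ['+8gb', '+16gb'] }
vm_record['disk'][0] == '+8gb'   # => true, first requested disk is attached
vm_record['disk'][1] == '+16gb'  # => true, second requested disk is attached
```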
@@ -118,29 +118,23 @@ class VsphereHelper
  vms
  end

- def find_datastore datastorename
- datacenter = @connection.serviceInstance.find_datacenter
+ def find_datastore(dc,datastorename)
+ datacenter = @connection.serviceInstance.find_datacenter(dc)
  datacenter.find_datastore(datastorename)
  end

- def find_folder foldername
- datacenter = @connection.serviceInstance.find_datacenter
- base = datacenter.vmFolder
- folders = foldername.split('/')
- folders.each do |folder|
- case base
- when RbVmomi::VIM::Folder
- base = base.childEntity.find { |f| f.name == folder }
- else
- abort "Unexpected object type encountered (#{base.class}) while finding folder"
- end
+ def find_folder(dc,foldername)
+ datacenter = @connection.serviceInstance.find_datacenter(dc)
+ base = datacenter.vmFolder.traverse(foldername)
+ if base != nil
+ base
+ else
+ abort "Failed to find folder #{foldername}"
  end
-
- base
  end

- def find_pool poolname
- datacenter = @connection.serviceInstance.find_datacenter
+ def find_pool(dc,poolname)
+ datacenter = @connection.serviceInstance.find_datacenter(dc)
  base = datacenter.hostFolder
  pools = poolname.split('/')
  pools.each do |pool|
@@ -172,6 +172,7 @@ module Beaker
  :package_proxy => false,
  :add_el_extras => false,
  :epel_url => "http://dl.fedoraproject.org/pub/epel",
+ :epel_url_archive => 'http://archive.fedoraproject.org/pub/archive/epel',
  :consoleport => 443,
  :pe_dir => '/opt/enterprise/dists',
  :pe_version_file => 'LATEST',
@@ -1,5 +1,4 @@
  require 'json'
- require 'rake'
  require 'stringio'
  require 'yaml/store'
  require 'fileutils'
@@ -1,5 +1,5 @@
  module Beaker
  module Version
- STRING = '3.14.0'
+ STRING = '3.15.0'
  end
  end
@@ -20,6 +20,19 @@ module Cisco
  @platform = 'cisco_nexus-7-x86_64'
  end

+ it 'starts with sourcing the /etc/profile script' do
+ answer_correct = 'source /etc/profile;'
+ answer_test = host.prepend_commands( 'fake_command' )
+ expect( answer_test ).to be === answer_correct
+ end
+
+ it 'uses `sudo` if not root' do
+ @options = { :user => 'notroot' }
+ answer_correct = "source /etc/profile; sudo sh -c \""
+ answer_test = host.prepend_commands( 'fake_command' )
+ expect( answer_test ).to be === answer_correct
+ end
+
  it 'ends with the :vrf host parameter' do
  vrf_answer = 'vrf_answer_135246'
  @options = {
@@ -29,19 +42,27 @@ module Cisco
  expect( answer_test ).to match( /ip netns exec #{vrf_answer}$/ )
  end

- it 'guards against "vsh" usage (only scenario we dont want prefixing)' do
- answer_prepend_commands = 'pc_param_unchanged_13584'
- answer_test = host.prepend_commands( 'fake/vsh/command', answer_prepend_commands )
- expect( answer_test ).to be === answer_prepend_commands
+ it 'guards against "vsh" usage (scenario we never want prefixing)' do
+ answer_user_pc = 'pc_param_unchanged_13584'
+ answer_test = host.prepend_commands( 'fake/vsh/command', answer_user_pc )
+ expect( answer_test ).to be === answer_user_pc
+ end
+
+ it 'guards against "ntpdate" usage (we dont want prefixing on nexus)' do
+ answer_user_pc = 'user_pc_param_54321'
+ answer_test = host.prepend_commands( 'fake/ntpdate/command', answer_user_pc )
+ expect( answer_test ).to be === answer_user_pc
  end

+
  it 'retains user-specified prepend commands when adding vrf' do
  @options = {
  :vrf => 'fakevrf',
  }
  answer_prepend_commands = 'prepend'
+ answer_correct = 'source /etc/profile;ip netns exec fakevrf prepend'
  answer_test = host.prepend_commands( 'fake_command', answer_prepend_commands )
- expect( answer_test ).to match( /^ip netns exec fakevrf #{answer_prepend_commands}/ )
+ expect( answer_test ).to be === answer_correct
  end
  end

@@ -51,52 +72,49 @@ module Cisco
  @platform = 'cisco_ios_xr-6-x86_64'
  end

+ it 'starts with sourcing the /etc/profile script' do
+ answer_correct = 'source /etc/profile;'
+ answer_test = host.prepend_commands( 'fake_command' )
+ expect( answer_test ).to be === answer_correct
+ end
+
  it 'does use the :vrf host parameter if provided' do
  @options = { :vrf => 'tpnns' }
  answer_test = host.prepend_commands( 'fake_command' )
  expect( answer_test ).to match( /ip netns exec tpnns/ )
  end

+ it 'does not guard "ntpdate" usage' do
+ answer_user_pc = 'user_pc_param_54321'
+ answer_correct = 'source /etc/profile;user_pc_param_54321'
+ answer_test = host.prepend_commands( 'fake/ntpdate/command', answer_user_pc )
+ expect( answer_test ).to be === answer_correct
+ end
+
  it 'retains user-specified prepend commands when adding vrf' do
  @options = { :vrf => 'fakevrf', }
  answer_prepend_commands = 'prepend'
+ answer_correct = 'source /etc/profile;ip netns exec fakevrf prepend'
  answer_test = host.prepend_commands( 'fake_command', answer_prepend_commands )
- expect( answer_test ).to match( /^ip netns exec fakevrf #{answer_prepend_commands}/ )
+ expect( answer_test ).to be === answer_correct
  end
  end
  end

  describe '#environment_string' do

- it 'starts with sourcing the /etc/profile script' do
- answer_test = host.environment_string( {} )
- expect( answer_test ).to match( %r{^source /etc/profile;} )
- end
-
- it 'uses `sudo` if not root' do
- @options = { :user => 'notroot' }
- answer_test = host.environment_string( {} )
- expect( answer_test ).to match( /sudo/ )
- end
-
  context 'for cisco_nexus-7' do

  before :each do
  @platform = 'cisco_nexus-7-x86_64'
  end

- it 'uses `sudo` if not root' do
- @options = { :user => 'notroot' }
- env_map = { 'PATH' => '/opt/pants/2' }
- answer_test = host.environment_string( env_map )
- expect( answer_test ).to match( %r{^source /etc/profile; sudo } )
- end
-
  it 'uses `export` if root' do
  @options = { :user => 'root' }
  env_map = { 'PATH' => '/opt/pants/2' }
+ answer_correct = ' export PATH="/opt/pants/2";'
  answer_test = host.environment_string( env_map )
- expect( answer_test ).to match( %r{^source /etc/profile; export } )
+ expect( answer_test ).to be === answer_correct
  end

  it 'ends with a semi-colon' do
@@ -108,7 +126,7 @@ module Cisco
  it 'turns env maps into paired strings correctly' do
  @options = { :user => 'root' }
  env_map = { 'var1' => 'ans1', 'VAR2' => 'ans2' }
- answer_correct = 'source /etc/profile; export var1="ans1" VAR1="ans1" VAR2="ans2";'
+ answer_correct = ' export var1="ans1" VAR1="ans1" VAR2="ans2";'
  answer_test = host.environment_string( env_map )
  expect( answer_test ).to be === answer_correct
  end
@@ -122,15 +140,17 @@ module Cisco
  it 'uses `sudo` if not root' do
  @options = { :user => 'notroot' }
  env_map = { 'PATH' => '/opt/pants/2' }
+ answer_correct = ' env PATH="/opt/pants/2"'
  answer_test = host.environment_string( env_map )
- expect( answer_test ).to match( %r{^source /etc/profile; sudo } )
+ expect( answer_test ).to be === answer_correct
  end

  it 'uses `env` if root' do
  @options = { :user => 'root' }
  env_map = { 'PATH' => '/opt/pants/1' }
+ answer_correct = ' env PATH="/opt/pants/1"'
  answer_test = host.environment_string( env_map )
- expect( answer_test ).to match( %r{^source /etc/profile; env } )
+ expect( answer_test ).to be === answer_correct
  end

  it 'does not end with a semi-colon' do
@@ -142,7 +162,7 @@ module Cisco
  it 'turns env maps into paired strings correctly' do
  @options = { :user => 'root' }
  env_map = { 'VAR1' => 'ans1', 'var2' => 'ans2' }
- answer_correct = 'source /etc/profile; env VAR1="ans1" var2="ans2" VAR2="ans2"'
+ answer_correct = ' env VAR1="ans1" var2="ans2" VAR2="ans2"'
  answer_test = host.environment_string( env_map )
  expect( answer_test ).to be === answer_correct
  end
@@ -304,22 +304,53 @@ describe Beaker do
  context "add_el_extras" do
  subject { dummy_class.new }

- it "add extras for el-5/6 hosts" do
+ it 'adds archived extras for el-5 hosts' do
+
+ hosts = make_hosts( { :platform => Beaker::Platform.new('el-5-arch'), :exit_code => 1 }, 2 )
+ hosts[1][:platform] = Beaker::Platform.new('oracle-5-arch')
+
+ expect( Beaker::Command ).to receive( :new ).with(
+ "rpm -qa | grep epel-release"
+ ).exactly( 2 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "rpm -i http://archive.fedoraproject.org/pub/archive/epel/epel-release-latest-5.noarch.rpm"
+ ).exactly( 2 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "sed -i -e 's;#baseurl.*$;baseurl=http://archive\\.fedoraproject\\.org/pub/archive/epel/5/$basearch;' /etc/yum.repos.d/epel.repo"
+ ).exactly( 2 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo"
+ ).exactly( 2 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "yum clean all && yum makecache"
+ ).exactly( 2 ).times

- hosts = make_hosts( { :platform => Beaker::Platform.new('el-5-arch'), :exit_code => 1 }, 6 )
- hosts[0][:platform] = Beaker::Platform.new('el-6-arch')
+ subject.add_el_extras( hosts, options )
+
+ end
+
+ it 'adds extras for el-6 hosts' do
+
+ hosts = make_hosts( { :platform => Beaker::Platform.new('el-6-arch'), :exit_code => 1 }, 4 )
  hosts[1][:platform] = Beaker::Platform.new('centos-6-arch')
  hosts[2][:platform] = Beaker::Platform.new('scientific-6-arch')
  hosts[3][:platform] = Beaker::Platform.new('redhat-6-arch')
- hosts[4][:platform] = Beaker::Platform.new('oracle-5-arch')
-
- expect( Beaker::Command ).to receive( :new ).with("rpm -qa | grep epel-release").exactly( 6 ).times
- expect( Beaker::Command ).to receive( :new ).with("rpm -i http://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm").exactly( 4 ).times
- expect( Beaker::Command ).to receive( :new ).with("rpm -i http://dl.fedoraproject.org/pub/epel/epel-release-latest-5.noarch.rpm").exactly( 2 ).times
- expect( Beaker::Command ).to receive( :new ).with("sed -i -e 's;#baseurl.*$;baseurl=http://dl\\.fedoraproject\\.org/pub/epel/6/$basearch;' /etc/yum.repos.d/epel.repo").exactly( 4 ).times
- expect( Beaker::Command ).to receive( :new ).with("sed -i -e 's;#baseurl.*$;baseurl=http://dl\\.fedoraproject\\.org/pub/epel/5/$basearch;' /etc/yum.repos.d/epel.repo").exactly( 2 ).times
- expect( Beaker::Command ).to receive( :new ).with("sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo").exactly( 6 ).times
- expect( Beaker::Command ).to receive( :new ).with("yum clean all && yum makecache").exactly( 6 ).times
+
+ expect( Beaker::Command ).to receive( :new ).with(
+ "rpm -qa | grep epel-release"
+ ).exactly( 4 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "rpm -i http://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm"
+ ).exactly( 4 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "sed -i -e 's;#baseurl.*$;baseurl=http://dl\\.fedoraproject\\.org/pub/epel/6/$basearch;' /etc/yum.repos.d/epel.repo"
+ ).exactly( 4 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo"
+ ).exactly( 4 ).times
+ expect( Beaker::Command ).to receive( :new ).with(
+ "yum clean all && yum makecache"
+ ).exactly( 4 ).times

  subject.add_el_extras( hosts, options )

@@ -84,7 +84,7 @@ module Beaker
  openstack.provision

  @hosts.each do |host|
- expect(host[:keyname]).to match(/[_\-0-9a-zA-Z]+/)
+ expect(host[:keyname]).to match(/^[_\-0-9a-zA-Z]+$/)
  end
  end

@@ -156,5 +156,31 @@ module Beaker
  openstack.provision_storage mock_host, mock_vm
  end

+ it 'supports keystone v2' do
+ credentials = openstack.instance_eval('@credentials')
+ expect(credentials[:openstack_user_domain]).to be_nil
+ expect(credentials[:openstack_project_domain]).to be_nil
+ end
+
+ it 'supports keystone v3 with implicit arguments' do
+ v3_options = options
+ v3_options[:openstack_auth_url] = 'https://example.com/identity/v3/auth'
+
+ credentials = OpenStack.new(@hosts, v3_options).instance_eval('@credentials')
+ expect(credentials[:openstack_user_domain]).to eq('Default')
+ expect(credentials[:openstack_project_domain]).to eq('Default')
+ end
+
+ it 'supports keystone v3 with explicit arguments' do
+ v3_options = options
+ v3_options[:openstack_auth_url] = 'https://example.com/identity/v3/auth'
+ v3_options[:openstack_user_domain] = 'acme.com'
+ v3_options[:openstack_project_domain] = 'R&D'
+
+ credentials = OpenStack.new(@hosts, v3_options).instance_eval('@credentials')
+ expect(credentials[:openstack_user_domain]).to eq('acme.com')
+ expect(credentials[:openstack_project_domain]).to eq('R&D')
+ end
+
  end
  end
@@ -9,7 +9,7 @@ module Beaker
  stub_const( "VsphereHelper", MockVsphereHelper )
  stub_const( "Net", MockNet )
  json = double( 'json' )
- allow( json ).to receive( :parse ) do |arg|
+ allow( json ).to receive( :parse ) do |arg|
  arg
  end
  stub_const( "JSON", json )
@@ -23,6 +23,7 @@ module Beaker

  opts = make_opts
  opts[:pooling_api] = nil
+ opts[:datacenter] = 'testdc'

  vcloud = Beaker::Vcloud.new( make_hosts, opts )
  allow( vcloud ).to receive( :require ).and_return( true )
@@ -47,6 +48,7 @@

  opts = make_opts
  opts[:pooling_api] = nil
+ opts[:datacenter] = 'testdc'

  vcloud = Beaker::Vcloud.new( make_hosts, opts )
  allow( vcloud ).to receive( :require ).and_return( true )
@@ -60,6 +60,42 @@ module Beaker
  end
  end

+ describe '#disk_added?' do
+ let(:vmpooler) { Beaker::Vmpooler.new(make_hosts, make_opts) }
+ let(:response_hash_no_disk) {
+ {
+ "ok" => "true",
+ "hostname" => {
+ "template"=>"redhat-7-x86_64",
+ "domain"=>"delivery.puppetlabs.net"
+ }
+ }
+ }
+ let(:response_hash_disk) {
+ {
+ "ok" => "true",
+ "hostname" => {
+ "disk" => [
+ '+16gb',
+ '+8gb'
+ ],
+ "template"=>"redhat-7-x86_64",
+ "domain"=>"delivery.puppetlabs.net"
+ }
+ }
+ }
+ it 'returns false when there is no disk' do
+ host = response_hash_no_disk['hostname']
+ expect(vmpooler.disk_added?(host, "8", 0)).to be(false)
+ end
+
+ it 'returns true when there is a disk' do
+ host = response_hash_disk["hostname"]
+ expect(vmpooler.disk_added?(host, "16", 0)).to be(true)
+ expect(vmpooler.disk_added?(host, "8", 1)).to be(true)
+ end
+ end
+
  describe "#provision" do

  it 'provisions hosts from the pool' do
@@ -5,23 +5,21 @@ module Beaker
  let( :logger ) { double('logger').as_null_object }
  let( :vInfo ) { { :server => "vsphere.labs.net", :user => "vsphere@labs.com", :pass => "supersekritpassword" } }
  let( :vsphere_helper ) { VsphereHelper.new ( vInfo.merge( { :logger => logger } ) ) }
- let( :snaplist ) { { 'snap1' => { 'snap1sub1' => nil ,
- 'snap1sub2' => nil },
+ let( :snaplist ) { { 'snap1' => { 'snap1sub1' => nil ,
+ 'snap1sub2' => nil },
  'snap2' => nil,
- 'snap3' => { 'snap3sub1' => nil ,
+ 'snap3' => { 'snap3sub1' => nil ,
  'snap3sub2' => nil ,
  'snap3sub3' => nil } } }
- let( :vms ) { [ MockRbVmomiVM.new( 'mockvm1', snaplist ),
- MockRbVmomiVM.new( 'mockvm2', snaplist ),
+ let( :vms ) { [ MockRbVmomiVM.new( 'mockvm1', snaplist ),
+ MockRbVmomiVM.new( 'mockvm2', snaplist ),
  MockRbVmomiVM.new( 'mockvm3', snaplist ) ] }

-
-
  before :each do
  stub_const( "RbVmomi", MockRbVmomi )
  end

- describe "#load_config" do
+ describe "#load_config" do

  it 'can load a .fog file' do
  allow( File ).to receive( :exists? ).and_return( true )
@@ -78,7 +76,9 @@ module Beaker

  describe "#find_datastore" do
  it 'finds the datastore from the connection object' do
- expect(vsphere_helper.find_datastore( 'datastorename' ) ).to be === true
+ connection = vsphere_helper.instance_variable_get( :@connection )
+ dc = connection.serviceInstance.find_datacenter('testdc')
+ expect(vsphere_helper.find_datastore( dc,'datastorename' ) ).to be === true
  end

  end
@@ -86,8 +86,7 @@ module Beaker
  describe "#find_folder" do
  it 'can find a folder in the datacenter' do
  connection = vsphere_helper.instance_variable_get( :@connection )
-
- expect(vsphere_helper.find_folder( 'root' ) ).to be === connection.serviceInstance.find_datacenter.vmFolder
+ expect(vsphere_helper.find_folder( 'testdc','root' ) ).to be === connection.serviceInstance.find_datacenter('testdc').vmFolder
  end

  end
@@ -95,26 +94,28 @@ module Beaker
  describe "#find_pool" do
  it 'can find a pool in a folder in the datacenter' do
  connection = vsphere_helper.instance_variable_get( :@connection )
- connection.serviceInstance.find_datacenter.hostFolder = MockRbVmomi::VIM::Folder.new
- connection.serviceInstance.find_datacenter.hostFolder.name = "/root"
+ dc = connection.serviceInstance.find_datacenter('testdc')
+ dc.hostFolder = MockRbVmomi::VIM::Folder.new
+ dc.hostFolder.name = "/root"

- expect(vsphere_helper.find_pool( 'root' ) ).to be === connection.serviceInstance.find_datacenter.hostFolder
+ expect(vsphere_helper.find_pool( 'testdc','root' ) ).to be === connection.serviceInstance.find_datacenter('testdc').hostFolder

  end
  it 'can find a pool in a clustercomputeresource in the datacenter' do
  connection = vsphere_helper.instance_variable_get( :@connection )
- connection.serviceInstance.find_datacenter.hostFolder = MockRbVmomi::VIM::ClusterComputeResource.new
- connection.serviceInstance.find_datacenter.hostFolder.name = "/root"
-
- expect(vsphere_helper.find_pool( 'root' ) ).to be === connection.serviceInstance.find_datacenter.hostFolder
+ dc = connection.serviceInstance.find_datacenter('testdc')
+ dc.hostFolder = MockRbVmomi::VIM::ClusterComputeResource.new
+ dc.hostFolder.name = "/root"

+ expect(vsphere_helper.find_pool( 'testdc','root' ) ).to be === connection.serviceInstance.find_datacenter('testdc').hostFolder
  end
  it 'can find a pool in a resourcepool in the datacenter' do
  connection = vsphere_helper.instance_variable_get( :@connection )
- connection.serviceInstance.find_datacenter.hostFolder = MockRbVmomi::VIM::ResourcePool.new
- connection.serviceInstance.find_datacenter.hostFolder.name = "/root"
+ dc = connection.serviceInstance.find_datacenter('testdc')
+ dc.hostFolder = MockRbVmomi::VIM::ResourcePool.new
+ dc.hostFolder.name = "/root"

- expect(vsphere_helper.find_pool( 'root' ) ).to be === connection.serviceInstance.find_datacenter.hostFolder
+ expect(vsphere_helper.find_pool( 'testdc','root' ) ).to be === connection.serviceInstance.find_datacenter('testdc').hostFolder
  end

  end
@@ -183,7 +183,7 @@ class MockRbVmomiConnection
  @datacenter = Datacenter.new
  end

- def find_datacenter
+ def find_datacenter dc
  @datacenter
  end

@@ -258,6 +258,7 @@ class MockRbVmomi
  self
  end

+
  def childEntity
  self
  end
@@ -266,6 +267,9 @@ class MockRbVmomi
  self
  end

+ def traverse path, type=Object, create=false
+ self
+ end
  end

  class ResourcePool
@@ -160,15 +160,15 @@ class MockVsphereHelper
  nil
  end

- def find_datastore datastore
+ def find_datastore dc,datastore
  datastore
  end

- def find_pool pool
+ def find_pool dc,pool
  pool
  end

- def find_folder folder
+ def find_folder dc,folder
  folder
  end

metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: beaker
  version: !ruby/object:Gem::Version
- version: 3.14.0
+ version: 3.15.0
  platform: ruby
  authors:
  - Puppet
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-04-05 00:00:00.000000000 Z
+ date: 2017-04-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rspec