fog-opennebula 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +32 -0
- data/CONTRIBUTORS.md +4 -0
- data/Gemfile +9 -0
- data/LICENSE.md +20 -0
- data/README.md +95 -0
- data/Rakefile +118 -0
- data/fog-opennebula.gemspec +35 -0
- data/lib/fog/bin/opennebula.rb +32 -0
- data/lib/fog/opennebula.rb +30 -0
- data/lib/fog/opennebula/compute.rb +136 -0
- data/lib/fog/opennebula/models/compute/flavor.rb +190 -0
- data/lib/fog/opennebula/models/compute/flavors.rb +46 -0
- data/lib/fog/opennebula/models/compute/group.rb +28 -0
- data/lib/fog/opennebula/models/compute/groups.rb +38 -0
- data/lib/fog/opennebula/models/compute/interface.rb +39 -0
- data/lib/fog/opennebula/models/compute/interfaces.rb +20 -0
- data/lib/fog/opennebula/models/compute/network.rb +48 -0
- data/lib/fog/opennebula/models/compute/networks.rb +42 -0
- data/lib/fog/opennebula/models/compute/server.rb +85 -0
- data/lib/fog/opennebula/models/compute/servers.rb +33 -0
- data/lib/fog/opennebula/requests/compute/OpenNebulaVNC.rb +314 -0
- data/lib/fog/opennebula/requests/compute/get_vnc_console.rb +58 -0
- data/lib/fog/opennebula/requests/compute/image_pool.rb +33 -0
- data/lib/fog/opennebula/requests/compute/list_groups.rb +87 -0
- data/lib/fog/opennebula/requests/compute/list_networks.rb +79 -0
- data/lib/fog/opennebula/requests/compute/list_vms.rb +79 -0
- data/lib/fog/opennebula/requests/compute/template_pool.rb +120 -0
- data/lib/fog/opennebula/requests/compute/vm_allocate.rb +97 -0
- data/lib/fog/opennebula/requests/compute/vm_destroy.rb +39 -0
- data/lib/fog/opennebula/requests/compute/vm_disk_snapshot.rb +33 -0
- data/lib/fog/opennebula/requests/compute/vm_resume.rb +35 -0
- data/lib/fog/opennebula/requests/compute/vm_shutdown.rb +22 -0
- data/lib/fog/opennebula/requests/compute/vm_stop.rb +21 -0
- data/lib/fog/opennebula/requests/compute/vm_suspend.rb +38 -0
- data/lib/fog/opennebula/version.rb +9 -0
- data/tests/opennebula/compute_tests.rb +15 -0
- data/tests/opennebula/models/compute/flavor_tests.rb +34 -0
- data/tests/opennebula/models/compute/flavors_tests.rb +15 -0
- data/tests/opennebula/models/compute/group_tests.rb +25 -0
- data/tests/opennebula/models/compute/groups_tests.rb +14 -0
- data/tests/opennebula/models/compute/network_tests.rb +24 -0
- data/tests/opennebula/models/compute/networks_tests.rb +14 -0
- data/tests/opennebula/requests/compute/vm_allocate_tests.rb +70 -0
- data/tests/opennebula/requests/compute/vm_disk_snapshot_test.rb +44 -0
- data/tests/opennebula/requests/compute/vm_suspend_resume_tests.rb +45 -0
- metadata +243 -0
data/lib/fog/opennebula/requests/compute/vm_destroy.rb
@@ -0,0 +1,39 @@
module Fog

  module Compute

    class OpenNebula

      class Real

        def vm_destroy(id)
          vmpool = ::OpenNebula::VirtualMachinePool.new(client)
          vmpool.info(-2, id, id, -1)

          vmpool.each do |vm|
            # true => delete and recreate vm
            vm.delete(false)
          end
        end

      end

      class Mock

        def vm_destroy(id)
          response = Excon::Response.new
          response.status = 200

          data['vms'].each do |vm|
            data['vms'].delete(vm) if vm['id'] == id
          end
          true
        end

      end

    end

  end

end
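For orientation, a minimal usage sketch of the request above (not part of the packaged files): it assumes OpenNebula credentials are configured in ~/.fog, as the bundled Shindo tests expect, and that list_vms returns hashes keyed by 'id', as the tests further down rely on.

require 'fog/opennebula'

compute = Fog::Compute[:opennebula]   # provider credentials read from ~/.fog

vm = compute.list_vms.first           # pick some existing VM from the pool
if vm
  compute.vm_destroy(vm['id'])        # delete it (delete-and-recreate stays false)
  # after destruction the id should no longer show up when filtering the pool
  gone = compute.list_vms(id: vm['id']).none? { |v| v['id'] == vm['id'] }
  puts gone
end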
data/lib/fog/opennebula/requests/compute/vm_disk_snapshot.rb
@@ -0,0 +1,33 @@
module Fog

  module Compute

    class OpenNebula

      class Real

        def vm_disk_snapshot(id, disk_id, image_name)
          vmpool = ::OpenNebula::VirtualMachinePool.new(client)
          vmpool.info(-2, id, id, -1)

          rc = 0
          vmpool.each do |vm|
            rc = vm.disk_snapshot_create(disk_id, image_name)
            raise(rc) if rc.is_a? ::OpenNebula::Error
          end
          rc
        end

      end

      class Mock

        def vm_disk_snapshot(id, disk_id, image_name); end

      end

    end

  end

end
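A similarly hedged sketch for the snapshot request, mirroring what data/tests/opennebula/requests/compute/vm_disk_snapshot_test.rb does further down: snapshot disk 0 of a running VM, then poll image_pool until the new image leaves LOCKED (state 4) and becomes READY (state 1). The VM id used here is hypothetical.

require 'fog/opennebula'

compute = Fog::Compute[:opennebula]   # credentials from ~/.fog, as in the tests
vm_id   = 42                          # hypothetical id of a RUNNING VM

img_id = compute.vm_disk_snapshot(vm_id, 0, "backup-#{Time.now.to_i}")

25.times do
  sleep(2)
  image = compute.image_pool(mine: true, id: img_id).first
  break if image && image.state == 1  # READY; stays 4 (LOCKED) while the VM runs
end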
data/lib/fog/opennebula/requests/compute/vm_resume.rb
@@ -0,0 +1,35 @@
module Fog

  module Compute

    class OpenNebula

      class Real

        def vm_resume(id)
          vmpool = ::OpenNebula::VirtualMachinePool.new(client)
          vmpool.info(-2, id, id, -1)

          vmpool.each(&:resume)
        end

      end

      class Mock

        def vm_resume(id)
          data['vms'].each do |vm|
            if id == vm['id']
              vm['state'] = 'RUNNING'
              vm['status'] = 3
            end
          end
        end

      end

    end

  end

end
data/lib/fog/opennebula/requests/compute/vm_shutdown.rb
@@ -0,0 +1,22 @@
module Fog

  module Compute

    class OpenNebula

      class Real

        def vm_shutdown(id)
          vmpool = ::OpenNebula::VirtualMachinePool.new(client)
          vmpool.info(-2, id, id, -1)

          vmpool.each(&:shutdown)
        end

      end

    end

  end

end
data/lib/fog/opennebula/requests/compute/vm_stop.rb
@@ -0,0 +1,21 @@
module Fog

  module Compute

    class OpenNebula

      class Real

        def vm_stop(id)
          vmpool = ::OpenNebula::VirtualMachinePool.new(client)
          vmpool.info(-2, id, id, -1)
          vmpool.each(&:stop)
        end

      end

    end

  end

end
data/lib/fog/opennebula/requests/compute/vm_suspend.rb
@@ -0,0 +1,38 @@
module Fog

  module Compute

    class OpenNebula

      class Real

        def vm_suspend(id)
          vmpool = ::OpenNebula::VirtualMachinePool.new(client)
          vmpool.info(-2, id, id, -1)

          vmpool.each(&:suspend)
        end

      end

      class Mock

        def vm_suspend(id)
          response = Excon::Response.new
          response.status = 200

          data['vms'].each do |vm|
            if id == vm['id']
              vm['state'] = 'LCM_INIT'
              vm['status'] = 5
            end
          end
        end

      end

    end

  end

end
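Taken together with vm_resume above, this gives a simple suspend/resume round trip; a sketch under the same assumptions (live endpoint via ~/.fog, hypothetical VM id). In mock mode the same calls just flip the fake VM's state/status fields shown in the Mock classes.

require 'fog/opennebula'

compute = Fog::Compute[:opennebula]
vm_id   = 42                  # hypothetical VM id

compute.vm_suspend(vm_id)     # Real: suspends every pool entry matching the id
compute.vm_resume(vm_id)      # Real: resumes it again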
data/tests/opennebula/compute_tests.rb
@@ -0,0 +1,15 @@
Shindo.tests('Fog::Compute[:opennebula]', ['opennebula']) do
  compute = Fog::Compute[:opennebula]

  tests('Compute collections') do
    %w[networks groups].each do |collection|
      test("it should respond to #{collection}") { compute.respond_to? collection }
    end
  end

  tests('Compute requests') do
    %w[list_networks].each do |request|
      test("it should respond to #{request}") { compute.respond_to? request }
    end
  end
end
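For context, the collections and requests this test probes look roughly like this in use; a sketch assuming ~/.fog credentials, as the test itself relies on.

require 'fog/opennebula'

compute = Fog::Compute[:opennebula]

compute.networks.each { |net| puts "#{net.id} #{net.name}" }  # networks collection
compute.groups.each   { |grp| puts "#{grp.id} #{grp.name}" }  # groups collection
p compute.list_networks                                       # raw request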
data/tests/opennebula/models/compute/flavor_tests.rb
@@ -0,0 +1,34 @@
Shindo.tests('Fog::Compute[:opennebula] | flavor model', ['opennebula']) do
  flavors = Fog::Compute[:opennebula].flavors
  flavor = flavors.get_by_name('fogtest').last

  tests('The flavor model should') do
    tests('have the action') do
      test('reload') { flavor.respond_to? 'reload' }
    end
    tests('have attributes') do
      model_attribute_hash = flavor.attributes
      tests('The flavor model should respond to') do
        %i[name id to_label to_s get_cpu get_vcpu get_memory get_raw get_disk get_os
           get_graphics get_nic get_sched_ds_requirements get_sched_ds_rank get_sched_requirements
           get_sched_rank get_context get_user_variables].each do |attribute|
          test(attribute.to_s) { flavor.respond_to? attribute }
        end
      end
      tests('The attributes hash should have key') do
        %i[name id content cpu vcpu memory os graphics context user_variables].each do |attribute|
          test(attribute.to_s) { model_attribute_hash.key? attribute }
        end
      end
    end
    test('be a kind of Fog::Compute::OpenNebula::Flavor') { flavor.is_a? Fog::Compute::OpenNebula::Flavor }
    test('have a nic in network fogtest') { flavor.nic[0].vnet.name == 'fogtest' }

    flavor.vcpu = 666
    flavor.memory = 666
    test('have a 666 MB memory') { flavor.get_memory == "MEMORY=666\n" }
    test('have a 666 CPUs') { flavor.get_vcpu == "VCPU=666\n" }

    test('raw parsed properly') { flavor.get_raw == %(RAW=["DATA"="<cpu match='exact'><model fallback='allow'>core2duo</model></cpu>", "TYPE"="kvm"]\n) }
  end
end
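The get_* accessors checked here render individual template sections back to OpenNebula template text; a small sketch following the same calls the test makes ('fogtest' is the test fixture name, substitute your own template).

require 'fog/opennebula'

compute = Fog::Compute[:opennebula]

flavor = compute.flavors.get_by_name('fogtest').last  # lookup by template name
flavor.vcpu   = 4
flavor.memory = 2048

puts flavor.get_vcpu    # => "VCPU=4\n"
puts flavor.get_memory  # => "MEMORY=2048\n"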
data/tests/opennebula/models/compute/flavors_tests.rb
@@ -0,0 +1,15 @@
Shindo.tests('Fog::Compute[:opennebula] | flavors collection', ['opennebula']) do
  flavors = Fog::Compute[:opennebula].flavors

  tests('The flavors collection should') do
    test('should be a kind of Fog::Compute::OpenNebula::Flavors') { flavors.is_a? Fog::Compute::OpenNebula::Flavors }
    tests('should be able to reload itself').succeeds { flavors.reload }

    tests('should be able to get models') do
      tests('all').succeeds { flavors.all }
      tests('by instance id').succeeds { flavors.get flavors.first.id }
      tests('by name').succeeds { flavors.get_by_name 'fogtest' }
      tests('by filter').succeeds { flavors.get_by_filter ({ name: 'fogtest', id: flavors.first.id }) }
    end
  end
end
data/tests/opennebula/models/compute/group_tests.rb
@@ -0,0 +1,25 @@
Shindo.tests('Fog::Compute[:opennebula] | group model', ['opennebula']) do
  groups = Fog::Compute[:opennebula].groups
  group = groups.last

  tests('The group model should') do
    tests('have the action') do
      test('reload') { group.respond_to? 'reload' }
    end
    tests('have attributes') do
      model_attribute_hash = group.attributes
      attributes =
        tests('The group model should respond to') do
          %i[name id to_label].each do |attribute|
            test(attribute.to_s) { group.respond_to? attribute }
          end
        end
      tests('The attributes hash should have key') do
        %i[name id].each do |attribute|
          test(attribute.to_s) { model_attribute_hash.key? attribute }
        end
      end
    end
    test('be a kind of Fog::Compute::OpenNebula::Group') { group.is_a? Fog::Compute::OpenNebula::Group }
  end
end
data/tests/opennebula/models/compute/groups_tests.rb
@@ -0,0 +1,14 @@
Shindo.tests('Fog::Compute[:opennebula] | groups collection', ['opennebula']) do
  groups = Fog::Compute[:opennebula].groups

  tests('The groups collection') do
    test('should be a kind of Fog::Compute::OpenNebula::Groups') { groups.is_a? Fog::Compute::OpenNebula::Groups }
    tests('should be able to reload itself').succeeds { groups.reload }
    tests('should be able to get a model by id') do
      tests('by instance id').succeeds { groups.get groups.first.id }
    end
    tests('should be able to get a model by name') do
      tests('by instance id').succeeds { groups.get_by_name 'fogtest' }
    end
  end
end
data/tests/opennebula/models/compute/network_tests.rb
@@ -0,0 +1,24 @@
Shindo.tests('Fog::Compute[:opennebula] | network model', ['opennebula']) do
  networks = Fog::Compute[:opennebula].networks
  network = networks.get_by_name('fogtest')

  tests('The network model should') do
    tests('have the action') do
      test('reload') { network.respond_to? 'reload' }
    end
    tests('have attributes') do
      model_attribute_hash = network.attributes
      tests('The network model should respond to') do
        %i[name id vlan uid uname gid description].each do |attribute|
          test(attribute.to_s) { network.respond_to? attribute }
        end
      end
      tests('The attributes hash should have key') do
        %i[name id uid uname gid].each do |attribute|
          test(attribute.to_s) { model_attribute_hash.key? attribute }
        end
      end
    end
    test('be a kind of Fog::Compute::OpenNebula::Network') { network.is_a? Fog::Compute::OpenNebula::Network }
  end
end
data/tests/opennebula/models/compute/networks_tests.rb
@@ -0,0 +1,14 @@
Shindo.tests('Fog::Compute[:opennebula] | networks collection', ['opennebula']) do
  networks = Fog::Compute[:opennebula].networks

  tests('The networks collection') do
    test('should be a kind of Fog::Compute::OpenNebula::Networks') { networks.is_a? Fog::Compute::OpenNebula::Networks }
    tests('should be able to reload itself').succeeds { networks.reload }
    tests('should be able to get a model') do
      tests('all').succeeds { networks.all }
      tests('by instance id').succeeds { networks.get networks.first.id }
      tests('by filter').succeeds { networks.get_by_filter ({ id: networks.first.id }) }
      tests('by name').succeeds { networks.get_by_name 'fogtest' }
    end
  end
end
data/tests/opennebula/requests/compute/vm_allocate_tests.rb
@@ -0,0 +1,70 @@
Shindo.tests('Fog::Compute[:opennebula] | vm_create and vm_destroy request', 'opennebula') do
  compute = Fog::Compute[:opennebula]
  name_base = Time.now.to_i
  f = compute.flavors.get_by_name('fogtest')

  tests("Get 'fogtest' flavor/template") do
    test("could not get template with name 'fogtest'! This is required for live tests!") { f.is_a? Array }
    raise ArgumentError, "Could not get a template with the name 'fogtest'" unless f
  end

  f = f.first
  response = {}

  tests('Allocate VM') do
    response = compute.vm_allocate(name: 'fog-' + name_base.to_s, flavor: f)
    test('response should be a kind of Hash') { response.is_a? Hash }
    test('id should be a one-id (Fixnum)') { response['id'].is_a? Integer }
  end

  tests('Destroy VM') do
    compute.vm_destroy(response['id'])
    test('vm should not be in array of vms') do
      vm_not_exist = true
      compute.list_vms.each do |vm|
        vm_not_exist = false if vm['id'] == response['id']
      end
      vm_not_exist
    end
    test('vm should not be in array of vms by filter') do
      vm_not_exist = true
      compute.list_vms(id: response['id']).each do |vm|
        vm_not_exist = false if vm['id'] == response['id']
      end
      vm_not_exist
    end
  end

  # tests("Create VM from template (clone)") do
  #   response = compute.create_vm(:name => 'fog-'+(name_base+ 1).to_s, :template_name => 'hwp_small', :cluster_name => 'Default')
  #   test("should be a kind of OVIRT::VM") { response.kind_of? OVIRT::VM}
  # end

  tests('Fail Creating VM - no flavor') do
    begin
      response = compute.vm_allocate(name: 'fog-' + name_base.to_s, flavor: nil)
      test('should be a kind of Hash') { response.is_a? Hash } # mock never raise exceptions
    rescue StandardError => e
      # should raise vm name already exist exception.
      test('error should be a kind of ArgumentError') { e.is_a? ArgumentError }
    end
  end
  tests('Fail Creating VM - nil name') do
    begin
      response = compute.vm_allocate(name: nil, flavor: f)
      test('should be a kind of Hash') { response.is_a? Hash } # mock never raise exceptions
    rescue StandardError => e
      # should raise vm name already exist exception.
      test('error should be a kind of ArgumentError') { e.is_a? ArgumentError }
    end
  end
  tests('Fail Creating VM - empty name') do
    begin
      response = compute.vm_allocate(name: '', flavor: f)
      test('should be a kind of Hash') { response.is_a? Hash } # mock never raise exceptions
    rescue StandardError => e
      # should raise vm name already exist exception.
      test('error should be a kind of ArgumentError') { e.is_a? ArgumentError }
    end
  end
end
data/tests/opennebula/requests/compute/vm_disk_snapshot_test.rb
@@ -0,0 +1,44 @@
Shindo.tests('Fog::Compute[:opennebula] | vm_create and destroy request', 'opennebula') do
  compute = Fog::Compute[:opennebula]

  name_base = Time.now.to_i
  f = compute.flavors.get_by_name('fogtest')
  tests("Get 'fogtest' flavor/template") do
    test("Got template with name 'fogtest'") { f.is_a? Array }
    raise ArgumentError, "Could not get a template with the name 'fogtest'! This is required for live tests!" unless f
  end

  f = f.first
  newvm = compute.servers.new
  newvm.flavor = f
  newvm.name = 'fogtest-' + name_base.to_s
  vm = newvm.save

  tests('Start VM') do
    test('response should be a kind of Hash') { vm.is_a? Fog::Compute::OpenNebula::Server }
    test('id should be a one-id (Fixnum)') { vm.id.is_a? Integer }
    vm.wait_for { (vm.state == 'RUNNING') }
    test('VM should be in RUNNING state') { vm.state == 'RUNNING' }
    sleep(30) # waiting for 30 seconds to let VM finish booting
  end

  tests('Create snapshot of the disk and shutdown VM') do
    img_id = compute.vm_disk_snapshot(vm.id, 0, 'fogtest-' + name_base.to_s)
    test('Image ID of created snapshot should be a kind of Fixnum') { img_id.is_a? Integer }
    5.times do # wait maximum 5 seconds
      sleep(1) # The delay is needed for some reason between issueing disk-snapshot and shutdown
      images = compute.image_pool(mine: true, id: img_id)
      test("Got Image with ID=#{img_id}") { images.is_a? Array }
      break if images[0].state == 4 # LOCKED, it is normal we must shutdown VM for image to go into READY state
    end
    compute.servers.shutdown(vm.id)
    image_state = 4
    25.times do # Waiting for up to 50 seconds for Image to become READY
      sleep(2)
      images = compute.image_pool(mine: true, id: img_id)
      image_state = images[0].state
      break if image_state == 1
    end
    test("New image with ID=#{img_id} should be in state READY.") { image_state == 1 }
  end
end