steamcannon-deltacloud-core 0.0.8.1 → 0.1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. data/Rakefile +3 -9
  2. data/bin/deltacloudd +17 -15
  3. data/deltacloud.rb +1 -0
  4. data/lib/deltacloud/backend_capability.rb +21 -0
  5. data/lib/deltacloud/base_driver/base_driver.rb +6 -0
  6. data/lib/deltacloud/base_driver/features.rb +22 -0
  7. data/lib/deltacloud/base_driver/mock_driver.rb +24 -27
  8. data/lib/deltacloud/drivers/ec2/ec2_driver.rb +432 -494
  9. data/lib/deltacloud/drivers/gogrid/gogrid_client.rb +4 -7
  10. data/lib/deltacloud/drivers/gogrid/gogrid_driver.rb +116 -3
  11. data/lib/deltacloud/drivers/mock/mock_driver.rb +56 -2
  12. data/lib/deltacloud/drivers/rhevm/rhevm_driver.rb +5 -19
  13. data/lib/deltacloud/helpers/application_helper.rb +26 -3
  14. data/lib/deltacloud/models/image.rb +2 -1
  15. data/lib/deltacloud/models/instance.rb +22 -5
  16. data/lib/deltacloud/models/key.rb +17 -0
  17. data/lib/deltacloud/models/load_balancer.rb +39 -0
  18. data/lib/deltacloud/models/storage_volume.rb +2 -0
  19. data/lib/sinatra/rabbit.rb +4 -7
  20. data/public/javascripts/application.js +10 -24
  21. data/public/stylesheets/compiled/application.css +2 -0
  22. data/server.rb +214 -75
  23. data/views/blobs/new.html.haml +10 -0
  24. data/views/blobs/show.html.haml +21 -15
  25. data/views/buckets/index.html.haml +1 -1
  26. data/views/buckets/show.html.haml +5 -2
  27. data/views/errors/backend_capability_failure.html.haml +11 -0
  28. data/views/errors/backend_capability_failure.xml.haml +4 -0
  29. data/views/errors/backend_error.html.haml +3 -0
  30. data/views/errors/not_allowed.html.haml +6 -0
  31. data/views/errors/not_allowed.xml.haml +2 -0
  32. data/views/instances/index.html.haml +1 -1
  33. data/views/instances/new.html.haml +8 -0
  34. data/views/instances/show.html.haml +1 -1
  35. data/views/keys/show.xml.haml +2 -0
  36. data/views/load_balancers/index.html.haml +33 -0
  37. data/views/load_balancers/index.xml.haml +5 -0
  38. data/views/load_balancers/new.html.haml +38 -0
  39. data/views/load_balancers/show.html.haml +37 -0
  40. data/views/load_balancers/show.xml.haml +21 -0
  41. data/views/realms/index.html.haml +4 -7
  42. data/views/storage_snapshots/index.html.haml +3 -0
  43. data/views/storage_snapshots/index.xml.haml +0 -2
  44. data/views/storage_snapshots/new.html.haml +9 -0
  45. data/views/storage_snapshots/show.xml.haml +0 -2
  46. data/views/storage_volumes/attach.html.haml +20 -0
  47. data/views/storage_volumes/index.html.haml +16 -1
  48. data/views/storage_volumes/index.xml.haml +1 -20
  49. data/views/storage_volumes/new.html.haml +17 -0
  50. data/views/storage_volumes/show.xml.haml +13 -19
  51. metadata +53 -99
data/Rakefile CHANGED
@@ -43,15 +43,9 @@ begin
  rescue LoadError
  end

- @specs = ['ruby', 'java'].inject({}) do |hash, spec_platform|
- $platform = spec_platform
- hash.update(spec_platform => Gem::Specification.load('deltacloud-core.gemspec'))
- end
-
- @specs.values.each do |spec|
- Rake::GemPackageTask.new(spec) do |pkg|
- pkg.need_tar = true
- end
+ spec = Gem::Specification.load('deltacloud-core.gemspec')
+ Rake::GemPackageTask.new(spec) do |pkg|
+ pkg.need_tar = true
  end

  desc "Install API"
data/bin/deltacloudd CHANGED
@@ -3,6 +3,18 @@
  require 'rubygems'
  require 'optparse'

+ # See if we can require +name+ and return +true+ if the library is there,
+ # +false+ otherwise. Note that, as a side effect, the library will be
+ # loaded
+ def library_present?(name)
+ begin
+ require name
+ true
+ rescue LoadError
+ false
+ end
+ end
+
  options = {
  :env => 'development'
  }
@@ -47,9 +59,11 @@ puts "Starting Deltacloud API :: #{ENV["API_DRIVER"]} :: http://#{ENV["API_HOST"
  puts

  dirname="#{File.dirname(__FILE__)}/.."
- platform = RUBY_PLATFORM[/java/] || 'ruby'

- if platform == 'java'
+ have_thin = library_present?('thin')
+ have_rerun = library_present?('rerun')
+
+ unless have_thin
  require 'rack'

  # We can't chdir with webrick so add our root directory
@@ -78,8 +92,6 @@ if platform == 'java'
  :Port => port,
  :AccessLog => [])
  else
- require 'thin'
-
  argv_opts = ARGV.clone
  argv_opts << ['start'] unless Thin::Runner.commands.include?(options[0])
  argv_opts << ['--address', ENV["API_HOST"] ]
@@ -91,17 +103,7 @@ else

  argv_opts.flatten!

- if options[:env] == "development"
- use_rerun = false
- begin
- require "rerun"
- use_rerun = true
- rescue
- # Do nothing
- end
- end
-
- if use_rerun
+ if have_rerun && options[:env] == "development"
  argv_opts.unshift "thin"
  command = argv_opts.join(" ")
  topdir = File::expand_path(File::join(File::dirname(__FILE__), ".."))
data/deltacloud.rb CHANGED
@@ -15,6 +15,7 @@ require 'deltacloud/models/storage_snapshot'
  require 'deltacloud/models/storage_volume'
  require 'deltacloud/models/bucket'
  require 'deltacloud/models/blob'
+ require 'deltacloud/models/load_balancer'

  require 'deltacloud/validation'
  require 'deltacloud/helpers'
data/lib/deltacloud/backend_capability.rb ADDED
@@ -0,0 +1,21 @@
+ module Deltacloud::BackendCapability
+
+ class Failure < StandardError
+ attr_reader :capability
+ def initialize(capability, msg='')
+ super(msg)
+ @capability = capability
+ end
+ end
+
+ attr_reader :capability
+ def with_capability(capability)
+ @capability = capability
+ end
+
+ def check_capability(backend)
+ if capability and !backend.respond_to?(capability)
+ raise Failure.new(capability, "#{capability} capability not supported by backend #{backend.class.name}")
+ end
+ end
+ end
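The new BackendCapability module lets an API operation declare the driver method it depends on and fail with a structured error (rendered by the new backend_capability_failure views) when the active driver lacks it. A rough usage sketch, assuming the deltacloud-core lib directory is on the load path; ShowBlobOperation and the bare stand-in driver object are invented for illustration, and the real wiring is presumably done in the server.rb and rabbit.rb changes elsewhere in this diff:

    # Hypothetical sketch of Deltacloud::BackendCapability in use.
    require 'deltacloud'   # assumes the deltacloud-core lib directory is on the load path

    class ShowBlobOperation
      extend Deltacloud::BackendCapability   # gains with_capability / check_capability
      with_capability :blob_data             # this operation needs driver#blob_data
    end

    driver = Object.new                      # stand-in for a driver without blob support
    begin
      ShowBlobOperation.check_capability(driver)
    rescue Deltacloud::BackendCapability::Failure => e
      warn "#{e.capability} not supported: #{e.message}"   # e.capability names the missing method
    end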
data/lib/deltacloud/base_driver/base_driver.rb CHANGED
@@ -215,6 +215,12 @@ module Deltacloud
  def blob_data(credentials, bucket_id, blob_id, opts)
  end

+ def create_blob(credentials, bucket_id, blob_id, blob_data, opts=nil)
+ end
+
+ def delete_blob(credentials, bucket_id, blob_id, opts=nil)
+ end
+
  def filter_on(collection, attribute, opts)
  return collection if opts.nil?
  return collection if opts[attribute].nil?
data/lib/deltacloud/base_driver/features.rb CHANGED
@@ -143,6 +143,14 @@ module Deltacloud
  end
  end

+ declare_feature :instances, :security_group do
+ description "Put instance in one or more security groups on launch"
+ operation :create do
+ param :security_group, :array, :optional, nil,
+ "Array of security group names"
+ end
+ end
+
  declare_feature :instances, :authentication_key do
  operation :create do
  param :keyname, :string, :optional, nil
@@ -169,5 +177,19 @@ module Deltacloud
  param :location, :string, :optional
  end
  end
+
+ declare_feature :instances, :register_to_load_balancer do
+ description "Register instance to load balancer"
+ operation :create do
+ param :load_balancer_id, :string, :optional
+ end
+ end
+
+ declare_feature :instances, :public_ip do
+ operation :create do
+ param :public_ip, :string, :optional
+ end
+ end
+
  end
  end
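Each declare_feature block above adds optional parameters to the instances create operation; a driver opts in with the feature class method and reads the matching keys from opts. A condensed sketch of that pattern, loosely modelled on the EC2 driver changes later in this diff (ExampleDriver, launch_backend and assign_ip are invented placeholders, not real Deltacloud or aws calls):

    # Sketch: advertising and consuming the new instance features in a driver.
    class ExampleDriver < Deltacloud::BaseDriver
      feature :instances, :security_group   # accept :security_group (array) on create
      feature :instances, :public_ip        # accept :public_ip on create

      def create_instance(credentials, image_id, opts={})
        launch_opts = {}
        launch_opts[:group_ids] = opts[:security_group] if opts[:security_group]
        instance = launch_backend(image_id, launch_opts)            # stand-in for the real cloud call
        assign_ip(instance, opts[:public_ip]) if opts[:public_ip]   # stand-in for e.g. associate_address
        instance
      end

      private

      def launch_backend(image_id, launch_opts); end   # placeholder
      def assign_ip(instance, ip); end                 # placeholder
    end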
data/lib/deltacloud/base_driver/mock_driver.rb CHANGED
@@ -4,55 +4,52 @@ require 'deltacloud/method_serializer'

  module Mock

- class S3 < RightAws::S3
- include MethodSerializer::Cache
-
- def self.cached_methods
- [
- :buckets
- ]
- end
-
- MethodSerializer::Cache::wrap_methods(self, :cache_dir => File.join(File.dirname(__FILE__), '..', '..', '..', '..', 'tests', 'ec2', 'support'))
- end
-
- class EC2 < AWS::EC2::Base
+ class Ec2 < Aws::Ec2

  include MethodSerializer::Cache

  def self.cached_methods
  [
  :describe_images,
+ :describe_images_by_owner,
  :describe_availability_zones,
- :describe_keypairs,
- :create_keypair,
- :run_instances,
+ :launch_instances,
  :describe_instances,
  :reboot_instances,
  :terminate_instances,
- :delete_keypair
+ :describe_key_pairs,
+ :create_key_pair,
+ :delete_key_pair,
+ :create_volume,
+ :describe_volumes,
+ :delete_volume,
+ :attach_volume,
+ :detach_volume,
+ :describe_snapshots,
+ :associate_address,
+ :try_create_snapshot,
  ]
  end

  MethodSerializer::Cache::wrap_methods(self, :cache_dir => File.join(File.dirname(__FILE__), '..', '..', '..', '..', 'tests', 'ec2', 'support'))
  end
+
  end


  # Replace original client with mock client
  Deltacloud::Drivers::EC2::EC2Driver.class_eval do
  alias_method :original_new_client, :new_client
- alias_method :original_s3_client, :s3_client

- def new_client(credentials, opts={})
- Mock::EC2.new(
- :access_key_id => credentials.user,
- :secret_access_key => credentials.password
- )
- end
-
- def s3_client(credentials)
- Mock::S3.new(credentials.user, credentials.password)
+ def new_client(credentials, provider = :ec2)
+ auth_credentials = { :access_key_id => credentials.user, :secret_access_key => credentials.password}
+ if provider == :elb
+ Mock::ELB.new(auth_credentials)
+ elsif provider == :s3
+ Mock::S3.new(auth_credentials)
+ else
+ Mock::Ec2.new(auth_credentials[:access_key_id], auth_credentials[:secret_access_key])
+ end
  end

  end
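The rewritten new_client above also hands out Mock::ELB and Mock::S3 clients, whose definitions fall outside this hunk. By analogy with Mock::Ec2, each is presumably a thin subclass of the corresponding aws-gem class that records and replays the listed calls through MethodSerializer::Cache; a hedged sketch of what Mock::S3 would look like:

    # Sketch only: Mock::S3 is referenced above but defined outside this hunk.
    module Mock
      class S3 < Aws::S3
        include MethodSerializer::Cache

        def self.cached_methods
          [ :buckets ]   # calls whose responses are cached under tests/ec2/support
        end

        MethodSerializer::Cache::wrap_methods(self,
          :cache_dir => File.join(File.dirname(__FILE__), '..', '..', '..', '..', 'tests', 'ec2', 'support'))
      end
    end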
data/lib/deltacloud/drivers/ec2/ec2_driver.rb CHANGED
@@ -1,5 +1,5 @@
  #
- # Copyright (C) 2009 Red Hat, Inc.
+ # Copyright (C) 2010 Red Hat, Inc.
  #
  # Licensed to the Apache Software Foundation (ASF) under one or more
  # contributor license agreements. See the NOTICE file distributed with
@@ -15,12 +15,10 @@
  # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  # License for the specific language governing permissions and limitations
  # under the License.
-
+ #

  require 'deltacloud/base_driver'
- require 'active_support'
- require 'AWS'
- require 'right_aws'
+ require 'aws'

  class Instance
  attr_accessor :keyname
@@ -35,546 +33,486 @@ end
  module Deltacloud
  module Drivers
  module EC2
- class EC2Driver < Deltacloud::BaseDriver
-
- def supported_collections
- DEFAULT_COLLECTIONS + [ :keys, :buckets ]
- end
-
- feature :instances, :user_data
- feature :instances, :authentication_key
- feature :images, :owner_id
- feature :buckets, :bucket_location
-
- define_hardware_profile('m1.small') do
- cpu 1
- memory 1.7 * 1024
- storage 160
- architecture 'i386'
- end
-
- define_hardware_profile('m1.large') do
- cpu 4
- memory 7.5 * 1024
- storage 850
- architecture 'x86_64'
- end
-
- define_hardware_profile('m1.xlarge') do
- cpu 8
- memory 15 * 1024
- storage 1690
- architecture 'x86_64'
- end
-
- define_hardware_profile('c1.medium') do
- cpu 5
- memory 1.7 * 1024
- storage 350
- architecture 'i386'
- end
-
- define_hardware_profile('c1.xlarge') do
- cpu 20
- memory 7 * 1024
- storage 1690
- architecture 'x86_64'
- end
-
- define_hardware_profile('m2.xlarge') do
- cpu 6.5
- memory 17.1 * 1024
- storage 420
- architecture 'x86_64'
- end
+ class EC2Driver < Deltacloud::BaseDriver

- define_hardware_profile('m2.2xlarge') do
- cpu 13
- memory 34.2 * 1024
- storage 850
- architecture 'x86_64'
- end
+ def supported_collections
+ DEFAULT_COLLECTIONS + [ :keys, :buckets ]
+ end

- define_hardware_profile('m2.4xlarge') do
- cpu 26
- memory 68.4 * 1024
- storage 1690
- architecture 'x86_64'
- end
+ feature :instances, :user_data
+ feature :instances, :authentication_key
+ feature :instances, :public_ip
+ feature :instances, :security_group
+ feature :images, :owner_id
+ feature :buckets, :bucket_location
+
+ define_hardware_profile('t1.micro') do
+ cpu 1
+ memory 0.63 * 1024
+ storage 160
+ architecture 'i386'
+ end

- define_instance_states do
- start.to( :pending ) .automatically
- pending.to( :running ) .automatically
- pending.to( :stopping ) .on( :stop )
- pending.to( :stopped ) .automatically
- stopped.to( :running ) .on( :start )
- running.to( :running ) .on( :reboot )
- running.to( :stopping ) .on( :stop )
- shutting_down.to( :stopped ) .automatically
- stopped.to( :finish ) .automatically
- end
+ define_hardware_profile('m1.small') do
+ cpu 1
+ memory 1.7 * 1024
+ storage 160
+ architecture 'i386'
+ end

- #
- # Images
- #
-
- def images(credentials, opts={} )
- ec2 = new_client(credentials)
- img_arr = []
- # if we know the image_id, we don't want to limit by owner_id, since this
- # will exclude public images
- if (opts and opts[:id])
- config = { :image_id => opts[:id] }
- else
- config = { :owner_id => "amazon" }
- config.merge!({ :owner_id => opts[:owner_id] }) if opts and opts[:owner_id]
- end
- safely do
- ec2.describe_images(config).imagesSet.item.each do |image|
- img_arr << convert_image(image)
- end
- end
- img_arr = filter_on( img_arr, :architecture, opts )
- img_arr.sort_by{|e| [e.owner_id, e.name]}
- end
+ define_hardware_profile('m1.large') do
+ cpu 4
+ memory 7.5 * 1024
+ storage 850
+ architecture 'x86_64'
+ end

- #
- # Realms
- #
+ define_hardware_profile('m1.xlarge') do
+ cpu 8
+ memory 15 * 1024
+ storage 1690
+ architecture 'x86_64'
+ end

- def realms(credentials, opts=nil)
- ec2 = new_client(credentials)
- realms = []
- safely do
- ec2.describe_availability_zones.availabilityZoneInfo.item.each do |ec2_realm|
- realms << convert_realm( ec2_realm )
- end
- end
- realms
- end
+ define_hardware_profile('c1.medium') do
+ cpu 5
+ memory 1.7 * 1024
+ storage 350
+ architecture 'i386'
+ end

- #
- # Instances
- #
- def instances(credentials, opts=nil)
- ec2 = new_client(credentials)
- instances = []
- safely do
- param = opts.nil? ? nil : opts[:id]
- ec2_instances = ec2.describe_instances.reservationSet
- return [] unless ec2_instances
- ec2_instances.item.each do |item|
- item.instancesSet.item.each do |ec2_instance|
- instances << convert_instance( ec2_instance, item.ownerId )
+ define_hardware_profile('c1.xlarge') do
+ cpu 20
+ memory 7 * 1024
+ storage 1690
+ architecture 'x86_64'
  end
- end
- end
- instances = filter_on( instances, :id, opts )
- instances = filter_on( instances, :state, opts )
- instances
- end

+ define_hardware_profile('m2.xlarge') do
+ cpu 6.5
+ memory 17.1 * 1024
+ storage 420
+ architecture 'x86_64'
+ end

- def create_instance(credentials, image_id, opts)
- ec2 = new_client( credentials )
- realm_id = opts[:realm_id]
- safely do
- image = image(credentials, :id => image_id )
- hwp = find_hardware_profile(credentials, opts[:hwp_id], image.id)
- ec2_instances = ec2.run_instances(
- :image_id => image.id,
- :user_data => opts[:user_data],
- :key_name => opts[:keyname],
- :availability_zone => realm_id,
- :monitoring_enabled => true,
- :instance_type => hwp.name,
- :disable_api_termination => false,
- :instance_initiated_shutdown_behavior => 'terminate',
- :security_group => opts[:security_group]
- )
- return convert_instance( ec2_instances.instancesSet.item.first, 'pending' )
- end
- end
+ define_hardware_profile('m2.2xlarge') do
+ cpu 13
+ memory 34.2 * 1024
+ storage 850
+ architecture 'x86_64'
+ end

- def generate_instance(ec2, id, backup)
- begin
- this_instance = ec2.describe_instances( :instance_id => id ).reservationSet.item.first.instancesSet.item.first
- convert_instance(this_instance, this_instance.ownerId)
- rescue Exception => e
- puts "WARNING: ignored error during instance refresh: #{e.message}"
- # at this point, the action has succeeded but our follow-up
- # "describe_instances" failed for some reason. Create a simple Instance
- # object with only the ID and new state in place
- state = convert_state(backup.instancesSet.item.first.currentState.name)
- Instance.new( {
- :id => id,
- :state => state,
- :actions => instance_actions_for( state ),
- } )
- end
- end
+ define_hardware_profile('m2.4xlarge') do
+ cpu 26
+ memory 68.4 * 1024
+ storage 1690
+ architecture 'x86_64'
+ end

- def reboot_instance(credentials, id)
- ec2 = new_client(credentials)
- backup = ec2.reboot_instances( :instance_id => id )
+ define_instance_states do
+ start.to( :pending ) .automatically
+ pending.to( :running ) .automatically
+ pending.to( :stopping ) .on( :stop )
+ pending.to( :stopped ) .automatically
+ stopped.to( :running ) .on( :start )
+ running.to( :running ) .on( :reboot )
+ running.to( :stopping ) .on( :stop )
+ shutting_down.to( :stopped ) .automatically
+ stopped.to( :finish ) .automatically
+ end

- generate_instance(ec2, id, backup)
- end
+ def images(credentials, opts={})
+ ec2 = new_client(credentials)
+ img_arr = []
+ opts ||= {}
+ if opts[:id]
+ safely do
+ img_arr = ec2.describe_images(opts[:id]).collect do |image|
+ convert_image(image)
+ end
+ end
+ else
+ owner_id = opts[:owner_id] || "amazon"
+ safely do
+ img_arr = ec2.describe_images_by_owner(owner_id, "machine").collect do |image|
+ convert_image(image)
+ end
+ end
+ end
+ img_arr = filter_on( img_arr, :architecture, opts )
+ img_arr.sort_by { |e| [e.owner_id, e.name] }
+ end

- def stop_instance(credentials, id)
- ec2 = new_client(credentials)
- backup = ec2.terminate_instances( :instance_id => id )
+ def realms(credentials, opts={})
+ ec2 = new_client(credentials)
+ zone_id = opts ? opts[:id] : nil
+ safely do
+ return ec2.describe_availability_zones(zone_id).collect do |realm|
+ convert_realm(realm)
+ end
+ end
+ end

- generate_instance(ec2, id, backup)
- end
+ def instances(credentials, opts={})
+ ec2 = new_client(credentials)
+ inst_arr = []
+ safely do
+ inst_arr = ec2.describe_instances.collect do |instance|
+ convert_instance(instance) if instance
+ end.flatten
+ end
+ inst_arr = filter_on( inst_arr, :id, opts )
+ filter_on( inst_arr, :state, opts )
+ end

- def destroy_instance(credentials, id)
- ec2 = new_client(credentials)
- backup = ec2.terminate_instances( :instance_id => id )
+ def create_instance(credentials, image_id, opts={})
+ ec2 = new_client(credentials)
+ instance_options = {}
+ instance_options.merge!(:user_data => opts[:user_data]) if opts[:user_data]
+ instance_options.merge!(:key_name => opts[:key_name]) if opts[:key_name]
+ instance_options.merge!(:availability_zone => opts[:realm_id]) if opts[:realm_id]
+ instance_options.merge!(:instance_type => opts[:hwp_id]) if opts[:hwp_id]
+ instance_options.merge!(:group_ids => opts[:security_group]) if opts[:security_group]
+ safely do
+ new_instance = convert_instance(ec2.launch_instances(image_id, instance_options).first)
+ if opts[:public_ip]
+ ec2.associate_address(new_instance.id, opts[:public_ip])
+ end
+ new_instance
+ end
+ end
+
+ def reboot_instance(credentials, instance_id)
+ ec2 = new_client(credentials)
+ if ec2.reboot_instances([instance_id])
+ instance(credentials, instance_id)
+ else
+ raise Deltacloud::BackendError.new(500, "Instance", "Instance reboot failed", "")
+ end
+ end

- generate_instance(ec2, id, backup)
- end
+ def destroy_instance(credentials, instance_id)
+ ec2 = new_client(credentials)
+ puts "Terminating instance #{instance_id}"
+ if ec2.terminate_instances([instance_id])
+ instance(credentials, instance_id)
+ else
+ raise Deltacloud::BackendError.new(500, "Instance", "Instance cannot be terminated", "")
+ end
+ end

- #
- # Storage Volumes
- #
+ alias :stop_instance :destroy_instance

- def storage_volumes(credentials, opts=nil)
- ec2 = new_client( credentials )
- volumes = []
- safely do
- if (opts)
- ec2.describe_volumes(:volume_id => opts[:id]).volumeSet.item.each do |ec2_volume|
- volumes << convert_volume( ec2_volume )
+ def keys(credentials, opts={})
+ ec2 = new_client(credentials)
+ opts ||= {}
+ safely do
+ ec2.describe_key_pairs(opts[:id] || nil).collect do |key|
+ convert_key(key)
+ end
+ end
  end
- else
- ec2_volumes = ec2.describe_volumes.volumeSet
- return [] unless ec2_volumes
- ec2_volumes.item.each do |ec2_volume|
- volumes << convert_volume( ec2_volume )
+
+ def key(credentials, opts={})
+ keys(credentials, :id => opts[:id]).first
  end
- end
- end
- volumes
- end

- def create_storage_volume(credentials, opts)
- ec2 = new_client( credentials )
- safely do
- ec2_volume = ec2.create_volume(
- :availability_zone => opts[:realm_id],
- :size => opts[:capacity]
- )
- return convert_volume( ec2_volume )
- end
- end
+ def create_key(credentials, opts={})
+ ec2 = new_client(credentials)
+ safely do
+ convert_key(ec2.create_key_pair(opts[:key_name]))
+ end
+ end

- def destroy_storage_volume(credentials, id)
- ec2 = new_client(credentials)
- ec2.delete_volume( :volume_id => id )
- storage_volumes( credentials, :id => id ).first
- end
+ def destroy_key(credentials, opts={})
+ ec2 = new_client(credentials)
+ original_key = key(credentials, opts)
+ safely do
+ ec2.delete_key_pair(original_key.id)
+ original_key= original_key.state = "DELETED"
+ end
+ original_key
+ end

- def attach_storage_volume(credentials, opts)
- ec2 = new_client(credentials)
- id = opts[:id]
- ec2.attach_volume(
- :volume_id => id,
- :instance_id => opts[:instance_id],
- :device => opts[:device]
- )
- storage_volumes( credentials, :id => id ).first
- end
+ def buckets(credentials, opts)
+ buckets = []
+ safely do
+ s3_client = new_client(credentials, :s3)
+ bucket_list = s3_client.buckets
+ bucket_list.each do |current|
+ buckets << convert_bucket(current)
+ end
+ end
+ filter_on(buckets, :id, opts)
+ end

- def detach_storage_volume(credentials, id)
- ec2 = new_client(credentials)
- ec2.detach_volume( :volume_id => id )
- storage_volumes( credentials, :id => id ).first
- end
-
- #
- # Storage Snapshots
- #
-
- def storage_snapshots(credentials, opts=nil)
- ec2 = new_client( credentials )
- snapshots = []
- safely do
- if (opts)
- ec2.describe_snapshots(:owner => 'self', :snapshot_id => opts[:id]).snapshotSet.item.each do |ec2_snapshot|
- snapshots << convert_snapshot( ec2_snapshot )
- end
- else
- ec2_snapshots = ec2.describe_snapshots(:owner => 'self').snapshotSet
- return [] unless ec2_snapshots
- ec2_snapshots.item.each do |ec2_snapshot|
- snapshots << convert_snapshot( ec2_snapshot )
+ def create_bucket(credentials, name, opts={})
+ bucket = nil
+ safely do
+ s3_client = new_client(credentials, :s3)
+ bucket_location = opts['location']
+ if bucket_location
+ bucket = Aws::S3::Bucket.create(s3_client, name, true, nil, :location => bucket_location)
+ else
+ bucket = Aws::S3::Bucket.create(s3_client, name, true)
+ end
+ end
+ convert_bucket(bucket)
  end
- end
- end
- snapshots
- end

- def key(credentials, opts=nil)
- keys(credentials, opts).first
- end
+ def delete_bucket(credentials, name, opts={})
+ s3_client = new_client(credentials, :s3)
+ safely do
+ s3_client.interface.delete_bucket(name)
+ end
+ end

- def keys(credentials, opts=nil)
- ec2 = new_client( credentials )
- opts[:key_name] = opts[:id] if opts and opts[:id]
- keypairs = ec2.describe_keypairs(opts || {})
- result = []
- safely do
- keypairs.keySet.item.each do |keypair|
- result << convert_key(keypair)
- end
- end
- result
- end
+ def blobs(credentials, opts = nil)
+ s3_client = new_client(credentials, :s3)
+ blobs = []
+ safely do
+ s3_bucket = s3_client.bucket(opts['bucket'])
+ s3_bucket.keys({}, true).each do |s3_object|
+ blobs << convert_object(s3_object)
+ end
+ end
+ blobs = filter_on(blobs, :id, opts)
+ blobs
+ end

- def create_key(credentials, opts={})
- key = Key.new
- ec2 = new_client( credentials )
- safely do
- key = convert_key(ec2.create_keypair(opts))
- end
- return key
- end
+ #--
+ # Create Blob
+ #--
+ def create_blob(credentials, bucket_id, blob_id, data = nil, opts = nil)
+ s3_client = new_client(credentials, :s3)
+ #data is a construct with the temporary file created by server @.tempfile
+ #also file[:type] will give us the content-type
+ res = nil
+ # File stream needs to be reopened in binary mode for whatever reason
+ file = File::open(data[:tempfile].path, 'rb')
+ safely do
+ res = s3_client.interface.put(bucket_id,
+ blob_id,
+ file,
+ {"Content-Type" => data[:type]})
+ end
+ #create a new Blob object and return that
+ Blob.new( { :id => blob_id,
+ :bucket => bucket_id,
+ :content_length => data[:tempfile].length,
+ :content_type => data[:type],
+ :last_modified => ''
+ }
+ )
+ end

- def destroy_key(credentials, opts={})
- safely do
- ec2 = new_client( credentials )
- ec2.delete_keypair(opts)
- end
- end
+ #--
+ # Delete Blob
+ #--
+ def delete_blob(credentials, bucket_id, blob_id, opts=nil)
+ s3_client = new_client(credentials, :s3)
+ s3_client.interface.delete(bucket_id, blob_id)
+ end

- def valid_credentials?(credentials)
- client = new_client(credentials)
- # FIXME: We need to do this call to determine if
- # EC2 is working with given credentials. There is no
- # other way to check, if given credentials are valid or not.
- realms = client.describe_availability_zones rescue false
- return realms ? true : false
- end

- #--
- # Buckets
- #-- get a list of your buckets from the s3 service
- def buckets(credentials, opts)
- buckets = []
- safely do
- s3_client = s3_client(credentials)
- bucket_list = s3_client.buckets
- bucket_list.each do |current|
- buckets << convert_bucket(current)
- end
- end
- buckets = filter_on(buckets, :id, opts)
- buckets
- end
+ def blob_data(credentials, bucket_id, blob_id, opts)
+ s3_client = new_client(credentials, :s3)
+ s3_client.interface.get(bucket_id, blob_id) do |chunk|
+ yield chunk
+ end
+ end

- #--
- # Create bucket
- #--
- #valid values for bucket location: 'EU'|'us-west1'|'ap-southeast-1' - if you
- #don't specify a location then by default buckets are created in 'us-east'
- #[but if you *do* specify 'us-east' things blow up]
- def create_bucket(credentials, name, opts={})
- bucket = nil
- safely do
- begin
- s3_client = s3_client(credentials)
- bucket_location = opts['location']
- if bucket_location
- bucket = RightAws::S3::Bucket.create(s3_client, name, true, nil, :location => bucket_location)
- else
- bucket = RightAws::S3::Bucket.create(s3_client, name, true)
- end #if
- rescue RightAws::AwsError => e
- raise e unless e.message =~ /BucketAlreadyExists/
- raise Deltacloud::BackendError.new(409, e.class.to_s, e.message, e.backtrace)
- end #begin
- end #do
- convert_bucket(bucket)
- end
+ def storage_volumes(credentials, opts={})
+ ec2 = new_client( credentials )
+ volume_list = (opts and opts[:id]) ? opts[:id] : nil
+ safely do
+ ec2.describe_volumes(volume_list).collect do |volume|
+ convert_volume(volume)
+ end
+ end
+ end

- #--
- # Delete_bucket
- #--
- def delete_bucket(credentials, name, opts={})
- s3_client = s3_client(credentials)
- safely do
- s3_client.interface.delete_bucket(name)
- end
- end
+ def create_storage_volume(credentials, opts=nil)
+ ec2 = new_client(credentials)
+ opts ||= {}
+ opts[:snapshot_id] ||= ""
+ opts[:capacity] ||= "1"
+ opts[:realm_id] ||= realms(credentials).first.id
+ safely do
+ convert_volume(ec2.create_volume(opts[:snapshot_id], opts[:capacity], opts[:realm_id]))
+ end
+ end

412
- # Blobs
413
- #--
414
- def blobs(credentials, opts = nil)
415
- s3_client = s3_client(credentials)
416
- blobs = []
417
- safely do
418
- s3_bucket = s3_client.bucket(opts['bucket'])
419
- s3_bucket.keys({}, true).each do |s3_object|
420
- blobs << convert_object(s3_object)
421
- end
422
- end
423
- blobs = filter_on(blobs, :id, opts)
424
- blobs
425
- end
346
+ def destroy_storage_volume(credentials, opts={})
347
+ ec2 = new_client(credentials)
348
+ safely do
349
+ unless ec2.delete_volume(opts[:id])
350
+ raise Deltacloud::BackendError.new(500, "StorageVolume", "Cannot delete storage volume")
351
+ end
352
+ storage_volume(credentials, opts[:id])
353
+ end
354
+ end
426
355
 
427
- #--
428
- # Blob data
429
- #--
430
- def blob_data(credentials, bucket_id, blob_id, opts)
431
- s3_client = s3_client(credentials)
432
- s3_client.interface.get(bucket_id, blob_id) do |chunk|
433
- yield chunk
434
- end
435
- end
356
+ def attach_storage_volume(credentials, opts={})
357
+ ec2 = new_client(credentials)
358
+ safely do
359
+ convert_volume(ec2.attach_volume(opts[:id], opts[:instance_id], opts[:device]))
360
+ end
361
+ end
436
362
 
437
- private
363
+ def detach_storage_volume(credentials, opts={})
364
+ ec2 = new_client(credentials)
365
+ safely do
366
+ convert_volume(ec2.detach_volume(opts[:id], opts[:instance_id], opts[:device], true))
367
+ end
368
+ end
438
369
 
439
- def new_client(credentials)
440
- opts = {
441
- :access_key_id => credentials.user,
442
- :secret_access_key => credentials.password
443
- }
444
- opts[:server] = ENV['DCLOUD_EC2_URL'] if ENV['DCLOUD_EC2_URL']
445
- safely do
446
- AWS::EC2::Base.new(opts)
447
- end
448
- end
370
+ def storage_snapshots(credentials, opts={})
371
+ ec2 = new_client(credentials)
372
+ snapshot_list = (opts and opts[:id]) ? opts[:id] : []
373
+ safely do
374
+ ec2.describe_snapshots(snapshot_list).collect do |snapshot|
375
+ convert_snapshot(snapshot)
376
+ end
377
+ end
378
+ end
449
379
 
450
- def convert_key(key)
451
- Key.new({
452
- :id => key['keyName'],
453
- :fingerprint => key['keyFingerprint'],
454
- :credential_type => :key,
455
- :pem_rsa_key => key['keyMaterial']
456
- })
457
- end
380
+ def create_storage_snapshot(credentials, opts={})
381
+ ec2 = new_client(credentials)
382
+ safely do
383
+ convert_snapshot(ec2.try_create_snapshot(opts[:volume_id]))
384
+ end
385
+ end
458
386
 
459
- def convert_image(ec2_image)
460
- Image.new( {
461
- :id=>ec2_image['imageId'],
462
- :name=>ec2_image['name'] || ec2_image['imageId'],
463
- :description=>ec2_image['description'] || ec2_image['imageLocation'] || '',
464
- :owner_id=>ec2_image['imageOwnerId'],
465
- :architecture=>ec2_image['architecture'],
466
- } )
467
- end
387
+ def destroy_storage_snapshot(credentials, opts={})
388
+ ec2 = new_client(credentials)
389
+ safely do
390
+ unless convert_snapshot(opts[:id])
391
+ raise Deltacloud::BackendError.new(500, "StorageSnapshot", "Cannot destroy this snapshot")
392
+ end
393
+ end
394
+ end
468
395
 
469
- def convert_realm(ec2_realm)
470
- Realm.new( {
471
- :id=>ec2_realm['zoneName'],
472
- :name=>ec2_realm['regionName'],
473
- :limit=>ec2_realm['zoneState'].eql?('available') ? :unlimited : 0,
474
- :state=>ec2_realm['zoneState'].upcase,
475
- } )
476
- end
396
+ private
477
397
 
478
- def convert_state(ec2_state)
479
- case ec2_state
480
- when "terminated"
481
- "STOPPED"
482
- when "stopped"
483
- "STOPPED"
484
- when "running"
485
- "RUNNING"
486
- when "pending"
487
- "PENDING"
488
- when "shutting-down"
489
- "STOPPED"
490
- end
491
- end
398
+ def new_client(credentials, type = :ec2)
399
+ case type
400
+ when :ec2 then Aws::Ec2.new(credentials.user, credentials.password)
401
+ when :s3 then Aws::S3.new(credentials.user, credentials.password)
402
+ end
403
+ end
404
+
405
+ def convert_bucket(s3_bucket)
406
+ #get blob list:
407
+ blob_list = []
408
+ s3_bucket.keys.each do |s3_object|
409
+ blob_list << s3_object.name
410
+ end
411
+ #can use AWS::S3::Owner.current.display_name or current.id
412
+ Bucket.new(
413
+ :id => s3_bucket.name,
414
+ :name => s3_bucket.name,
415
+ :size => s3_bucket.keys.length,
416
+ :blob_list => blob_list
417
+ )
418
+ end
492
419
 
493
- def convert_instance(ec2_instance, owner_id)
494
- state = convert_state(ec2_instance['instanceState']['name'])
495
- realm_id = ec2_instance['placement']['availabilityZone']
496
- (realm_id = nil ) if ( realm_id == '' )
497
- hwp_name = ec2_instance['instanceType']
498
- instance = Instance.new( {
499
- :id=>ec2_instance['instanceId'],
500
- :name => ec2_instance['imageId'],
501
- :state=>state,
502
- :image_id=>ec2_instance['imageId'],
503
- :owner_id=>owner_id,
504
- :realm_id=>realm_id,
505
- :public_addresses=>( ec2_instance['dnsName'] == '' ? [] : [ec2_instance['dnsName']] ),
506
- :private_addresses=>( ec2_instance['privateDnsName'] == '' ? [] : [ec2_instance['privateDnsName']] ),
507
- :instance_profile =>InstanceProfile.new(hwp_name),
508
- :actions=>instance_actions_for( state ),
509
- :keyname => ec2_instance['keyName'],
510
- :launch_time => ec2_instance['launchTime']
511
- } )
512
- instance.authn_error = "Key not set for instance" unless ec2_instance['keyName']
513
- return instance
514
- end
420
+ def convert_realm(realm)
421
+ Realm.new(
422
+ :id => realm[:zone_name],
423
+ :name => realm[:zone_name],
424
+ :state => realm[:zone_state],
425
+ :limit => realm[:zone_state].eql?('available') ? :unlimited : 0
426
+ )
427
+ end
515
428
 
516
- def convert_volume(ec2_volume)
517
- attachment = ec2_volume.attachmentSet.item.first if ec2_volume.attachmentSet
518
- attachment ||= { }
519
- StorageVolume.new( {
520
- :id=>ec2_volume['volumeId'],
521
- :created=>ec2_volume['createTime'],
522
- :state=>ec2_volume['status'].upcase,
523
- :capacity=>ec2_volume['size'],
524
- :instance_id=>attachment['instanceId'],
525
- :device=>attachment['device'],
526
- } )
527
- end
429
+ def convert_image(image)
430
+ # There is not support for 'name' for now
431
+ Image.new(
432
+ :id => image[:aws_id],
433
+ :name => image[:aws_name] || image[:aws_id],
434
+ :description => image[:aws_description] || image[:aws_location],
435
+ :owner_id => image[:aws_owner],
436
+ :architecture => image[:aws_architecture],
437
+ :state => image[:state]
438
+ )
439
+ end
528
440
 
529
- def convert_snapshot(ec2_snapshot)
530
- StorageSnapshot.new( {
531
- :id=>ec2_snapshot['snapshotId'],
532
- :state=>ec2_snapshot['status'].upcase,
533
- :storage_volume_id=>ec2_snapshot['volumeId'],
534
- :created=>ec2_snapshot['startTime'],
535
- } )
536
- end
441
+ def convert_instance(instance)
442
+ Instance.new(
443
+ :id => instance[:aws_instance_id],
444
+ :name => instance[:aws_image_id],
445
+ :state => convert_state(instance[:aws_state]),
446
+ :image_id => instance[:aws_image_id],
447
+ :owner_id => instance[:aws_owner],
448
+ :actions => instance_actions_for(convert_state(instance[:aws_state])),
449
+ :key_name => instance[:ssh_key_name],
450
+ :launch_time => instance[:aws_launch_time],
451
+ :instance_profile => InstanceProfile.new(instance[:aws_instance_type]),
452
+ :realm_id => instance[:aws_availability_zone],
453
+ :private_addresses => instance[:private_dns_name],
454
+ :public_addresses => instance[:dns_name]
455
+ )
456
+ end
537
457
 
538
- def s3_client(credentials)
539
- safely do
540
- s3_client = RightAws::S3.new(credentials.user, credentials.password)
541
- end
542
- end
458
+ def convert_key(key)
459
+ Key.new(
460
+ :id => key[:aws_key_name],
461
+ :fingerprint => key[:aws_fingerprint],
462
+ :credential_type => :key,
463
+ :pem_rsa_key => key[:aws_material],
464
+ :state => "AVAILABLE"
465
+ )
466
+ end
543
467
 
544
- def convert_bucket(s3_bucket)
545
- #get blob list:
546
- blob_list = []
547
- s3_bucket.keys.each do |s3_object|
548
- blob_list << s3_object.name
549
- end
550
- #can use AWS::S3::Owner.current.display_name or current.id
551
- Bucket.new( { :id => s3_bucket.name,
552
- :name => s3_bucket.name,
553
- :size => s3_bucket.keys.length,
554
- :blob_list => blob_list
555
- }
556
- )
557
- end
468
+ def convert_volume(volume)
469
+ StorageVolume.new(
470
+ :id => volume[:aws_id],
471
+ :created => volume[:aws_created_at],
472
+ :state => volume[:aws_status] ? volume[:aws_status].upcase : 'unknown',
473
+ :capacity => volume[:aws_size],
474
+ :instance_id => volume[:aws_instance_id],
475
+ :realm_id => volume[:zone],
476
+ :device => volume[:aws_device],
477
+ # TODO: the available actions should be tied to the current
478
+ # volume state
479
+ :actions => [:attach, :detach, :destroy]
480
+ )
481
+ end
558
482
 
559
- def convert_object(s3_object)
560
- Blob.new({ :id => s3_object.name,
561
- :bucket => s3_object.bucket.name.to_s,
562
- :content_length => s3_object.size,
563
- :content_type => s3_object.content_type,
564
- :last_modified => s3_object.last_modified
565
- })
566
- end
483
+ def convert_snapshot(snapshot)
484
+ StorageSnapshot.new(
485
+ :id => snapshot[:aws_id],
486
+ :state => snapshot[:aws_status],
487
+ :storage_volume_id => snapshot[:aws_volume_id],
488
+ :created => snapshot[:aws_started_at]
489
+ )
490
+ end
567
491
 
568
- def catched_exceptions_list
569
- {
570
- :auth => [ AWS::AuthFailure ],
571
- :error => [],
572
- :glob => [ /AWS::(\w+)/ ]
573
- }
574
- end
492
+ def convert_state(ec2_state)
493
+ case ec2_state
494
+ when "terminated"
495
+ "STOPPED"
496
+ when "stopped"
497
+ "STOPPED"
498
+ when "running"
499
+ "RUNNING"
500
+ when "pending"
501
+ "PENDING"
502
+ when "shutting-down"
503
+ "STOPPED"
504
+ end
505
+ end
575
506
 
576
- end
507
+ def catched_exceptions_list
508
+ {
509
+ :auth => [], # [ ::Aws::AuthFailure ],
510
+ :error => [ ::Aws::AwsError ],
511
+ :glob => [ /AWS::(\w+)/ ]
512
+ }
513
+ end
577
514
 
515
+ end
578
516
  end
579
517
  end
580
518
  end
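Throughout the rewritten driver, the right_aws/AWS client objects and their nested imagesSet/instancesSet structures are replaced by the aws gem, which returns plain hashes keyed with :aws_* symbols that the convert_* helpers consume directly. A hedged illustration of the hash shape convert_instance expects (the keys are taken from the code above; all values are invented):

    # Illustration only: a hash shaped like one element of Aws::Ec2#describe_instances,
    # limited to the keys convert_instance reads above; all values are made up.
    sample_instance = {
      :aws_instance_id       => 'i-0123abcd',
      :aws_image_id          => 'ami-deadbeef',
      :aws_state             => 'running',
      :aws_owner             => '123456789012',
      :ssh_key_name          => 'default',
      :aws_launch_time       => '2010-09-01T12:00:00.000Z',
      :aws_instance_type     => 'm1.small',
      :aws_availability_zone => 'us-east-1a',
      :private_dns_name      => 'ip-10-0-0-1.ec2.internal',
      :dns_name              => 'ec2-203-0-113-10.compute-1.amazonaws.com'
    }
    # Feeding this through convert_instance would yield an Instance whose state is
    # "RUNNING" (via convert_state) and whose profile is InstanceProfile.new('m1.small').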