bosh_warden_cpi 1.5.0.pre.3

data/README ADDED
@@ -0,0 +1,16 @@
+ # BOSH Warden Cloud Provider
+
+ # Running with Vagrant up
+
+ First things first: if you find that the "chef/cookbooks" directory is not present, run
+
+     cd chef
+     librarian-chef install
+
+
+ Later on, after the vagrant box is set up and the director + Warden is successfully running,
+ you'll probably have to download a stemcell from S3 and then upload it to the director (see the sketch after this file).
+
+ We need a Warden-friendly stemcell that includes the director. The micro_bosh stemcell did not work; let's try the vSphere one next.
+
+ No worries, we'll soon work this into the Vagrantfile.
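
A sketch of that stemcell step with the v1-era bosh CLI, assuming the CLI is installed and already targeting the director (the stemcell name is a placeholder, since a Warden-friendly stemcell is still being sorted out above):

    bosh public stemcells                  # list the stemcells published on s3
    bosh download public stemcell <name>
    bosh upload stemcell <name>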
data/db/migrations/20130312211408_initial.rb ADDED
@@ -0,0 +1,22 @@
+ Sequel.migration do
+   up do
+     create_table(:warden_vm) do
+       primary_key :id
+       String :container_id
+     end
+
+     create_table(:warden_disk) do
+       primary_key :id
+       foreign_key :vm_id, :warden_vm
+       Integer :device_num
+       String :device_path
+       String :image_path
+       Boolean :attached, :default => false
+     end
+   end
+
+   down do
+     drop_table :warden_disk
+     drop_table :warden_vm
+   end
+ end
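
The migration above is plain Sequel; the director normally runs it, but a minimal sketch of applying it by hand looks like this (the SQLite path is an assumption for illustration; the real connection comes from the director):

    require "sequel"
    Sequel.extension :migration

    # Hypothetical database location for a local experiment.
    db = Sequel.sqlite("/tmp/warden_cpi.db")

    # Applies every migration in the directory, in timestamp order.
    Sequel::Migrator.run(db, "db/migrations")

After this runs, the :warden_vm and :warden_disk tables exist and the Sequel models further down can be loaded.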
data/lib/cloud/warden.rb ADDED
@@ -0,0 +1,41 @@
+ require "forwardable"
+ require "yajl"
+ require "sequel"
+ require "fileutils"
+ require "tempfile"
+ require "securerandom"
+ require "etc"
+
+ require "common/exec"
+ require "common/thread_pool"
+ require "common/thread_formatter"
+
+ require "cloud"
+ require "cloud/warden/helpers"
+ require "cloud/warden/device_pool"
+ require "cloud/warden/cloud"
+ require "cloud/warden/version"
+ require "cloud/warden/models/vm"
+ require "cloud/warden/models/disk"
+
+ require "warden/client"
+
+ module Bosh
+   module Clouds
+     class Warden
+       extend Forwardable
+
+       def_delegators :@delegate,
+                      :create_stemcell, :delete_stemcell,
+                      :create_vm, :delete_vm, :reboot_vm,
+                      :configure_networks,
+                      :create_disk, :delete_disk,
+                      :attach_disk, :detach_disk,
+                      :validate_deployment
+
+       def initialize(options)
+         @delegate = WardenCloud::Cloud.new(options)
+       end
+     end
+   end
+ end
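
For orientation, a minimal sketch of constructing the CPI from this entry point. It assumes Bosh::Clouds::Config has already been configured by the hosting process (it supplies the logger and the database handle the models need), and the option values shown are simply the defaults from cloud.rb below:

    require "cloud/warden"

    options = {
      "warden"   => { "unix_domain_path" => "/tmp/warden.sock" },
      "stemcell" => { "root" => "/var/vcap/stemcell" },
      "disk"     => { "root" => "/var/vcap/store/disk", "fs" => "ext4" },
      "agent"    => {},  # merged verbatim into each VM's agent env
    }

    cpi = Bosh::Clouds::Warden.new(options)

    # Every call is forwarded to WardenCloud::Cloud by def_delegators.
    stemcell_id = cpi.create_stemcell("/path/to/stemcell.tgz", {})

The thin wrapper exists so the director can instantiate Bosh::Clouds::Warden by naming convention while the real logic lives in Bosh::WardenCloud::Cloud.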
data/lib/cloud/warden/cloud.rb ADDED
@@ -0,0 +1,501 @@
+ module Bosh::WardenCloud
+   class Cloud < Bosh::Cloud
+
+     include Helpers
+
+     DEFAULT_WARDEN_SOCK = "/tmp/warden.sock"
+     DEFAULT_STEMCELL_ROOT = "/var/vcap/stemcell"
+     DEFAULT_DISK_ROOT = "/var/vcap/store/disk"
+     DEFAULT_FS_TYPE = "ext4"
+     DEFAULT_POOL_SIZE = 128
+     DEFAULT_POOL_START_NUMBER = 10
+     DEFAULT_DEVICE_PREFIX = "/dev/sd"
+
+     DEFAULT_SETTINGS_FILE = "/var/vcap/bosh/settings.json"
+
+     attr_accessor :logger
+
+     ##
+     # Initialize BOSH Warden CPI
+     # @param [Hash] options CPI options
+     #
+     def initialize(options)
+       @logger = Bosh::Clouds::Config.logger
+
+       @agent_properties = options["agent"] || {}
+       @warden_properties = options["warden"] || {}
+       @stemcell_properties = options["stemcell"] || {}
+       @disk_properties = options["disk"] || {}
+
+       setup_warden
+       setup_stemcell
+       setup_disk
+
+       setup_pool
+     end
+
+     ##
+     # Create a stemcell from a stemcell image
+     # This method simply untars the stemcell image to a local directory.
+     # Warden can use the rootfs within the image as a base fs.
+     # @param [String] image_path local path to a stemcell image
+     # @param [Hash] cloud_properties not used
+     # @return [String] stemcell id
+     def create_stemcell(image_path, cloud_properties)
+       not_used(cloud_properties)
+
+       stemcell_id = uuid("stemcell")
+       stemcell_dir = stemcell_path(stemcell_id)
+
+       with_thread_name("create_stemcell(#{image_path}, _)") do
+
+         # Extract the stemcell tarball
+         @logger.info("Extracting stemcell from #{image_path} to #{stemcell_dir}")
+         FileUtils.mkdir_p(stemcell_dir)
+
+         # This command needs privilege because the stemcell contains device
+         # files, which cannot be untarred without privilege
+         sudo "tar -C #{stemcell_dir} -xzf #{image_path} 2>&1"
+
+         stemcell_id
+       end
+     rescue => e
+       sudo "rm -rf #{stemcell_dir}"
+
+       raise e
+     end
+
+     ##
+     # Delete the stemcell
+     # @param [String] stemcell_id id of the stemcell to be deleted
+     # @return [void]
+     def delete_stemcell(stemcell_id)
+       with_thread_name("delete_stemcell(#{stemcell_id}, _)") do
+         stemcell_dir = stemcell_path(stemcell_id)
+         sudo "rm -rf #{stemcell_dir}"
+
+         nil
+       end
+     end
+
+     ##
+     # Create a container in warden
+     #
+     # Limitation: we do not support creating a VM with multiple network NICs.
+     #
+     # @param [String] agent_id UUID for bosh agent
+     # @param [String] stemcell_id stemcell id
+     # @param [Hash] resource_pool not used
+     # @param [Hash] networks list of networks and their settings needed for this VM
+     # @param [optional, String, Array] disk_locality not used
+     # @param [optional, Hash] env environment that will be passed to this vm
+     # @return [String] vm_id
+     def create_vm(agent_id, stemcell_id, resource_pool,
+                   networks, disk_locality = nil, env = nil)
+       not_used(resource_pool)
+       not_used(disk_locality)
+       not_used(env)
+
+       vm = nil
+
+       with_thread_name("create_vm(#{agent_id}, #{stemcell_id}, #{networks})") do
+
+         stemcell_path = stemcell_path(stemcell_id)
+
+         if networks.size > 1
+           raise ArgumentError, "More than one NIC is not supported"
+         end
+
+         unless Dir.exist?(stemcell_path)
+           cloud_error("Cannot find stemcell(#{stemcell_id})")
+         end
+
+         vm = Models::VM.create
+
+         # Create container
+         handle = with_warden do |client|
+           request = Warden::Protocol::CreateRequest.new
+           request.rootfs = stemcell_path
+           if networks.first[1]["type"] != "dynamic"
+             request.network = networks.first[1]["ip"]
+           end
+
+           response = client.call(request)
+           response.handle
+         end
+         vm.container_id = handle
+
+         # Agent settings
+         env = generate_agent_env(vm, agent_id, networks)
+         set_agent_env(vm.container_id, env)
+
+         # Notice: this is a little hacky, but it's the way it is for now.
+         #
+         # Warden has a default whitelist for devices. By default, the loop
+         # devices cannot be read/written/mknod'ed. We don't want to change the
+         # warden behavior, so we just manipulate the container cgroup directly.
+         sudo "bash -c 'echo \"b 7:* rwm\" > /tmp/warden/cgroup/devices/instance-#{handle}/devices.allow'"
+
+         # Start bosh agent
+         with_warden do |client|
+           request = Warden::Protocol::SpawnRequest.new
+           request.handle = handle
+           request.privileged = true
+           request.script = "/usr/sbin/runsvdir-start"
+
+           client.call(request)
+         end
+
+         # Save to DB
+         vm.save
+
+         vm.id.to_s
+       end
+     rescue => e
+       if vm
+         if vm.container_id
+           with_warden do |client|
+             request = Warden::Protocol::DestroyRequest.new
+             request.handle = vm.container_id
+
+             client.call(request)
+           end
+         end
+
+         vm.destroy
+       end
+       raise e
+     end
+
+     ##
+     # Deletes a VM
+     #
+     # @param [String] vm_id vm id
+     # @return [void]
+     def delete_vm(vm_id)
+       with_thread_name("delete_vm(#{vm_id})") do
+         vm = Models::VM[vm_id.to_i]
+
+         cloud_error("Cannot find VM #{vm_id}") unless vm
+         cloud_error("Cannot delete vm with disks attached") if vm.disks.size > 0
+
+         container_id = vm.container_id
+
+         vm.destroy
+
+         with_warden do |client|
+           request = Warden::Protocol::DestroyRequest.new
+           request.handle = container_id
+
+           client.call(request)
+         end
+
+         nil
+       end
+
+     end
+
+     def reboot_vm(vm_id)
+       # no-op
+     end
+
+     def configure_networks(vm_id, networks)
+       # no-op
+     end
+
+     ##
+     # Create a disk
+     #
+     # @param [Integer] size disk size in MB
+     # @param [String] vm_locality vm id, if known, of the VM that this disk
+     #                 will be attached to
+     # @return [String] disk id
+     def create_disk(size, vm_locality = nil)
+       not_used(vm_locality)
+
+       disk = nil
+       number = nil
+       image_file = nil
+
+       raise ArgumentError, "disk size <= 0" unless size > 0
+
+       with_thread_name("create_disk(#{size}, _)") do
+         disk = Models::Disk.create
+
+         image_file = image_path(disk.id)
+
+         FileUtils.touch(image_file)
+         File.truncate(image_file, size << 20) # 1 MB == 1 << 20 bytes
+         sh "mkfs -t #{@fs_type} -F #{image_file} 2>&1"
+
+         # Get a device number from the pool
+         number = @pool.acquire
+         cloud_error("Failed to fetch device number") unless number
+
+         # Attach the image file to the loop device
+         sudo "losetup /dev/loop#{number} #{image_file}"
+
+         disk.image_path = image_file
+         disk.device_num = number
+         disk.attached = false
+         disk.save
+
+         disk.id.to_s
+       end
+     rescue => e
+       if number
+         sudo "losetup -d /dev/loop#{number}"
+         @pool.release(number)
+       end
+       FileUtils.rm_f image_file if image_file
+       disk.destroy if disk
+
+       raise e
+     end
+
+     ##
+     # Delete a disk
+     #
+     # @param [String] disk_id disk id
+     # @return [void]
+     def delete_disk(disk_id)
+       with_thread_name("delete_disk(#{disk_id})") do
+         disk = Models::Disk[disk_id.to_i]
+
+         cloud_error("Cannot find disk #{disk_id}") unless disk
+         cloud_error("Cannot delete attached disk") if disk.attached
+
+         # Detach the image file from the loop device
+         sudo "losetup -d /dev/loop#{disk.device_num}"
+
+         # Release the device number back to the pool
+         @pool.release(disk.device_num)
+
+         # Delete the DB entry
+         disk.destroy
+
+         # Remove the image file
+         FileUtils.rm_f image_path(disk_id)
+
+         nil
+       end
+     end
+
+     ##
+     # Attach a disk to a VM
+     #
+     # @param [String] vm_id vm id that was once returned by {#create_vm}
+     # @param [String] disk_id disk id that was once returned by {#create_disk}
+     # @return nil
+     def attach_disk(vm_id, disk_id)
+       with_thread_name("attach_disk(#{vm_id}, #{disk_id})") do
+         vm = Models::VM[vm_id.to_i]
+         disk = Models::Disk[disk_id.to_i]
+
+         cloud_error("Cannot find vm #{vm_id}") unless vm
+         cloud_error("Cannot find disk #{disk_id}") unless disk
+         cloud_error("Disk #{disk_id} already attached") if disk.attached
+
+         # Create a device file inside the warden container
+         script = attach_script(disk.device_num, @device_path_prefix)
+
+         device_path = with_warden do |client|
+           request = Warden::Protocol::RunRequest.new
+           request.handle = vm.container_id
+           request.script = script
+           request.privileged = true
+
+           response = client.call(request)
+
+           stdout = response.stdout || ""
+           stdout.strip
+         end
+
+         # Save the device path into the agent env settings
+         env = get_agent_env(vm.container_id)
+         env["disks"]["persistent"][disk_id] = device_path
+         set_agent_env(vm.container_id, env)
+
+         # Save the DB entry
+         disk.device_path = device_path
+         disk.attached = true
+         disk.vm = vm
+         disk.save
+
+         nil
+       end
+     end
+
+     ##
+     # Detach a disk from a VM
+     #
+     # @param [String] vm_id vm id that was once returned by {#create_vm}
+     # @param [String] disk_id disk id that was once returned by {#create_disk}
+     # @return nil
+     def detach_disk(vm_id, disk_id)
+       with_thread_name("detach_disk(#{vm_id}, #{disk_id})") do
+         vm = Models::VM[vm_id.to_i]
+         disk = Models::Disk[disk_id.to_i]
+
+         cloud_error("Cannot find vm #{vm_id}") unless vm
+         cloud_error("Cannot find disk #{disk_id}") unless disk
+         cloud_error("Disk #{disk_id} not attached") unless disk.attached
+
+         device_num = disk.device_num
+         device_path = disk.device_path
+
+         # Remove the device path from the agent env settings
+         env = get_agent_env(vm.container_id)
+         env["disks"]["persistent"][disk_id] = nil
+         set_agent_env(vm.container_id, env)
+
+         # Update the DB entry
+         disk.attached = false
+         disk.device_path = nil
+         disk.vm = nil
+         disk.save
+
+         # Remove the device file and partition file inside the warden container
+         script = "rm #{partition_path(device_path)} #{device_path}"
+
+         with_warden do |client|
+           request = Warden::Protocol::RunRequest.new
+           request.handle = vm.container_id
+           request.script = script
+           request.privileged = true
+
+           client.call(request)
+         end
+
+         nil
+       end
+     end
+
+     def validate_deployment(old_manifest, new_manifest)
+       # no-op
+     end
+
+     private
+
+     def not_used(*arg)
+       # no-op
+     end
+
+     def stemcell_path(stemcell_id)
+       File.join(@stemcell_root, stemcell_id)
+     end
+
+     def image_path(disk_id)
+       File.join(@disk_root, "#{disk_id}.img")
+     end
+
+     def setup_warden
+       @warden_unix_path = @warden_properties["unix_domain_path"] || DEFAULT_WARDEN_SOCK
+     end
+
+     def setup_stemcell
+       @stemcell_root = @stemcell_properties["root"] || DEFAULT_STEMCELL_ROOT
+
+       FileUtils.mkdir_p(@stemcell_root)
+     end
+
+     def setup_disk
+       @disk_root = @disk_properties["root"] || DEFAULT_DISK_ROOT
+       @fs_type = @disk_properties["fs"] || DEFAULT_FS_TYPE
+       @pool_size = @disk_properties["pool_count"] || DEFAULT_POOL_SIZE
+       @pool_start_number = @disk_properties["pool_start_number"] || DEFAULT_POOL_START_NUMBER
+       @device_path_prefix = @disk_properties["device_path_prefix"] || DEFAULT_DEVICE_PREFIX
+
+       FileUtils.mkdir_p(@disk_root)
+     end
+
+     def setup_pool
+       @pool = DevicePool.new(@pool_size) { |i| i + @pool_start_number }
+
+       occupied_numbers = Models::Disk.collect { |disk| disk.device_num }
+       @pool.delete_if do |i|
+         occupied_numbers.include? i
+       end
+
+       # Initialize the loop devices
+       last = @pool_start_number + @pool_size - 1
+       @pool_start_number.upto(last) do |i|
+         sudo "mknod /dev/loop#{i} b 7 #{i}" unless File.exist? "/dev/loop#{i}"
+       end
+     end
+
+     def with_warden
+       client = Warden::Client.new(@warden_unix_path)
+       client.connect
+
+       ret = yield client
+
+       ret
+     ensure
+       client.disconnect if client
+     end
+
+     def agent_settings_file
+       DEFAULT_SETTINGS_FILE
+     end
+
+     def generate_agent_env(vm, agent_id, networks)
+       vm_env = {
+         "name" => vm.container_id,
+         "id" => vm.id
+       }
+
+       env = {
+         "vm" => vm_env,
+         "agent_id" => agent_id,
+         "networks" => networks,
+         "disks" => { "persistent" => {} },
+       }
+       env.merge!(@agent_properties)
+       env
+     end
+
+     def get_agent_env(handle)
+       body = with_warden do |client|
+         request = Warden::Protocol::RunRequest.new
+         request.handle = handle
+         request.privileged = true
+         request.script = "cat #{agent_settings_file}"
+
+         client.call(request).stdout
+       end
+
+       env = Yajl::Parser.parse(body)
+       env
+     end
+
+     def set_agent_env(handle, env)
+       tempfile = Tempfile.new("settings")
+       tempfile.write(Yajl::Encoder.encode(env))
+       tempfile.close
+
+       tempfile_in = "/tmp/#{rand(100000)}"
+
+       # Copy the settings file to a temp file inside the container, then mv
+       # it to /var/vcap/bosh as a privileged user.
+       with_warden do |client|
+         request = Warden::Protocol::CopyInRequest.new
+         request.handle = handle
+         request.src_path = tempfile.path
+         request.dst_path = tempfile_in
+
+         client.call(request)
+
+         request = Warden::Protocol::RunRequest.new
+         request.handle = handle
+         request.privileged = true
+         request.script = "mv #{tempfile_in} #{agent_settings_file}"
+
+         client.call(request)
+       end
+
+       tempfile.unlink
+     end
+
+   end
+ end
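
The disk path above leans on one identity: a size in MB becomes bytes by shifting left 20 bits, and loop device N is the block device with major 7, minor N, which is also why create_vm whitelists "b 7:* rwm" in the container's cgroup. A condensed Ruby sketch of the arithmetic (the device number and paths are illustrative):

    size_mb = 128
    bytes   = size_mb << 20            # 1 MB == 1 << 20, so 134_217_728 bytes
    number  = 10                       # first number handed out by the pool

    # Host side: bind the formatted image file to /dev/loop10.
    host_cmd = "losetup /dev/loop#{number} /var/vcap/store/disk/42.img"

    # Container side: attach_script mknod's a "partition" whose
    # major/minor pair (7, 10) aliases that same loop device.
    container_cmd = "mknod /dev/sda1 b 7 #{number}"

Because the backing store is shared through the loop device, the agent inside the container mounts the "partition" and sees the filesystem that create_disk formatted on the host.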
data/lib/cloud/warden/device_pool.rb ADDED
@@ -0,0 +1,36 @@
+ module Bosh::WardenCloud
+
+   class DevicePool
+     def initialize(count)
+       @mutex = Mutex.new
+       @pool = []
+
+       @pool = count.times.map { |i| block_given? ? yield(i) : i }
+     end
+
+     def size
+       @mutex.synchronize do
+         @pool.size
+       end
+     end
+
+     def acquire
+       @mutex.synchronize do
+         @pool.shift
+       end
+     end
+
+     def release(entry)
+       @mutex.synchronize do
+         @pool << entry
+       end
+     end
+
+     def delete_if(&blk)
+       @mutex.synchronize do
+         @pool.delete_if(&blk)
+       end
+     end
+   end
+
+ end
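
A quick sketch of the pool's behavior; the block form mirrors how setup_pool seeds it in cloud.rb (the count and start number here are illustrative):

    pool = Bosh::WardenCloud::DevicePool.new(4) { |i| i + 10 }  # [10, 11, 12, 13]

    pool.delete_if { |n| n == 11 }  # drop numbers already used by existing disks
    n = pool.acquire                # => 10 (nil once the pool is empty)
    pool.release(n)                 # return it when the disk is deleted
    pool.size                       # => 3

acquire returns nil rather than blocking when the pool is exhausted, which is why create_disk raises a cloud_error on a nil device number.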
data/lib/cloud/warden/helpers.rb ADDED
@@ -0,0 +1,60 @@
+ module Bosh::WardenCloud
+
+   module Helpers
+
+     def cloud_error(message)
+       @logger.error(message) if @logger
+       raise Bosh::Clouds::CloudError, message
+     end
+
+     def uuid(klass = nil)
+       id = SecureRandom.uuid
+
+       if klass
+         id = "%s-%s" % [klass, id]
+       end
+
+       id
+     end
+
+
+     def sudo(cmd)
+       logger.info "run 'sudo -n #{cmd}'"
+       Bosh::Exec.sh "sudo -n #{cmd}"
+     end
+
+     def sh(cmd)
+       logger.info "run '#{cmd}'"
+       Bosh::Exec.sh "#{cmd}"
+     end
+
+     ##
+     # This method generates a script that is run inside a container to find
+     # an available device path.
+     #
+     # This is hacky. The attached device is already formatted. To trick the
+     # bosh agent into not formatting the disk again, we touch an empty device
+     # file and mknod the real partition file, which is already formatted. The
+     # bosh agent will then skip formatting and mount the partition directly.
+     #
+     # e.g.
+     # The device file is like /dev/sda
+     # The partition file is like /dev/sda1
+     def attach_script(device_number, device_prefix)
+       script = <<-EOF
+ for i in a b c d e f g h; do (stat #{device_prefix}${i} > /dev/null 2>&1) || break; done
+ touch #{device_prefix}${i}
+ mknod #{device_prefix}${i}1 b 7 #{device_number} > /dev/null 2>&1 && echo "#{device_prefix}${i}"
+       EOF
+     end
+
+     def partition_path(device_path)
+       "#{device_path}1"
+     end
+
+     def process_user
+       Etc.getpwuid(Process.uid).name
+     end
+
+   end
+ end
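
To make the attach_script trick concrete, here is the script it renders for device_number 10 and the default "/dev/sd" prefix (assuming the gem and its dependencies are loaded so the module resolves):

    include Bosh::WardenCloud::Helpers

    puts attach_script(10, "/dev/sd")

which prints, comments added:

    # find the first /dev/sdX name not already taken in the container
    for i in a b c d e f g h; do (stat /dev/sd${i} > /dev/null 2>&1) || break; done
    # empty placeholder "device" so the agent has nothing to partition or format
    touch /dev/sd${i}
    # the "partition" is really loop device (7, 10), formatted by create_disk
    mknod /dev/sd${i}1 b 7 10 > /dev/null 2>&1 && echo "/dev/sd${i}"

The echoed path is what attach_disk stores as the disk's device_path and writes into the agent's persistent-disk settings.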
data/lib/cloud/warden/models/disk.rb ADDED
@@ -0,0 +1,5 @@
+ module Bosh::WardenCloud::Models
+   class Disk < Sequel::Model(Bosh::Clouds::Config.db[:warden_disk])
+     many_to_one :vm, :key => :vm_id, :class => Bosh::WardenCloud::Models::VM
+   end
+ end
data/lib/cloud/warden/models/vm.rb ADDED
@@ -0,0 +1,5 @@
+ module Bosh::WardenCloud::Models
+   class VM < Sequel::Model(Bosh::Clouds::Config.db[:warden_vm])
+     one_to_many :disks, :key => :vm_id
+   end
+ end
data/lib/cloud/warden/version.rb ADDED
@@ -0,0 +1,7 @@
+ module Bosh
+   module Clouds
+     class Warden
+       VERSION = '1.5.0.pre.3'
+     end
+   end
+ end
metadata ADDED
@@ -0,0 +1,117 @@
+ --- !ruby/object:Gem::Specification
+ name: bosh_warden_cpi
+ version: !ruby/object:Gem::Version
+   version: 1.5.0.pre.3
+ prerelease: 6
+ platform: ruby
+ authors:
+ - Cloud Foundry
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2013-06-12 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: bosh_cpi
+   requirement: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: warden-protocol
+   requirement: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: warden-client
+   requirement: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: sequel
+   requirement: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ description: BOSH Warden CPI
+ email: support@cloudfoundry.org
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - db/migrations/20130312211408_initial.rb
+ - lib/cloud/warden.rb
+ - lib/cloud/warden/cloud.rb
+ - lib/cloud/warden/device_pool.rb
+ - lib/cloud/warden/helpers.rb
+ - lib/cloud/warden/models/disk.rb
+ - lib/cloud/warden/models/vm.rb
+ - lib/cloud/warden/version.rb
+ - README
+ homepage: http://github.com/cloudfoundry/bosh
+ licenses: []
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>'
+     - !ruby/object:Gem::Version
+       version: 1.3.1
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 1.8.25
+ signing_key:
+ specification_version: 3
+ summary: BOSH Warden CPI
+ test_files: []