bosh_warden_cpi 1.5.0.pre.3 → 1.2513.0
- data/README +3 -11
- data/lib/cloud/warden/cloud.rb +116 -323
- data/lib/cloud/warden/diskutils.rb +107 -0
- data/lib/cloud/warden/helpers.rb +79 -32
- data/lib/cloud/warden/version.rb +2 -4
- data/lib/cloud/warden.rb +14 -32
- data/spec/assets/stemcell-warden-test.tgz +0 -0
- data/spec/spec_helper.rb +20 -0
- data/spec/unit/cloud_spec.rb +264 -0
- data/spec/unit/diskutils_spec.rb +146 -0
- data/spec/unit/helper_spec.rb +106 -0
- metadata +59 -16
- data/db/migrations/20130312211408_initial.rb +0 -22
- data/lib/cloud/warden/device_pool.rb +0 -36
- data/lib/cloud/warden/models/disk.rb +0 -5
- data/lib/cloud/warden/models/vm.rb +0 -5
data/README
CHANGED
@@ -2,15 +2,7 @@
 
 # Running with Vagrant up
 
-
+Now we use bosh-lite to bootstrap the local vm with warden cpi enabled:
+here is howto:
 
-
-librarian-chef install
-
-
-Later on, after the vagrant box is set up and the director + Warden is successfully running,
-you'll probably have to download a stemcell from s3 and then upload it to the director.
-
-we need a warden-friendly stemcell that includes the director. micro_bosh stemcell did not work. Let's try vsphere next
-
-No worries, we'll soon work this into the vagrantfile
+https://github.com/cloudfoundry/bosh-lite/blob/master/README.md
data/lib/cloud/warden/cloud.rb
CHANGED
@@ -1,17 +1,13 @@
 module Bosh::WardenCloud
   class Cloud < Bosh::Cloud

-    include Helpers
+    include Bosh::WardenCloud::Helpers

-
-
-
-
-
-    DEFAULT_POOL_START_NUMBER = 10
-    DEFAULT_DEVICE_PREFIX = "/dev/sd"
-
-    DEFAULT_SETTINGS_FILE = "/var/vcap/bosh/settings.json"
+    DEFAULT_STEMCELL_ROOT = '/var/vcap/stemcell'
+    DEFAULT_DISK_ROOT = '/var/vcap/store/disk'
+    DEFAULT_FS_TYPE = 'ext4'
+    DEFAULT_WARDEN_DEV_ROOT = '/warden-cpi-dev'
+    DEFAULT_WARDEN_SOCK = '/tmp/warden.sock'

     attr_accessor :logger

@@ -22,47 +18,28 @@ module Bosh::WardenCloud
     def initialize(options)
       @logger = Bosh::Clouds::Config.logger

-      @agent_properties = options
-      @warden_properties = options
-      @stemcell_properties = options
-      @disk_properties = options
-
-      setup_warden
-      setup_stemcell
-      setup_disk
+      @agent_properties = options.fetch('agent', {})
+      @warden_properties = options.fetch('warden', {})
+      @stemcell_properties = options.fetch('stemcell', {})
+      @disk_properties = options.fetch('disk', {})

-
+      setup_path
+      @disk_utils = DiskUtils.new(@disk_root, @stemcell_root, @fs_type)
     end

     ##
     # Create a stemcell using stemcell image
-    # This method simply untar the stemcell image to a local directory. Warden
-    # can use the rootfs within the image as a base fs.
     # @param [String] image_path local path to a stemcell image
     # @param [Hash] cloud_properties not used
     # return [String] stemcell id
     def create_stemcell(image_path, cloud_properties)
       not_used(cloud_properties)
-
-      stemcell_id = uuid("stemcell")
-      stemcell_dir = stemcell_path(stemcell_id)
-
+      stemcell_id = uuid('stemcell')
       with_thread_name("create_stemcell(#{image_path}, _)") do
-
-
-        @logger.info("Extracting stemcell from #{image_path} to #{stemcell_dir}")
-        FileUtils.mkdir_p(stemcell_dir)
-
-        # This command needs priviledge because the stemcell contains device files,
-        # which cannot be untared without priviledge
-        sudo "tar -C #{stemcell_dir} -xzf #{image_path} 2>&1"
-
+        @logger.info("Extracting stemcell from #{image_path} for #{stemcell_id}")
+        @disk_utils.stemcell_unpack(image_path, stemcell_id)
         stemcell_id
       end
-    rescue => e
-      sudo "rm -rf #{stemcell_dir}"
-
-      raise e
     end

     ##
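A note on the constructor change above: instead of handing the full options hash to several setup_* methods, the CPI now reads four top-level option groups and builds a DiskUtils instance up front. A minimal sketch of an options hash that satisfies the new initialize and setup_path (key names are taken from this diff; the values shown are just the defaults defined in the constants above, and Bosh::Clouds::Config is assumed to be configured by the caller):

    require 'cloud/warden'

    options = {
      'agent'    => {},                                   # agent properties, forwarded to the agent env helpers
      'warden'   => { 'unix_domain_path' => '/tmp/warden.sock' },
      'stemcell' => { 'root' => '/var/vcap/stemcell' },
      'disk'     => {
        'root'            => '/var/vcap/store/disk',
        'fs'              => 'ext4',
        'warden_dev_root' => '/warden-cpi-dev',
      },
    }

    cloud = Bosh::WardenCloud::Cloud.new(options)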
@@ -71,9 +48,7 @@ module Bosh::WardenCloud
     # @return [void]
     def delete_stemcell(stemcell_id)
       with_thread_name("delete_stemcell(#{stemcell_id}, _)") do
-
-        sudo "rm -rf #{stemcell_dir}"
-
+        @disk_utils.stemcell_delete(stemcell_id)
         nil
       end
     end
@@ -91,79 +66,40 @@ module Bosh::WardenCloud
     # @param [optional, Hash] env environment that will be passed to this vm
     # @return [String] vm_id
     def create_vm(agent_id, stemcell_id, resource_pool,
-                  networks, disk_locality = nil,
+                  networks, disk_locality = nil, environment = nil)
       not_used(resource_pool)
       not_used(disk_locality)
-      not_used(env)
-
-      vm = nil

+      vm_handle = nil
       with_thread_name("create_vm(#{agent_id}, #{stemcell_id}, #{networks})") do
+        stemcell_path = @disk_utils.stemcell_path(stemcell_id)
+        vm_id = uuid('vm')

-
-
-        if networks.size > 1
-          raise ArgumentError, "Not support more than 1 nics"
-        end
-
-        unless Dir.exist?(stemcell_path)
-          cloud_error("Cannot find Stemcell(#{stemcell_id})")
-        end
-
-        vm = Models::VM.create
+        raise ArgumentError, 'Not support more than 1 nics' if networks.size > 1
+        cloud_error("Cannot find Stemcell(#{stemcell_id})") unless Dir.exist?(stemcell_path)

         # Create Container
-
+        vm_handle = with_warden do |client|
           request = Warden::Protocol::CreateRequest.new
+          request.handle = vm_id
           request.rootfs = stemcell_path
-          if networks.first[1][
-            request.network = networks.first[1][
+          if networks.first[1]['type'] != 'dynamic'
+            request.network = networks.first[1]['ip']
           end
-
+          request.bind_mounts = bind_mount_prepare(vm_id)
           response = client.call(request)
           response.handle
         end
-        vm
+        cloud_error("Cannot create vm with given handle #{vm_id}") unless vm_handle == vm_id

         # Agent settings
-        env = generate_agent_env(
-        set_agent_env(
-
-
-        #
-        # Warden has a default white list for devices. By default, all the loop
-        # devices cannot be read/written/mknod. We don't want to change the
-        # warden behavior, so we just manipulate the container cgroup directly.
-        sudo "bash -c 'echo \"b 7:* rwm\" > /tmp/warden/cgroup/devices/instance-#{handle}/devices.allow'"
-
-        # Start bosh agent
-        with_warden do |client|
-          request = Warden::Protocol::SpawnRequest.new
-          request.handle = handle
-          request.privileged = true
-          request.script = "/usr/sbin/runsvdir-start"
-
-          client.call(request)
-        end
-
-        # Save to DB
-        vm.save
-
-        vm.id.to_s
+        env = generate_agent_env(vm_id, agent_id, networks, environment)
+        set_agent_env(vm_id, env)
+        start_agent(vm_id)
+        vm_id
       end
     rescue => e
-      if
-        if vm.container_id
-          with_warden do |client|
-            request = Warden::Protocol::DestroyRequest.new
-            request.handle = vm.container_id
-
-            client.call(request)
-          end
-        end
-
-        vm.destroy
-      end
+      destroy_container(vm_handle) if vm_handle
       raise e
     end

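For reference, a hypothetical call against the new create_vm signature shown above (the agent id, network name and IP are made up; only one NIC is accepted, and request.network is set whenever the network type is not 'dynamic'):

    vm_id = cloud.create_vm(
      'agent-id-1234',                                                  # agent_id
      stemcell_id,                                                      # as returned by create_stemcell
      {},                                                               # resource_pool (not used)
      { 'default' => { 'type' => 'manual', 'ip' => '10.244.0.10' } },
      nil,                                                              # disk_locality (not used)
      {}                                                                # environment, passed on to generate_agent_env
    )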
@@ -174,25 +110,39 @@ module Bosh::WardenCloud
     # @return [void]
     def delete_vm(vm_id)
       with_thread_name("delete_vm(#{vm_id})") do
-
-
-
-
-
-        container_id = vm.container_id
-
-        vm.destroy
-
-        with_warden do |client|
-          request = Warden::Protocol::DestroyRequest.new
-          request.handle = container_id
-
-          client.call(request)
+        if has_vm?(vm_id)
+          destroy_container(vm_id)
+          vm_bind_mount = File.join(@bind_mount_points, vm_id)
+          sudo "umount #{vm_bind_mount}"
         end

+        ephemeral_mount = File.join(@ephemeral_mount_points, vm_id)
+        sudo "rm -rf #{ephemeral_mount}"
+        vm_bind_mount = File.join(@bind_mount_points, vm_id)
+        sudo "rm -rf #{vm_bind_mount}"
         nil
       end
+    end
+
+    ##
+    # Checks if a VM exists
+    #
+    # @param [String] vm_id vm id
+    # @return [Boolean] True if the vm exists

+    def has_vm?(vm_id)
+      with_thread_name("has_vm(#{vm_id})") do
+        result = false
+        handles = with_warden do |client|
+          request = Warden::Protocol::ListRequest.new
+          response = client.call(request)
+          response.handles
+        end
+        unless handles.nil?
+          result = handles.include?(vm_id)
+        end
+        result
+      end
     end

     def reboot_vm(vm_id)
@@ -212,45 +162,11 @@ module Bosh::WardenCloud
     # @return [String] disk id
     def create_disk(size, vm_locality = nil)
       not_used(vm_locality)
-
-      disk = nil
-      number = nil
-      image_file = nil
-
-      raise ArgumentError, "disk size <= 0" unless size > 0
-
       with_thread_name("create_disk(#{size}, _)") do
-
-
-
-
-        FileUtils.touch(image_file)
-        File.truncate(image_file, size << 20) # 1 MB == 1<<20 Byte
-        sh "mkfs -t #{@fs_type} -F #{image_file} 2>&1"
-
-        # Get a device number from the pool
-        number = @pool.acquire
-        cloud_error("Failed to fetch device number") unless number
-
-        # Attach image file to the device
-        sudo "losetup /dev/loop#{number} #{image_file}"
-
-        disk.image_path = image_file
-        disk.device_num = number
-        disk.attached = false
-        disk.save
-
-        disk.id.to_s
+        disk_id = uuid('disk')
+        @disk_utils.create_disk(disk_id, size)
+        disk_id
       end
-    rescue => e
-      if number
-        sudo "losetup -d /dev/loop#{number}"
-        @pool.release(number)
-      end
-      FileUtils.rm_f image_file if image_file
-      disk.destroy if disk
-
-      raise e
     end

     ##
@@ -260,23 +176,8 @@ module Bosh::WardenCloud
     # @return [void]
     def delete_disk(disk_id)
       with_thread_name("delete_disk(#{disk_id})") do
-        disk
-
-        cloud_error("Cannot find disk #{disk_id}") unless disk
-        cloud_error("Cannot delete attached disk") if disk.attached
-
-        # Detach image file from loop device
-        sudo "losetup -d /dev/loop#{disk.device_num}"
-
-        # Release the device number back to pool
-        @pool.release(disk.device_num)
-
-        # Delete DB entry
-        disk.destroy
-
-        # Remove image file
-        FileUtils.rm_f image_path(disk_id)
-
+        cloud_error("Cannot find disk #{disk_id}") unless has_disk?(disk_id)
+        @disk_utils.delete_disk(disk_id)
         nil
       end
     end
@@ -289,38 +190,17 @@ module Bosh::WardenCloud
     # @return nil
     def attach_disk(vm_id, disk_id)
      with_thread_name("attach_disk(#{vm_id}, #{disk_id})") do
-        vm
-        disk
-
-        cloud_error("Cannot find vm #{vm_id}") unless vm
-        cloud_error("Cannot find disk #{disk_id}") unless disk
-        cloud_error("Disk #{disk_id} already attached") if disk.attached
-
-        # Create a device file inside warden container
-        script = attach_script(disk.device_num, @device_path_prefix)
+        cloud_error("Cannot find vm #{vm_id}") unless has_vm?(vm_id)
+        cloud_error("Cannot find disk #{disk_id}") unless has_disk?(disk_id)

-
-
-          request.handle = vm.container_id
-          request.script = script
-          request.privileged = true
-
-          response = client.call(request)
-
-          stdout = response.stdout || ""
-          stdout.strip
-        end
+        vm_bind_mount = File.join(@bind_mount_points, vm_id)
+        disk_dir = File.join(vm_bind_mount, disk_id)

+        @disk_utils.mount_disk(disk_dir, disk_id)
         # Save device path into agent env settings
-        env = get_agent_env(
-        env[
-        set_agent_env(
-
-        # Save DB entry
-        disk.device_path = device_path
-        disk.attached = true
-        disk.vm = vm
-        disk.save
+        env = get_agent_env(vm_id)
+        env['disks']['persistent'][disk_id] = File.join(@warden_dev_root, disk_id)
+        set_agent_env(vm_id, env)

         nil
       end
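The single env assignment in attach_disk above is easier to follow with the resulting structure written out; a sketch with a made-up disk id and the default warden_dev_root (the 'disks'/'persistent' layout is the one the agent env already carries):

    warden_dev_root = '/warden-cpi-dev'
    disk_id = 'disk-5678'

    env = { 'disks' => { 'persistent' => {} } }
    env['disks']['persistent'][disk_id] = File.join(warden_dev_root, disk_id)
    # => {"disks"=>{"persistent"=>{"disk-5678"=>"/warden-cpi-dev/disk-5678"}}}
    # detach_disk later resets the same key to nil.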
@@ -334,38 +214,19 @@ module Bosh::WardenCloud
     # @return nil
     def detach_disk(vm_id, disk_id)
       with_thread_name("detach_disk(#{vm_id}, #{disk_id})") do
-        vm = Models::VM[vm_id.to_i]
-        disk = Models::Disk[disk_id.to_i]

-        cloud_error("Cannot find vm #{vm_id}") unless
-        cloud_error("Cannot find disk #{disk_id}") unless
-        cloud_error("Disk #{disk_id} not attached") unless disk.attached
+        cloud_error("Cannot find vm #{vm_id}") unless has_vm?(vm_id)
+        cloud_error("Cannot find disk #{disk_id}") unless has_disk?(disk_id)

-
-        device_path =
+        vm_bind_mount = File.join(@bind_mount_points, vm_id)
+        device_path = File.join(vm_bind_mount, disk_id)

+        # umount the image file
+        @disk_utils.umount_disk(device_path)
         # Save device path into agent env settings
-        env = get_agent_env(
-        env[
-        set_agent_env(
-
-        # Save DB entry
-        disk.attached = false
-        disk.device_path = nil
-        disk.vm = nil
-        disk.save
-
-        # Remove the device file and partition file inside warden container
-        script = "rm #{partition_path(device_path)} #{device_path}"
-
-        with_warden do |client|
-          request = Warden::Protocol::RunRequest.new
-          request.handle = vm.container_id
-          request.script = script
-          request.privileged = true
-
-          client.call(request)
-        end
+        env = get_agent_env(vm_id)
+        env['disks']['persistent'][disk_id] = nil
+        set_agent_env(vm_id, env)

         nil
       end
@@ -377,124 +238,56 @@ module Bosh::WardenCloud

     private

-    def
-
-    end
-
-    def stemcell_path(stemcell_id)
-      File.join(@stemcell_root, stemcell_id)
-    end
-
-    def image_path(disk_id)
-      File.join(@disk_root, "#{disk_id}.img")
+    def has_disk?(disk_id)
+      @disk_utils.disk_exist?(disk_id)
     end

-    def
-
-    end
-
-    def setup_stemcell
-      @stemcell_root = @stemcell_properties["root"] || DEFAULT_STEMCELL_ROOT
-
-      FileUtils.mkdir_p(@stemcell_root)
-    end
-
-    def setup_disk
-      @disk_root = @disk_properties["root"] || DEFAULT_DISK_ROOT
-      @fs_type = @disk_properties["fs"] || DEFAULT_FS_TYPE
-      @pool_size = @disk_properties["pool_count"] || DEFAULT_POOL_SIZE
-      @pool_start_number = @disk_properties["pool_start_number"] || DEFAULT_POOL_START_NUMBER
-      @device_path_prefix = @disk_properties["device_path_prefix"] || DEFAULT_DEVICE_PREFIX
-
-      FileUtils.mkdir_p(@disk_root)
-    end
-
-    def setup_pool
-      @pool = DevicePool.new(@pool_size) { |i| i + @pool_start_number }
-
-      occupied_numbers = Models::Disk.collect { |disk| disk.device_num }
-      @pool.delete_if do |i|
-        occupied_numbers.include? i
-      end
-
-      # Initialize the loop devices
-      last = @pool_start_number + @pool_size - 1
-      @pool_start_number.upto(last) do |i|
-        sudo "mknod /dev/loop#{i} b 7 #{i}" unless File.exists? "/dev/loop#{i}"
-      end
+    def not_used(*arg)
+      # no-op
     end

-    def
-
-
+    def setup_path
+      @warden_unix_path = @warden_properties.fetch('unix_domain_path', DEFAULT_WARDEN_SOCK)
+      @warden_dev_root = @disk_properties.fetch('warden_dev_root', DEFAULT_WARDEN_DEV_ROOT)
+      @stemcell_root = @stemcell_properties.fetch('root', DEFAULT_STEMCELL_ROOT)

-
+      @disk_root = @disk_properties.fetch('root', DEFAULT_DISK_ROOT)
+      @fs_type = @disk_properties.fetch('fs', DEFAULT_FS_TYPE)

-
-
-      client.disconnect if client
+      @bind_mount_points = File.join(@disk_root, 'bind_mount_points')
+      @ephemeral_mount_points = File.join(@disk_root, 'ephemeral_mount_point')
     end

-    def
-
-
+    def bind_mount_prepare(vm_id)
+      vm_bind_mount = File.join(@bind_mount_points, vm_id)
+      FileUtils.mkdir_p(vm_bind_mount)
+      vm_ephemeral_mount = File.join(@ephemeral_mount_points, vm_id)
+      FileUtils.mkdir_p(vm_ephemeral_mount)

-
-
-
-
-      }
-
-      env = {
-        "vm" => vm_env,
-        "agent_id" => agent_id,
-        "networks" => networks,
-        "disks" => { "persistent" => {} },
-      }
-      env.merge!(@agent_properties)
-      env
-    end
+      # Make the bind mount point shareable
+      sudo "mount --bind #{vm_bind_mount} #{vm_bind_mount}"
+      sudo "mount --make-unbindable #{vm_bind_mount}"
+      sudo "mount --make-shared #{vm_bind_mount}"

-
-
-
-
-      request.privileged = true
-      request.script = "cat #{agent_settings_file}"
+      bind_mount = Warden::Protocol::CreateRequest::BindMount.new
+      bind_mount.src_path = vm_bind_mount
+      bind_mount.dst_path = @warden_dev_root
+      bind_mount.mode = Warden::Protocol::CreateRequest::BindMount::Mode::RW

-
-
+      ephemeral_mount = Warden::Protocol::CreateRequest::BindMount.new
+      ephemeral_mount.src_path = vm_ephemeral_mount
+      ephemeral_mount.dst_path = '/var/vcap/data'
+      ephemeral_mount.mode = Warden::Protocol::CreateRequest::BindMount::Mode::RW

-
-      env
+      return [bind_mount, ephemeral_mount]
     end

-    def
-      tempfile = Tempfile.new("settings")
-      tempfile.write(Yajl::Encoder.encode(env))
-      tempfile.close
-
-      tempfile_in = "/tmp/#{rand(100000)}"
-
-      # Here we copy the setting file to temp file in container, then mv it to
-      # /var/vcap/bosh by privileged user.
+    def destroy_container(container_id)
       with_warden do |client|
-        request = Warden::Protocol::
-        request.handle =
-        request.src_path = tempfile.path
-        request.dst_path = tempfile_in
-
-        client.call(request)
-
-        request = Warden::Protocol::RunRequest.new
-        request.handle = handle
-        request.privileged = true
-        request.script = "mv #{tempfile_in} #{agent_settings_file}"
-
+        request = Warden::Protocol::DestroyRequest.new
+        request.handle = container_id
         client.call(request)
       end
-
-      tempfile.unlink
     end

   end