vcap_services_base 0.2.10
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/lib/base/abstract.rb +11 -0
- data/lib/base/api/message.rb +31 -0
- data/lib/base/asynchronous_service_gateway.rb +529 -0
- data/lib/base/backup.rb +206 -0
- data/lib/base/barrier.rb +54 -0
- data/lib/base/base.rb +159 -0
- data/lib/base/base_async_gateway.rb +164 -0
- data/lib/base/base_job.rb +5 -0
- data/lib/base/catalog_manager_base.rb +67 -0
- data/lib/base/catalog_manager_v1.rb +225 -0
- data/lib/base/catalog_manager_v2.rb +291 -0
- data/lib/base/cloud_controller_services.rb +75 -0
- data/lib/base/datamapper_l.rb +148 -0
- data/lib/base/gateway.rb +167 -0
- data/lib/base/gateway_service_catalog.rb +68 -0
- data/lib/base/http_handler.rb +101 -0
- data/lib/base/job/async_job.rb +71 -0
- data/lib/base/job/config.rb +27 -0
- data/lib/base/job/lock.rb +153 -0
- data/lib/base/job/package.rb +112 -0
- data/lib/base/job/serialization.rb +365 -0
- data/lib/base/job/snapshot.rb +354 -0
- data/lib/base/node.rb +471 -0
- data/lib/base/node_bin.rb +154 -0
- data/lib/base/plan.rb +63 -0
- data/lib/base/provisioner.rb +1120 -0
- data/lib/base/provisioner_v1.rb +125 -0
- data/lib/base/provisioner_v2.rb +193 -0
- data/lib/base/service.rb +93 -0
- data/lib/base/service_advertiser.rb +184 -0
- data/lib/base/service_error.rb +122 -0
- data/lib/base/service_message.rb +94 -0
- data/lib/base/service_plan_change_set.rb +11 -0
- data/lib/base/simple_aop.rb +63 -0
- data/lib/base/snapshot_v2/snapshot.rb +227 -0
- data/lib/base/snapshot_v2/snapshot_client.rb +158 -0
- data/lib/base/snapshot_v2/snapshot_job.rb +95 -0
- data/lib/base/utils.rb +63 -0
- data/lib/base/version.rb +7 -0
- data/lib/base/warden/instance_utils.rb +161 -0
- data/lib/base/warden/node_utils.rb +205 -0
- data/lib/base/warden/service.rb +426 -0
- data/lib/base/worker_bin.rb +76 -0
- data/lib/vcap_services_base.rb +16 -0
- metadata +364 -0
@@ -0,0 +1,365 @@
|
|
1
|
+
# Copyright (c) 2009-2011 VMware, Inc.
|
2
|
+
require "resque-status"
|
3
|
+
require "fileutils"
|
4
|
+
require "tmpdir"
|
5
|
+
require "curb"
|
6
|
+
|
7
|
+
$LOAD_PATH.unshift File.dirname(__FILE__)
|
8
|
+
require "snapshot"
|
9
|
+
|
10
|
+
$LOAD_PATH.unshift File.join(File.dirname(__FILE__), '..')
|
11
|
+
require "service_error"
|
12
|
+
|
13
|
+
module VCAP::Services::Base::AsyncJob
|
14
|
+
module Serialization
|
15
|
+
SERIALIZATION_KEY_PREFIX = "vcap:serialization".freeze
|
16
|
+
|
17
|
+
# Module-level singleton state: one Redis handle shared by all
# serialization helpers in this process.
class << self
  attr_reader :redis

  # Open the module-wide Redis connection using the worker configuration.
  def redis_connect
    @redis = ::Redis.new(Config.redis_config)
  end
end
|
24
|
+
|
25
|
+
# Namespace a key under the serialization prefix so serialization data
# does not collide with other redis users.
def redis_key(key)
  [SERIALIZATION_KEY_PREFIX, key].join(":")
end
|
28
|
+
|
29
|
+
# Render an exception and its backtrace as a single log-friendly line.
def fmt_error(e)
  trace = e.backtrace.join(" | ")
  "#{e}: [#{trace}]"
end
|
32
|
+
|
33
|
+
# Base class for jobs that serialize / import service data.  Provides
# queue selection, lifecycle locking, logging, snapshot download-token
# helpers and package validation shared by the concrete jobs below.
class SerializationJob
  attr_reader :name

  include Serialization
  include Snapshot
  include Resque::Plugins::Status
  include VCAP::Services::Base::Error

  class << self
    # Jobs are routed to the Resque queue named after this option key.
    def queue_lookup_key
      :node_id
    end

    # Pick the Resque queue from the job arguments: the last hash
    # argument containing +queue_lookup_key+ wins.
    def select_queue(*args)
      result = nil
      args.each do |arg|
        result = arg[queue_lookup_key] if arg.is_a?(Hash) && arg.has_key?(queue_lookup_key)
      end
      @logger = Config.logger
      @logger.info("Select queue #{result} for job #{self.class} with args:#{args.inspect}") if @logger
      result
    end
  end

  def initialize(*args)
    super(*args)
    parse_config
    init_worker_logger
    Serialization.redis_connect
    Snapshot.redis_connect
  end

  # Build (but do not acquire) the lifecycle lock for this instance.
  # TTL comes from the worker config, defaulting to 600 seconds.
  def create_lock
    lock_name = "lock:lifecycle:#{name}"
    ttl = @config["job_ttl"] || 600
    Lock.new(lock_name, :logger => @logger, :ttl => ttl)
  end

  def init_worker_logger
    @logger = Config.logger
  end

  # Log the error and mark the job failed with a JSON-encoded message.
  # Non-ServiceError exceptions are reported as INTERNAL_ERROR so internal
  # details are not leaked to the caller.
  def handle_error(e)
    @logger.error("Error in #{self.class} uuid:#{@uuid}: #{fmt_error(e)}")
    err = (e.instance_of?(ServiceError) ? e : ServiceError.new(ServiceError::INTERNAL_ERROR)).to_hash
    err_msg = Yajl::Encoder.encode(err["msg"])
    failed(err_msg)
  end

  # Raise ArgumentError unless every named option was supplied to the job.
  def required_options(*args)
    missing_opts = args.select { |arg| !options.has_key?(arg.to_s) }
    raise ArgumentError, "Missing #{missing_opts.join(', ')} in options: #{options.inspect}" unless missing_opts.empty?
  end

  # Update the download token for a service snapshot
  def update_download_token(name, snapshot_id, token)
    snapshot = snapshot_details(name, snapshot_id)
    snapshot["token"] = token
    save_snapshot(name, snapshot)
  end

  # Remove the download token from a snapshot, persisting only when a
  # token was actually present.
  def delete_download_token(name, snapshot_id)
    snapshot = snapshot_details(name, snapshot_id)
    res = snapshot.delete("token")
    save_snapshot(name, snapshot) if res
  end

  def parse_config
    @config = Yajl::Parser.parse(ENV['WORKER_CONFIG'])
    raise "Need environment variable: WORKER_CONFIG" unless @config
  end

  # Validate the serialized data file.
  # Sub class should override this method to supply specific validation.
  def validate_input(files, manifest)
    # fixed grammar of the error message ("Doesn't contains" -> "Doesn't contain")
    raise "Doesn't contain any snapshot file." if files.empty?
    true
  end

  # The name for the saved snapshot file. Subclass can override this method to customize file name.
  def snapshot_filename(name, snapshot_id)
    "#{name}.gz"
  end

  def get_dump_path(name, snapshot_id)
    snapshot_filepath(@config["snapshots_base_dir"], @config["service_name"], name, snapshot_id)
  end

  private

  # validate a package file, subclass should not override this method since packaging is transparent.
  # To implement customized validation, see +validate_input+
  # Returns the validation result, or nil when the package cannot be
  # loaded/unpacked; the temp dir is always cleaned up.
  def validate_package(package_file)
    temp_dir = Dir.mktmpdir(@config["tmp_dir"])

    package = Package.load(package_file)
    manifest = package.manifest
    files = package.unpack(temp_dir)

    validate_input(files, manifest)
  rescue => e
    @logger.error("Failed to validate package file:#{e}")
    nil
  ensure
    FileUtils.rm_rf(temp_dir) if temp_dir
  end
end
|
142
|
+
|
143
|
+
# Generate download URL for a service snapshot
|
144
|
+
# Generate download URL for a service snapshot
class BaseCreateSerializedURLJob < SerializationJob
  VALID_CREDENTIAL_CHARACTERS = ("A".."Z").to_a + ("a".."z").to_a + ("0".."9").to_a

  # workflow template: under the lifecycle lock run +execute+, attach a
  # fresh download token to the snapshot and publish the resulting URL.
  def perform
    required_options :service_id, :snapshot_id
    @name = options["service_id"]
    @snapshot_id = options["snapshot_id"]
    @logger.info("Launch job: #{self.class} for #{name} with options:#{options.inspect}")

    lock = create_lock
    lock.lock do
      result = execute
      @logger.info("Results of create serialized url: #{result}")

      token = generate_download_token
      update_download_token(name, @snapshot_id, token)
      url = generate_download_url(name, @snapshot_id, token)
      @logger.info("Download link generated for snapshot=#{@snapshot_id} of #{name}: #{url}")

      job_result = { :url => url }
      completed(Yajl::Encoder.encode(job_result))
      @logger.info("Complete job: #{self.class} for #{name}")
    end
  rescue => e
    cleanup(name, @snapshot_id)
    handle_error(e)
  ensure
    set_status({:complete_time => Time.now.to_s})
  end

  # empty — hook for subclasses
  def execute
    true
  end

  # Best-effort removal of the download token; errors are logged, never raised.
  def cleanup(name, snapshot_id)
    return unless name && snapshot_id
    begin
      delete_download_token(name, snapshot_id)
    rescue => e
      @logger.error("Error in cleanup: #{e}")
    end
  end

  # Random alphanumeric token embedded in download URLs.
  def generate_download_token(length = 12)
    (1..length).map { VALID_CREDENTIAL_CHARACTERS[rand(VALID_CREDENTIAL_CHARACTERS.length)] }.join
  end

  # Expand the configured URL template with service/snapshot/token fields.
  def generate_download_url(name, snapshot_id, token)
    url_template = @config["download_url_template"]
    url_template % {:service => @config["service_name"], :name => name, :snapshot_id => snapshot_id, :token => token}
  end
end
|
200
|
+
|
201
|
+
# Create a new snapshot of service using given URL
|
202
|
+
# Create a new snapshot of service using given URL
class BaseImportFromURLJob < SerializationJob
  attr_reader :url, :snapshot_id

  # Workflow template.
  # Sub class should return true for a successful import job.
  def perform
    required_options :service_id, :url
    @name = options["service_id"]
    @url = options["url"]
    @logger.info("Launch job: #{self.class} for #{name} with options:#{options.inspect}")

    lock = create_lock
    lock.lock do
      quota = @config["snapshot_quota"]
      if quota
        current = service_snapshots_count(name)
        @logger.debug("Current snapshots count for #{name}: #{current}, max: #{quota}")
        raise ServiceError.new(ServiceError::OVER_QUOTA, name, current, quota) if current >= quota
      end

      @snapshot_id = new_snapshot_id
      @snapshot_path = get_dump_path(name, snapshot_id)
      @snapshot_file = File.join(@snapshot_path, "#{snapshot_id}.zip")

      # clean any data in snapshot folder
      FileUtils.rm_rf(@snapshot_path)
      FileUtils.mkdir_p(@snapshot_path)

      fetch_url(url, @snapshot_file)
      raise ServiceError.new(ServiceError::BAD_SERIALIZED_DATAFILE, url) unless validate_package(@snapshot_file)

      result = execute
      @logger.info("Results of import from url: #{result}")

      snapshot = {
        :snapshot_id => snapshot_id,
        :size => File.open(@snapshot_file) { |f| f.size },
        :date => fmt_time,
        :file => "#{snapshot_id}.zip"
      }
      save_snapshot(name, snapshot)
      @logger.info("Create new snapshot for #{name}:#{snapshot}")

      completed(Yajl::Encoder.encode(filter_keys(snapshot)))
      @logger.info("Complete job: #{self.class} for #{name}")
    end
  rescue => e
    handle_error(e)
    delete_snapshot(name, snapshot_id) if snapshot_id
    FileUtils.rm_rf(@snapshot_path) if @snapshot_path
  ensure
    set_status({:complete_time => Time.now.to_s})
  end

  # Fetch remote uri and stream content to file.
  # Enforces a size limit (checked against both the advertised
  # content-length and the running byte count) and a redirect limit.
  def fetch_url(url, file_path)
    serialization_cfg = @config["serialization"]
    max_download_size = (serialization_cfg && serialization_cfg["max_download_size_mb"] || 10).to_i * 1024 * 1024 # 10M by default
    max_redirects = serialization_cfg && serialization_cfg["max_download_redirects"] || 5

    File.open(file_path, "wb+") do |file|
      curl = Curl::Easy.new(url)
      # force use ipv4 dns
      curl.resolve_mode = :ipv4
      # auto redirect
      curl.follow_location = true
      curl.max_redirects = max_redirects

      curl.on_header do |header|
        if curl.downloaded_content_length > max_download_size
          raise ServiceError.new(ServiceError::FILESIZE_TOO_LARGE, url, curl.downloaded_content_length, max_download_size)
        end

        header.size
      end

      bytes_downloaded = 0
      curl.on_body do |data|
        # calculate bytes downloaded for chucked response
        bytes_downloaded += data.size
        if bytes_downloaded > max_download_size
          raise ServiceError.new(ServiceError::FILESIZE_TOO_LARGE, url, bytes_downloaded, max_download_size)
        end
        file.write(data)
      end

      begin
        curl.perform
      rescue Curl::Err::TooManyRedirectsError
        raise ServiceError.new(ServiceError::TOO_MANY_REDIRECTS, url, max_redirects)
      end
    end
  end

  # empty by default
  def execute
    true
  end
end
|
301
|
+
|
302
|
+
# Create a new snapshot with the given temp file
|
303
|
+
# Create a new snapshot with the given temp file
class BaseImportFromDataJob < SerializationJob
  attr_reader :temp_file_path, :snapshot_id

  # Workflow template: validate quota and the uploaded temp file, run
  # +execute+, move the file into the snapshot folder and register the
  # snapshot metadata in redis.  The temp file is always removed.
  def perform
    begin
      required_options :service_id, :temp_file_path
      @name = options["service_id"]
      @temp_file_path = options["temp_file_path"]
      @logger.info("Launch job: #{self.class} for #{name} with options:#{options.inspect}")

      lock = create_lock
      lock.lock do
        quota = @config["snapshot_quota"]
        if quota
          current = service_snapshots_count(name)
          @logger.debug("Current snapshots count for #{name}: #{current}, max: #{quota}")
          raise ServiceError.new(ServiceError::OVER_QUOTA, name, current, quota) if current >= quota
        end

        # File.exists? was deprecated and removed in Ruby 3.2; File.exist? is the supported form.
        raise "Can't find temp file: #{@temp_file_path}" unless File.exist? temp_file_path
        raise ServiceError.new(ServiceError::BAD_SERIALIZED_DATAFILE, "request") unless validate_package(temp_file_path)

        @snapshot_id = new_snapshot_id
        @snapshot_path = get_dump_path(name, snapshot_id)
        @snapshot_file = File.join(@snapshot_path, "#{snapshot_id}.zip")
        # clean any data in snapshot folder
        FileUtils.rm_rf(@snapshot_path)
        FileUtils.mkdir_p(@snapshot_path)

        result = execute
        # fixed copy-pasted log text: this job imports from uploaded data, not a url
        @logger.info("Results of import from data: #{result}")

        FileUtils.mv(@temp_file_path, @snapshot_file)

        snapshot = {
          :snapshot_id => snapshot_id,
          :size => File.open(@snapshot_file) { |f| f.size },
          :date => fmt_time,
          :file => snapshot_filename(name, snapshot_id)
        }
        save_snapshot(name, snapshot)
        @logger.info("Create new snapshot for #{name}:#{snapshot}")

        completed(Yajl::Encoder.encode(filter_keys(snapshot)))
        @logger.info("Complete job: #{self.class} for #{name}")
      end
    rescue => e
      handle_error(e)
      delete_snapshot(name, snapshot_id) if snapshot_id
      FileUtils.rm_rf(@snapshot_path) if @snapshot_path
    ensure
      set_status({:complete_time => Time.now.to_s})
      FileUtils.rm_rf(@temp_file_path) if @temp_file_path
    end
  end

  # empty
  def execute
    true
  end
end
|
364
|
+
end
|
365
|
+
end
|
@@ -0,0 +1,354 @@
|
|
1
|
+
# Copyright (c) 2009-2011 VMware, Inc.
|
2
|
+
require "resque-status"
|
3
|
+
require "fileutils"
|
4
|
+
|
5
|
+
require_relative "../service_error"
|
6
|
+
require_relative "./package.rb"
|
7
|
+
|
8
|
+
module VCAP::Services::Base::AsyncJob
|
9
|
+
module Snapshot
|
10
|
+
include VCAP::Services::Base::Error
|
11
|
+
|
12
|
+
SNAPSHOT_KEY_PREFIX = "vcap:snapshot".freeze
|
13
|
+
SNAPSHOT_ID = "maxid".freeze
|
14
|
+
FILTER_KEYS = %w(snapshot_id date size name).freeze
|
15
|
+
MAX_NAME_LENGTH = 512
|
16
|
+
|
17
|
+
# Module-level singleton state: one Redis handle shared by all snapshot
# helpers in this process.
class << self
  attr_reader :redis

  # Connect to Redis and make sure the shared counter key exists.
  def redis_connect
    @redis = ::Redis.new(Config.redis_config)

    redis_init
  end

  # initialize necessary keys
  def redis_init
    @redis.setnx("#{SNAPSHOT_KEY_PREFIX}:#{SNAPSHOT_ID}", 1)
  end
end
|
31
|
+
|
32
|
+
# Shared Redis connection established by Snapshot.redis_connect.
def client
  Snapshot.redis
end
|
35
|
+
|
36
|
+
# Get all snapshots related to a service instance
|
37
|
+
#
|
38
|
+
# Get all snapshots related to a service instance
#
def service_snapshots(service_id)
  return unless service_id
  raw_entries = client.hgetall(redis_key(service_id))
  raw_entries.values.map { |raw| Yajl::Parser.parse(raw) }
end
|
43
|
+
|
44
|
+
# Return total snapshots count
|
45
|
+
#
|
46
|
+
# Return total snapshots count
#
def service_snapshots_count(service_id)
  client.hlen(redis_key(service_id)) if service_id
end
|
50
|
+
|
51
|
+
# Get detail information for a single snapshot
|
52
|
+
#
|
53
|
+
# Get detail information for a single snapshot.
# Raises ServiceError::NOT_FOUND when the snapshot does not exist.
def snapshot_details(service_id, snapshot_id)
  return unless service_id && snapshot_id
  raw = client.hget(redis_key(service_id), snapshot_id)
  raise ServiceError.new(ServiceError::NOT_FOUND, "snapshot #{snapshot_id}") unless raw
  Yajl::Parser.parse(raw)
end
|
59
|
+
|
60
|
+
# filter internal keys of a given snapshot object, return a new snapshot object in canonical format
|
61
|
+
# filter internal keys of a given snapshot object, return a new snapshot object in canonical format
def filter_keys(snapshot)
  return unless snapshot.is_a?(Hash)
  snapshot.select { |key, _value| FILTER_KEYS.include?(key.to_s) }
end
|
65
|
+
|
66
|
+
# Generate a new unique id for a snapshot
|
67
|
+
# Generate a new unique id for a snapshot via an atomic Redis counter.
def new_snapshot_id
  client.incr(redis_key(SNAPSHOT_ID)).to_s
end
|
70
|
+
|
71
|
+
# Get the snapshot file path that service should save the dump file to.
|
72
|
+
# the snapshot path structure looks like <base_dir>\snapshots\<service-name>\<aa>\<bb>\<cc>\<aabbcc-rest-of-instance-guid>\snapshot_id\<service specific data>
|
73
|
+
# Get the snapshot file path that service should save the dump file to.
# the snapshot path structure looks like <base_dir>\snapshots\<service-name>\<aa>\<bb>\<cc>\<aabbcc-rest-of-instance-guid>\snapshot_id\<service specific data>
def snapshot_filepath(base_dir, service_name, service_id, snapshot_id)
  # shard directories come from the first six characters of the instance guid
  shards = [service_id[0, 2], service_id[2, 2], service_id[4, 2]]
  File.join(base_dir, "snapshots", service_name, *shards, service_id, snapshot_id.to_s)
end
|
76
|
+
|
77
|
+
# Update the name of given snapshot.
|
78
|
+
# This function is not protected by redis lock so a optimistic lock
|
79
|
+
# is applied to prevent concurrent update.
|
80
|
+
#
|
81
|
+
# Update the name of given snapshot.
# This function is not protected by redis lock so an optimistic lock
# (WATCH/MULTI/EXEC) is applied to prevent concurrent update.
#
# Returns true on success, nil when the snapshot does not exist; raises
# ServiceError::REDIS_CONCURRENT_UPDATE when the watched key changed.
def update_name(service_id, snapshot_id, name)
  return unless service_id && snapshot_id && name
  verify_input_name(name)

  key = redis_key(service_id)
  # NOTE: ideally should watch on combination of (service_id, snapshot_id)
  # but current design doesn't support such fine-grained watching.
  client.watch(key)

  snapshot = client.hget(redis_key(service_id), snapshot_id)
  unless snapshot
    # Fix: clear the WATCH set above before bailing out; otherwise it
    # lingers on the shared connection and can abort a later unrelated MULTI.
    client.unwatch
    return nil
  end
  snapshot = Yajl::Parser.parse(snapshot)
  snapshot["name"] = name

  # EXEC returns nil when the watched key was modified concurrently.
  res = client.multi do
    save_snapshot(service_id, snapshot)
  end

  unless res
    raise ServiceError.new(ServiceError::REDIS_CONCURRENT_UPDATE)
  end
  true
end
|
104
|
+
|
105
|
+
# Persist a snapshot hash (keyed by its snapshot_id) for a service instance.
# Accepts either symbol or string "snapshot_id" keys.
def save_snapshot(service_id, snapshot)
  return unless service_id && snapshot
  sid = snapshot[:snapshot_id] || snapshot["snapshot_id"]
  encoded = Yajl::Encoder.encode(snapshot)
  client.hset(redis_key(service_id), sid, encoded)
end
|
111
|
+
|
112
|
+
# Remove a single snapshot record for the given service instance.
def delete_snapshot(service_id, snapshot_id)
  client.hdel(redis_key(service_id), snapshot_id) if service_id && snapshot_id
end
|
116
|
+
|
117
|
+
|
118
|
+
# UTC time in ISO 8601 format.
def fmt_time
  Time.now.utc.strftime("%FT%TZ")
end
|
122
|
+
|
123
|
+
protected
|
124
|
+
|
125
|
+
# Namespace a key under the snapshot prefix.
def redis_key(key)
  [SNAPSHOT_KEY_PREFIX, key].join(":")
end
|
128
|
+
|
129
|
+
# Reject snapshot names longer than MAX_NAME_LENGTH; nil names are ignored.
def verify_input_name(name)
  return unless name

  if name.size > MAX_NAME_LENGTH
    raise ServiceError.new(ServiceError::INVALID_SNAPSHOT_NAME,
          "Input name exceed the max allowed #{MAX_NAME_LENGTH} characters.")
  end

  #TODO: shall we sanitize the input?
end
|
137
|
+
|
138
|
+
# common utils for snapshot job
|
139
|
+
# common utils for snapshot job
class SnapshotJob
  attr_reader :name, :snapshot_id

  include Snapshot
  include Resque::Plugins::Status

  class << self
    # Jobs are routed to the Resque queue named after this option key.
    def queue_lookup_key
      :node_id
    end

    # Pick the queue from the job arguments; the last hash argument
    # containing +queue_lookup_key+ wins.
    def select_queue(*args)
      result = nil
      args.each do |arg|
        result = arg[queue_lookup_key] if arg.is_a?(Hash) && arg.has_key?(queue_lookup_key)
      end
      @logger = Config.logger
      @logger.info("Select queue #{result} for job #{self.class} with args:#{args.inspect}") if @logger
      result
    end
  end

  def initialize(*args)
    super(*args)
    parse_config
    init_worker_logger
    Snapshot.redis_connect
  end

  # Render an exception plus its backtrace on one line for logging.
  def fmt_error(e)
    "#{e}: [#{e.backtrace.join(" | ")}]"
  end

  def init_worker_logger
    @logger = Config.logger
  end

  # Raise ArgumentError unless all named options were given to the job.
  def required_options(*args)
    missing_opts = args.select { |arg| !options.has_key?(arg.to_s) }
    raise ArgumentError, "Missing #{missing_opts.join(', ')} in options: #{options.inspect}" unless missing_opts.empty?
  end

  # Build (but do not acquire) the lifecycle lock for this instance.
  def create_lock
    ttl = @config['job_ttl'] || 600
    Lock.new("lock:lifecycle:#{name}", :logger => @logger, :ttl => ttl)
  end

  def get_dump_path(name, snapshot_id)
    snapshot_filepath(@config["snapshots_base_dir"], @config["service_name"], name, snapshot_id)
  end

  def parse_config
    @config = Yajl::Parser.parse(ENV['WORKER_CONFIG'])
    raise "Need environment variable: WORKER_CONFIG" unless @config
  end

  # Drop the snapshot record and its on-disk dump directory.
  def cleanup(name, snapshot_id)
    return unless name && snapshot_id
    @logger.info("Clean up snapshot and files for #{name}, snapshot id: #{snapshot_id}")
    delete_snapshot(name, snapshot_id)
    FileUtils.rm_rf(get_dump_path(name, snapshot_id))
  end

  # Log the failure and mark the job failed with a JSON-encoded message.
  # Non-ServiceError exceptions are reported as INTERNAL_ERROR.
  def handle_error(e)
    @logger.error("Error in #{self.class} uuid:#{@uuid}: #{fmt_error(e)}")
    err = (e.instance_of?(ServiceError) ? e : ServiceError.new(ServiceError::INTERNAL_ERROR)).to_hash
    failed(Yajl::Encoder.encode(err["msg"]))
  end
end
|
211
|
+
|
212
|
+
class BaseCreateSnapshotJob < SnapshotJob
  # workflow template
  # Sub class should implement execute method which returns hash represents of snapshot like:
  # {:snapshot_id => 1,
  #  :size => 100,
  #  :files => ["my_snapshot.tgz", "readme.txt"]
  #  :manifest => {:version => '1', :service => 'mysql'}
  # }
  def perform
    begin
      required_options :service_id
      @name = options["service_id"]
      @metadata = VCAP.symbolize_keys(options["metadata"])
      @logger.info("Launch job: #{self.class} for #{name} with metadata: #{@metadata}")

      @snapshot_id = new_snapshot_id
      lock = create_lock

      @snapshot_files = []
      lock.lock do
        quota = @config["snapshot_quota"]
        if quota
          current = service_snapshots_count(name)
          @logger.debug("Current snapshots count for #{name}: #{current}, max: #{quota}")
          raise ServiceError.new(ServiceError::OVER_QUOTA, name, current, quota) if current >= quota
        end

        snapshot = execute
        snapshot = VCAP.symbolize_keys snapshot
        snapshot[:manifest] ||= {}
        snapshot[:manifest].merge! @metadata
        @logger.info("Results of create snapshot: #{snapshot.inspect}")

        # pack snapshot_file into package
        dump_path = get_dump_path(name, snapshot_id)
        FileUtils.mkdir_p(dump_path)
        package_file = "#{snapshot_id}.zip"

        package = Package.new(File.join(dump_path, package_file))
        package.manifest = snapshot[:manifest]
        files = Array(snapshot[:files])
        raise "No snapshot file to package." if files.empty?
        files.each do |f|
          full_path = File.join(dump_path, f)
          @snapshot_files << full_path
          package.add_files full_path
        end
        package.pack(dump_path)
        @logger.info("Package snapshot file: #{File.join(dump_path, package_file)}")

        # update snapshot metadata for package file
        snapshot.delete(:files)
        snapshot[:file] = package_file
        snapshot[:date] = fmt_time
        # add default service name
        snapshot[:name] = "Snapshot #{snapshot[:date]}"

        save_snapshot(name, snapshot)

        completed(Yajl::Encoder.encode(filter_keys(snapshot)))
        @logger.info("Complete job: #{self.class} for #{name}")
      end
    rescue => e
      cleanup(name, snapshot_id)
      handle_error(e)
    ensure
      set_status({:complete_time => Time.now.to_s})
      # File.exists? was deprecated and removed in Ruby 3.2; use File.exist?
      @snapshot_files.each { |f| File.delete(f) if File.exist? f } if @snapshot_files
    end
  end
end
|
283
|
+
|
284
|
+
class BaseDeleteSnapshotJob < SnapshotJob
  # Workflow template: run +execute+ under the lifecycle lock, then remove
  # the snapshot record and report success.
  def perform
    required_options :service_id, :snapshot_id
    @name = options["service_id"]
    @snapshot_id = options["snapshot_id"]
    @logger.info("Launch job: #{self.class} for #{name}")

    lock = create_lock

    lock.lock do
      result = execute
      @logger.info("Results of delete snapshot: #{result}")

      delete_snapshot(name, snapshot_id)

      completed(Yajl::Encoder.encode({:result => :ok}))
      @logger.info("Complete job: #{self.class} for #{name}")
    end
  rescue => e
    handle_error(e)
  ensure
    set_status({:complete_time => Time.now.to_s})
  end

  # Default behavior: delete the snapshot record and its on-disk files.
  def execute
    cleanup(name, snapshot_id)
  end
end
|
314
|
+
|
315
|
+
class BaseRollbackSnapshotJob < SnapshotJob
  attr_reader :manifest, :snapshot_files
  # workflow template
  # Subclass implement execute method which returns true for a successful rollback
  def perform
    begin
      required_options :service_id, :snapshot_id
      @name = options["service_id"]
      @snapshot_id = options["snapshot_id"]
      @logger.info("Launch job: #{self.class} for #{name}")

      lock = create_lock

      @snapshot_files = []
      lock.lock do
        # extract origin files from package
        dump_path = get_dump_path(name, snapshot_id)
        package_file = "#{snapshot_id}.zip"
        package = Package.load(File.join(dump_path, package_file))
        @manifest = package.manifest
        @snapshot_files = package.unpack(dump_path)
        @logger.debug("Unpack files from #{package_file}: #{@snapshot_files}")
        raise "Package file doesn't contain snapshot file." if @snapshot_files.empty?

        result = execute
        @logger.info("Results of rollback snapshot: #{result}")

        completed(Yajl::Encoder.encode({:result => :ok}))
        @logger.info("Complete job: #{self.class} for #{name}")
      end
    rescue => e
      handle_error(e)
    ensure
      set_status({:complete_time => Time.now.to_s})
      # File.exists? was deprecated and removed in Ruby 3.2; use File.exist?
      @snapshot_files.each { |f| File.delete(f) if File.exist? f } if @snapshot_files
    end
  end
end
|
353
|
+
end
|
354
|
+
end
|