vmpooler 2.1.0 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/vmpooler/api/helpers.rb +363 -294
- data/lib/vmpooler/api/v1.rb +482 -377
- data/lib/vmpooler/providers/base.rb +1 -1
- data/lib/vmpooler/version.rb +1 -1
- data/lib/vmpooler.rb +2 -1
- metadata +36 -16
data/lib/vmpooler/api/v1.rb
CHANGED
@@ -49,13 +49,15 @@ module Vmpooler
       end

       def get_template_aliases(template)
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = []
+          aliases = Vmpooler::API.settings.config[:alias]
+          if aliases
+            result += aliases[template] if aliases[template].is_a?(Array)
+            template_backends << aliases[template] if aliases[template].is_a?(String)
+          end
+          result
         end
-        result
       end

       def get_pool_weights(template_backends)
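The hunk above sets the pattern for the rest of this release: each helper body is wrapped in an OpenTelemetry span named after the class and method. A minimal sketch of what `tracer.in_span` does, assuming the opentelemetry-sdk gem; the tracer name and attribute are illustrative, not vmpooler's actual wiring:

```ruby
# Sketch only: 'vmpooler-sketch' and the attribute key are made up.
require 'opentelemetry/sdk'

OpenTelemetry::SDK.configure # default/env-driven exporter configuration

TRACER = OpenTelemetry.tracer_provider.tracer('vmpooler-sketch')

def get_template_aliases(template)
  # in_span starts a span, yields it, records and re-raises any exception,
  # always ends the span, and returns the block's value.
  TRACER.in_span("Vmpooler::API::V1.#{__method__}") do |span|
    span.set_attribute('vmpooler.template', template)
    [] # the real alias lookup goes here
  end
end
```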
@@ -109,398 +111,463 @@ module Vmpooler
       end

       def fetch_single_vm(template)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          template_backends = [template]
+          aliases = Vmpooler::API.settings.config[:alias]
+          if aliases
+            template_backends += aliases[template] if aliases[template].is_a?(Array)
+            template_backends << aliases[template] if aliases[template].is_a?(String)
+            pool_index = pool_index(pools)
+            weighted_pools = {}
+            template_backends.each do |t|
+              next unless pool_index.key? t
+
+              index = pool_index[t]
+              clone_target = pools[index]['clone_target'] || config['clone_target']
+              next unless config.key?('backend_weight')
+
+              weight = config['backend_weight'][clone_target]
+              if weight
+                weighted_pools[t] = weight
+              end
             end
-          end

-
-
-
-
-
-
-
-
+            if weighted_pools.count == template_backends.count
+              pickup = Pickup.new(weighted_pools)
+              selection = pickup.pick
+              template_backends.delete(selection)
+              template_backends.unshift(selection)
+            else
+              first = template_backends.sample
+              template_backends.delete(first)
+              template_backends.unshift(first)
+            end
           end
-        end

-
-
-
-
-
-
-
-
-
-
-
+          checkoutlock.synchronize do
+            template_backends.each do |template_backend|
+              vms = backend.smembers("vmpooler__ready__#{template_backend}")
+              next if vms.empty?
+
+              vms.reverse.each do |vm|
+                ready = vm_ready?(vm, config['domain'])
+                if ready
+                  smoved = backend.smove("vmpooler__ready__#{template_backend}", "vmpooler__running__#{template_backend}", vm)
+                  if smoved
+                    return [vm, template_backend, template]
+                  else
+                    metrics.increment("checkout.smove.failed.#{template_backend}")
+                    return [nil, nil, nil]
+                  end
                 else
-
-
+                  backend.smove("vmpooler__ready__#{template_backend}", "vmpooler__completed__#{template_backend}", vm)
+                  metrics.increment("checkout.nonresponsive.#{template_backend}")
                 end
-              else
-                backend.smove("vmpooler__ready__#{template_backend}", "vmpooler__completed__#{template_backend}", vm)
-                metrics.increment("checkout.nonresponsive.#{template_backend}")
               end
             end
+            [nil, nil, nil]
           end
-        [nil, nil, nil]
         end
       end
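Worth noting in `fetch_single_vm`: 2.1.0 ignored the reply from `SMOVE`, while 2.2.0 checks it. `SMOVE` is atomic and returns false when the member was already gone, i.e. another API worker won the race for the same ready VM. A sketch of the race, assuming redis-rb; the key names mirror the diff but the pool and VM names are made up:

```ruby
# Sketch, assuming redis-rb.
require 'redis'

redis = Redis.new
pool  = 'debian-10-x86_64'   # illustrative
vm    = 'fragrant-mongoose'  # illustrative

# If two workers race for the same VM, exactly one smove returns true.
# 2.2.0 treats false as a lost race and bumps checkout.smove.failed.<pool>
# instead of handing the same VM to two callers.
if redis.smove("vmpooler__ready__#{pool}", "vmpooler__running__#{pool}", vm)
  puts "checked out #{vm}"
else
  puts "lost the race for #{vm}"
end
```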

       def return_vm_to_ready_state(template, vm)
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          backend.srem("vmpooler__migrating__#{template}", vm)
+          backend.hdel("vmpooler__active__#{template}", vm)
+          backend.hdel("vmpooler__vm__#{vm}", 'checkout', 'token:token', 'token:user')
+          backend.smove("vmpooler__running__#{template}", "vmpooler__ready__#{template}", vm)
+        end
       end

       def account_for_starting_vm(template, vm)
-
-
-
-
-
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do |span|
+          user = backend.hget("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}", 'user')
+          span.set_attribute('enduser.id', user)
+          has_token_result = has_token?
+          backend.sadd("vmpooler__migrating__#{template}", vm)
+          backend.hset("vmpooler__active__#{template}", vm, Time.now)
+          backend.hset("vmpooler__vm__#{vm}", 'checkout', Time.now)
+
+          if Vmpooler::API.settings.config[:auth] and has_token_result
+            backend.hset("vmpooler__vm__#{vm}", 'token:token', request.env['HTTP_X_AUTH_TOKEN'])
+            backend.hset("vmpooler__vm__#{vm}", 'token:user', user)
+
+            if config['vm_lifetime_auth'].to_i > 0
+              backend.hset("vmpooler__vm__#{vm}", 'lifetime', config['vm_lifetime_auth'].to_i)
+            end
           end
         end
       end

       def update_result_hosts(result, template, vm)
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result[template] ||= {}
+          if result[template]['hostname']
+            result[template]['hostname'] = Array(result[template]['hostname'])
+            result[template]['hostname'].push(vm)
+          else
+            result[template]['hostname'] = vm
+          end
         end
       end

       def atomically_allocate_vms(payload)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do |span|
+          result = { 'ok' => false }
+          failed = false
+          vms = []
+
+          validate_token(backend) if Vmpooler::API.settings.config[:auth] and has_token?
+
+          payload.each do |requested, count|
+            count.to_i.times do |_i|
+              vmname, vmpool, vmtemplate = fetch_single_vm(requested)
+              if vmname
+                account_for_starting_vm(vmpool, vmname)
+                vms << [vmpool, vmname, vmtemplate]
+                metrics.increment("checkout.success.#{vmpool}")
+                update_user_metrics('allocate', vmname) if Vmpooler::API.settings.config[:config]['usage_stats']
+              else
+                failed = true
+                metrics.increment("checkout.empty.#{requested}")
+                break
+              end
            end
          end
-      end

-
-
-
-
-
-
-
-
+          if failed
+            vms.each do |(vmpool, vmname, _vmtemplate)|
+              return_vm_to_ready_state(vmpool, vmname)
+            end
+            span.add_event('error', attributes: {
+              'error.type' => 'Vmpooler::API::V1.atomically_allocate_vms',
+              'error.message' => '503 due to failing to allocate one or more vms'
+            })
+            status 503
+          else
+            vm_names = []
+            vms.each do |(_vmpool, vmname, vmtemplate)|
+              update_result_hosts(result, vmtemplate, vmname)
+              vm_names.append(vmname)
+            end
+
+            span.set_attribute('vmpooler.vm_names', vm_names.join(',')) unless vm_names.empty?
+
+            result['ok'] = true
+            result['domain'] = config['domain'] if config['domain']
           end

-        result
-        result['domain'] = config['domain'] if config['domain']
+          result
         end
-
-        result
       end

       def component_to_test(match, labels_string)
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          return if labels_string.nil?
+
+          labels_string_parts = labels_string.split(',')
+          labels_string_parts.each do |part|
+            key, value = part.split('=')
+            next if value.nil?
+            return value if key == match
+          end
+          'none'
         end
-        'none'
       end

       def update_user_metrics(operation, vmname)
-
-
-
-
-
-
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do |span|
+          begin
+            backend.multi
+            backend.hget("vmpooler__vm__#{vmname}", 'tag:jenkins_build_url')
+            backend.hget("vmpooler__vm__#{vmname}", 'token:user')
+            backend.hget("vmpooler__vm__#{vmname}", 'template')
+            jenkins_build_url, user, poolname = backend.exec
+            poolname = poolname.gsub('.', '_')
+
+            if user
+              user = user.gsub('.', '_')
+            else
+              user = 'unauthenticated'
+            end
+            metrics.increment("user.#{user}.#{operation}.#{poolname}")

-
-
-
-
-
+            if jenkins_build_url
+              if jenkins_build_url.include? 'litmus'
+                # Very simple filter for Litmus jobs - just count them coming through for the moment.
+                metrics.increment("usage_litmus.#{user}.#{operation}.#{poolname}")
+              else
+                url_parts = jenkins_build_url.split('/')[2..-1]
+                jenkins_instance = url_parts[0].gsub('.', '_')
+                value_stream_parts = url_parts[2].split('_')
+                value_stream_parts = value_stream_parts.map { |s| s.gsub('.', '_') }
+                value_stream = value_stream_parts.shift
+                branch = value_stream_parts.pop
+                project = value_stream_parts.shift
+                job_name = value_stream_parts.join('_')
+                build_metadata_parts = url_parts[3]
+                component_to_test = component_to_test('RMM_COMPONENT_TO_TEST_NAME', build_metadata_parts)
+
+                metrics.increment("usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{operation}.#{poolname}")
+                metrics.increment("usage_branch_project.#{branch}.#{project}.#{operation}.#{poolname}")
+                metrics.increment("usage_job_component.#{job_name}.#{component_to_test}.#{operation}.#{poolname}")
+              end
+            end
+          rescue StandardError => e
+            puts 'd', "[!] [#{poolname}] failed while evaluating usage labels on '#{vmname}' with an error: #{e}"
+            span.record_exception(e)
+            span.status = OpenTelemetry::Trace::Status.error(e.to_s)
+            span.add_event('log', attributes: {
+              'log.severity' => 'debug',
+              'log.message' => "[#{poolname}] failed while evaluating usage labels on '#{vmname}' with an error: #{e}"
+            })
           end
-
-          url_parts = jenkins_build_url.split('/')[2..-1]
-          jenkins_instance = url_parts[0].gsub('.', '_')
-          value_stream_parts = url_parts[2].split('_')
-          value_stream_parts = value_stream_parts.map { |s| s.gsub('.', '_') }
-          value_stream = value_stream_parts.shift
-          branch = value_stream_parts.pop
-          project = value_stream_parts.shift
-          job_name = value_stream_parts.join('_')
-          build_metadata_parts = url_parts[3]
-          component_to_test = component_to_test('RMM_COMPONENT_TO_TEST_NAME', build_metadata_parts)
-
-          metrics.increment("usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{operation}.#{poolname}")
-          metrics.increment("usage_branch_project.#{branch}.#{project}.#{operation}.#{poolname}")
-          metrics.increment("usage_job_component.#{job_name}.#{component_to_test}.#{operation}.#{poolname}")
         end
-      rescue StandardError => e
-        puts 'd', "[!] [#{poolname}] failed while evaluating usage labels on '#{vmname}' with an error: #{e}"
       end
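`update_user_metrics` now issues its three `HGET`s between `backend.multi` and `backend.exec`, so the reads travel as one MULTI/EXEC transaction and come back as a single array of replies. The block form expresses the same thing in redis-rb; a sketch with a made-up VM name:

```ruby
# Sketch, assuming redis-rb. EXEC returns the queued replies in order,
# which is what the destructuring assignment in the diff relies on.
require 'redis'

redis  = Redis.new
vmname = 'fragrant-mongoose' # illustrative

jenkins_build_url, user, poolname = redis.multi do |tx|
  tx.hget("vmpooler__vm__#{vmname}", 'tag:jenkins_build_url')
  tx.hget("vmpooler__vm__#{vmname}", 'token:user')
  tx.hget("vmpooler__vm__#{vmname}", 'template')
end
```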

       def reset_pool_size(poolname)
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = { 'ok' => false }

-
+          pool_index = pool_index(pools)

-
-
+          pools_updated = 0
+          sync_pool_sizes

-
-
-
-
+          pool_size_now = pools[pool_index[poolname]]['size'].to_i
+          pool_size_original = pools_at_startup[pool_index[poolname]]['size'].to_i
+          result['pool_size_before_reset'] = pool_size_now
+          result['pool_size_before_overrides'] = pool_size_original

-
-
-
-
-
-
-
+          unless pool_size_now == pool_size_original
+            pools[pool_index[poolname]]['size'] = pool_size_original
+            backend.hdel('vmpooler__config__poolsize', poolname)
+            backend.sadd('vmpooler__pool__undo_size_override', poolname)
+            pools_updated += 1
+            status 201
+          end

-
-
-
+          status 200 unless pools_updated > 0
+          result['ok'] = true
+          result
+        end
       end

       def update_pool_size(payload)
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = { 'ok' => false }

-
-
-
+          pool_index = pool_index(pools)
+          pools_updated = 0
+          sync_pool_sizes

-
-
-
-
-
-
+          payload.each do |poolname, size|
+            unless pools[pool_index[poolname]]['size'] == size.to_i
+              pools[pool_index[poolname]]['size'] = size.to_i
+              backend.hset('vmpooler__config__poolsize', poolname, size)
+              pools_updated += 1
+              status 201
+            end
           end
+          status 200 unless pools_updated > 0
+          result['ok'] = true
+          result
         end
-        status 200 unless pools_updated > 0
-        result['ok'] = true
-        result
       end

       def reset_pool_template(poolname)
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = { 'ok' => false }
+
+          pool_index_live = pool_index(pools)
+          pool_index_original = pool_index(pools_at_startup)

-
-
+          pools_updated = 0
+          sync_pool_templates

-
-
+          template_now = pools[pool_index_live[poolname]]['template']
+          template_original = pools_at_startup[pool_index_original[poolname]]['template']
+          result['template_before_reset'] = template_now
+          result['template_before_overrides'] = template_original

-
-
-
-
+          unless template_now == template_original
+            pools[pool_index_live[poolname]]['template'] = template_original
+            backend.hdel('vmpooler__config__template', poolname)
+            backend.sadd('vmpooler__pool__undo_template_override', poolname)
+            pools_updated += 1
+            status 201
+          end

-
-
-
-          backend.sadd('vmpooler__pool__undo_template_override', poolname)
-          pools_updated += 1
-          status 201
+          status 200 unless pools_updated > 0
+          result['ok'] = true
+          result
         end
-
-        status 200 unless pools_updated > 0
-        result['ok'] = true
-        result
       end

       def update_pool_template(payload)
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = { 'ok' => false }

-
-
-
+          pool_index = pool_index(pools)
+          pools_updated = 0
+          sync_pool_templates

-
-
-
-
-
-
+          payload.each do |poolname, template|
+            unless pools[pool_index[poolname]]['template'] == template
+              pools[pool_index[poolname]]['template'] = template
+              backend.hset('vmpooler__config__template', poolname, template)
+              pools_updated += 1
+              status 201
+            end
           end
+          status 200 unless pools_updated > 0
+          result['ok'] = true
+          result
         end
-        status 200 unless pools_updated > 0
-        result['ok'] = true
-        result
       end

       def reset_pool(payload)
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = { 'ok' => false }

-
-
+          payload.each do |poolname, _count|
+            backend.sadd('vmpooler__poolreset', poolname)
+          end
+          status 201
+          result['ok'] = true
+          result
         end
-        status 201
-        result['ok'] = true
-        result
       end

       def update_clone_target(payload)
-
-
-        pool_index = pool_index(pools)
-        pools_updated = 0
-        sync_clone_targets
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          result = { 'ok' => false }

-
-
-
-
-
-
+          pool_index = pool_index(pools)
+          pools_updated = 0
+          sync_clone_targets
+
+          payload.each do |poolname, clone_target|
+            unless pools[pool_index[poolname]]['clone_target'] == clone_target
+              pools[pool_index[poolname]]['clone_target'] = clone_target
+              backend.hset('vmpooler__config__clone_target', poolname, clone_target)
+              pools_updated += 1
+              status 201
+            end
           end
+          status 200 unless pools_updated > 0
+          result['ok'] = true
+          result
         end
-        status 200 unless pools_updated > 0
-        result['ok'] = true
-        result
       end

       def sync_pool_templates
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          pool_index = pool_index(pools)
+          template_configs = backend.hgetall('vmpooler__config__template')
+          template_configs&.each do |poolname, template|
+            next unless pool_index.include? poolname

-
+            pools[pool_index[poolname]]['template'] = template
+          end
         end
       end

       def sync_pool_sizes
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          pool_index = pool_index(pools)
+          poolsize_configs = backend.hgetall('vmpooler__config__poolsize')
+          poolsize_configs&.each do |poolname, size|
+            next unless pool_index.include? poolname

-
+            pools[pool_index[poolname]]['size'] = size.to_i
+          end
         end
       end

       def sync_clone_targets
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          pool_index = pool_index(pools)
+          clone_target_configs = backend.hgetall('vmpooler__config__clone_target')
+          clone_target_configs&.each do |poolname, clone_target|
+            next unless pool_index.include? poolname

-
+            pools[pool_index[poolname]]['clone_target'] = clone_target
+          end
         end
       end

       def too_many_requested?(payload)
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          payload&.each do |poolname, count|
+            next unless count.to_i > config['max_ondemand_instances_per_request']

-
-
+            metrics.increment("ondemandrequest_fail.toomanyrequests.#{poolname}")
+            return true
+          end
+          false
         end
-        false
       end

       def generate_ondemand_request(payload)
-
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do |span|
+          result = { 'ok': false }
+
+          requested_instances = payload.reject { |k, _v| k == 'request_id' }
+          if too_many_requested?(requested_instances)
+            e_message = "requested amount of instances exceeds the maximum #{config['max_ondemand_instances_per_request']}"
+            result['message'] = e_message
+            status 403
+            span.add_event('error', attributes: {
+              'error.type' => 'Vmpooler::API::V1.generate_ondemand_request',
+              'error.message' => "403 due to #{e_message}"
+            })
+            return result
+          end

-
-
-
-
+          score = Time.now.to_i
+          request_id = payload['request_id']
+          request_id ||= generate_request_id
+          result['request_id'] = request_id
+          span.set_attribute('vmpooler.request_id', request_id)
+
+          if backend.exists?("vmpooler__odrequest__#{request_id}")
+            e_message = "request_id '#{request_id}' has already been created"
+            result['message'] = e_message
+            status 409
+            span.add_event('error', attributes: {
+              'error.type' => 'Vmpooler::API::V1.generate_ondemand_request',
+              'error.message' => "409 due to #{e_message}"
+            })
+            metrics.increment('ondemandrequest_generate.duplicaterequests')
+            return result
+          end

-
-          result['message'] = "request_id '#{request_id}' has already been created"
-          status 409
-          metrics.increment('ondemandrequest_generate.duplicaterequests')
-          return result
-        end
+          status 201

-
+          platforms_with_aliases = []
+          requested_instances.each do |poolname, count|
+            selection = evaluate_template_aliases(poolname, count)
+            selection.map { |selected_pool, selected_pool_count| platforms_with_aliases << "#{poolname}:#{selected_pool}:#{selected_pool_count}" }
+          end
+          platforms_string = platforms_with_aliases.join(',')

-
-        requested_instances.each do |poolname, count|
-          selection = evaluate_template_aliases(poolname, count)
-          selection.map { |selected_pool, selected_pool_count| platforms_with_aliases << "#{poolname}:#{selected_pool}:#{selected_pool_count}" }
-        end
-        platforms_string = platforms_with_aliases.join(',')
+          return result unless backend.zadd('vmpooler__provisioning__request', score, request_id)

-
+          backend.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms_string)
+          if Vmpooler::API.settings.config[:auth] and has_token?
+            token_token = request.env['HTTP_X_AUTH_TOKEN']
+            token_user = backend.hget("vmpooler__token__#{token_token}", 'user')
+            backend.hset("vmpooler__odrequest__#{request_id}", 'token:token', token_token)
+            backend.hset("vmpooler__odrequest__#{request_id}", 'token:user', token_user)
+            span.set_attribute('enduser.id', token_user)
+          end

-
-
-
-
-          backend.hget("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}", 'user'))
+          result['domain'] = config['domain'] if config['domain']
+          result[:ok] = true
+          metrics.increment('ondemandrequest_generate.success')
+          result
         end
-
-        result['domain'] = config['domain'] if config['domain']
-        result[:ok] = true
-        metrics.increment('ondemandrequest_generate.success')
-        result
       end

       def generate_request_id
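As the rewritten `generate_ondemand_request` shows, each on-demand request is recorded in two places: a `vmpooler__provisioning__request` sorted set scored by the request time, and a `vmpooler__odrequest__<id>` hash holding the expanded platform string and token details. A sketch of that bookkeeping, assuming redis-rb; the id and platform string (the diff's `poolname:selected_pool:count` format) are made up:

```ruby
# Sketch, assuming redis-rb.
require 'redis'
require 'securerandom'

redis      = Redis.new
request_id = SecureRandom.uuid # illustrative id
score      = Time.now.to_i

# zadd returns true only when the member is newly added, so a duplicate
# request_id falls through without being re-queued.
if redis.zadd('vmpooler__provisioning__request', score, request_id)
  redis.hset("vmpooler__odrequest__#{request_id}", 'requested', 'centos-7:centos-7-x86_64:2')
end
```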
@@ -813,6 +880,8 @@ module Vmpooler
           data = backend.hgetall(key)

           if data['user'] == Rack::Auth::Basic::Request.new(request.env).username
+            span = OpenTelemetry::Trace.current_span
+            span.set_attribute('enduser.id', data['user'])
             token = key.split('__').last

             result[token] ||= {}
@@ -899,6 +968,8 @@ module Vmpooler

           backend.hset("vmpooler__token__#{result['token']}", 'user', @auth.username)
           backend.hset("vmpooler__token__#{result['token']}", 'created', Time.now)
+          span = OpenTelemetry::Trace.current_span
+          span.set_attribute('enduser.id', @auth.username)

           status 200
           result['ok'] = true
@@ -946,6 +1017,8 @@ module Vmpooler
             status 404
           end
         rescue JSON::ParserError
+          span = OpenTelemetry::Trace.current_span
+          span.status = OpenTelemetry::Trace::Status.error('JSON payload could not be parsed')
           status 400
           result = {
             'ok' => false,
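The three small hunks above share one pattern: the route handlers do not open spans of their own, they annotate whatever span is already active (presumably started by the OpenTelemetry Sinatra/Rack instrumentation) via `OpenTelemetry::Trace.current_span`. A sketch, assuming the opentelemetry-api gem and an ambient span; the user id is illustrative:

```ruby
# Sketch, assuming opentelemetry-api. Without an active span this returns
# a no-op span, so the calls below are safe either way.
require 'opentelemetry'

span = OpenTelemetry::Trace.current_span
span.set_attribute('enduser.id', 'some-user')
span.status = OpenTelemetry::Trace::Status.error('JSON payload could not be parsed')
```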
@@ -1031,134 +1104,160 @@ module Vmpooler
       end

       def extract_templates_from_query_params(params)
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          payload = {}

-
-
-
-
+          params.split('+').each do |template|
+            payload[template] ||= 0
+            payload[template] += 1
+          end

-
+          payload
+        end
       end

       def invalid_templates(payload)
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          invalid = []
+          payload.keys.each do |template|
+            invalid << template unless pool_exists?(template)
+          end
+          invalid
         end
-        invalid
       end

       def invalid_template_or_size(payload)
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          invalid = []
+          payload.each do |pool, size|
+            invalid << pool unless pool_exists?(pool)
+            unless is_integer?(size)
+              invalid << pool
+              next
+            end
+            invalid << pool unless Integer(size) >= 0
           end
-        invalid
+          invalid
         end
-        invalid
       end

       def invalid_template_or_path(payload)
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          invalid = []
+          payload.each do |pool, template|
+            invalid << pool unless pool_exists?(pool)
+            invalid << pool unless template.include? '/'
+            invalid << pool if template[0] == '/'
+            invalid << pool if template[-1] == '/'
+          end
+          invalid
         end
-        invalid
       end

       def invalid_pool(payload)
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do
+          invalid = []
+          payload.each do |pool, _clone_target|
+            invalid << pool unless pool_exists?(pool)
+          end
+          invalid
         end
-        invalid
       end

       def check_ondemand_request(request_id)
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do |span|
+          span.set_attribute('vmpooler.request_id', request_id)
+          result = { 'ok' => false }
+          request_hash = backend.hgetall("vmpooler__odrequest__#{request_id}")
+          if request_hash.empty?
+            e_message = "no request found for request_id '#{request_id}'"
+            result['message'] = e_message
+            span.add_event('error', attributes: {
+              'error.type' => 'Vmpooler::API::V1.check_ondemand_request',
+              'error.message' => e_message
+            })
+            return result
+          end

-
-
-
-
+          result['request_id'] = request_id
+          result['ready'] = false
+          result['ok'] = true
+          status 202

-
-
-
-
-
+          case request_hash['status']
+          when 'ready'
+            result['ready'] = true
+            Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, _count|
+              instances = backend.smembers("vmpooler__#{request_id}__#{platform_alias}__#{pool}")

-
-
-
-
+              if result.key?(platform_alias)
+                result[platform_alias][:hostname] = result[platform_alias][:hostname] + instances
+              else
+                result[platform_alias] = { 'hostname': instances }
+              end
             end
-
-
-
-
-
-
-
-
-
-
-
-
-            instances_pending = count.to_i - instance_count.to_i
+            result['domain'] = config['domain'] if config['domain']
+            status 200
+          when 'failed'
+            result['message'] = "The request failed to provision instances within the configured ondemand_request_ttl '#{config['ondemand_request_ttl']}'"
+            status 200
+          when 'deleted'
+            result['message'] = 'The request has been deleted'
+            status 200
+          else
+            Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, count|
+              instance_count = backend.scard("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
+              instances_pending = count.to_i - instance_count.to_i

-
-
-
-
-
-
-
-
+              if result.key?(platform_alias) && result[platform_alias].key?(:ready)
+                result[platform_alias][:ready] = (result[platform_alias][:ready].to_i + instance_count).to_s
+                result[platform_alias][:pending] = (result[platform_alias][:pending].to_i + instances_pending).to_s
+              else
+                result[platform_alias] = {
+                  'ready': instance_count.to_s,
+                  'pending': instances_pending.to_s
+                }
+              end
             end
           end
-          end

-
+          result
+        end
       end

       def delete_ondemand_request(request_id)
-
-
-
-
-
-
-
+        tracer.in_span("Vmpooler::API::V1.#{__method__}") do |span|
+          span.set_attribute('vmpooler.request_id', request_id)
+          result = { 'ok' => false }
+
+          platforms = backend.hget("vmpooler__odrequest__#{request_id}", 'requested')
+          unless platforms
+            e_message = "no request found for request_id '#{request_id}'"
+            result['message'] = e_message
+            span.add_event('error', attributes: {
+              'error.type' => 'Vmpooler::API::V1.delete_ondemand_request',
+              'error.message' => e_message
+            })
+            return result
+          end

-
-
-
-
+          if backend.hget("vmpooler__odrequest__#{request_id}", 'status') == 'deleted'
+            result['message'] = 'the request has already been deleted'
+          else
+            backend.hset("vmpooler__odrequest__#{request_id}", 'status', 'deleted')

-
-
-
+            Parsing.get_platform_pool_count(platforms) do |platform_alias, pool, _count|
+              backend.smembers("vmpooler__#{request_id}__#{platform_alias}__#{pool}")&.each do |vm|
+                backend.smove("vmpooler__running__#{pool}", "vmpooler__completed__#{pool}", vm)
+              end
+              backend.del("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
             end
-          backend.
+            backend.expire("vmpooler__odrequest__#{request_id}", 129_600_0)
           end
-
+          status 200
+          result['ok'] = true
+          result
         end
-        status 200
-        result['ok'] = true
-        result
       end

       post "#{api_prefix}/vm/:template/?" do
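One detail easy to miss in `delete_ondemand_request`: instead of removing the request hash outright, 2.2.0 marks it `deleted`, moves any provisioned VMs to the completed queue, and sets a TTL with `backend.expire(..., 129_600_0)` — 1,296,000 seconds, i.e. 15 days — so the record ages out of Redis on its own.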
@@ -1303,7 +1402,10 @@ module Vmpooler
         if backend.exists?("vmpooler__vm__#{params[:hostname]}")
           begin
             jdata = JSON.parse(request.body.read)
-          rescue StandardError
+          rescue StandardError => e
+            span = OpenTelemetry::Trace.current_span
+            span.record_exception(e)
+            span.status = OpenTelemetry::Trace::Status.error(e.to_s)
             halt 400, JSON.pretty_generate(result)
           end

@@ -1559,6 +1661,9 @@ module Vmpooler
             status 404
           end
         rescue JSON::ParserError
+          span = OpenTelemetry::Trace.current_span
+          span.record_exception(e)
+          span.status = OpenTelemetry::Trace::Status.error('JSON payload could not be parsed')
           status 400
           result = {
             'ok' => false,