vmpooler 0.13.2 → 0.14.3

@@ -0,0 +1,121 @@
+ # frozen_string_literal: true
+
+ # This is an adapted Collector module for vmpooler based on the sample implementation
+ # available in the prometheus client_ruby library
+ # https://github.com/prometheus/client_ruby/blob/master/lib/prometheus/middleware/collector.rb
+ #
+ # The code was also failing Rubocop on the PR check, so all of the offenses have been addressed.
+ #
+ # The method strip_hostnames_from_path (originally strip_ids_from_path) has been adapted
+ # to match hostnames in paths and replace them with a single ":hostname" string, to
+ # avoid a proliferation of stat lines for each new vm hostname deleted, modified or
+ # otherwise queried.
+
+ require 'benchmark'
+ require 'prometheus/client'
+ require 'vmpooler/logger'
+
+ module Vmpooler
+   class Metrics
+     class Promstats
+       # CollectorMiddleware is an implementation of Rack Middleware customised
+       # for vmpooler use.
+       #
+       # By default metrics are registered on the global registry. Set the
+       # `:registry` option to use a custom registry.
+       #
+       # By default all metrics have the prefix "http_server". Set the
+       # `:metrics_prefix` option to use something else.
+       #
+       # The request counter metric is broken down by code, method and path by
+       # default. Set the `:counter_label_builder` option to use a custom label
+       # builder.
+       #
+       # The request duration metric is broken down by method and path by default.
+       # Set the `:duration_label_builder` option to use a custom label builder.
+       #
+       # Label Builder functions will receive a Rack env and a status code, and must
+       # return a hash with the labels for that request. They must also accept an empty
+       # env, and return a hash with the correct keys. This is necessary to initialize
+       # the metrics with the correct set of labels.
+       class CollectorMiddleware
+         attr_reader :app, :registry
+
+         def initialize(app, options = {})
+           @app = app
+           @registry = options[:registry] || Prometheus::Client.registry
+           @metrics_prefix = options[:metrics_prefix] || 'http_server'
+
+           init_request_metrics
+           init_exception_metrics
+         end
+
+         def call(env) # :nodoc:
+           trace(env) { @app.call(env) }
+         end
+
+         protected
+
+         def init_request_metrics
+           @requests = @registry.counter(
+             :"#{@metrics_prefix}_requests_total",
+             docstring:
+               'The total number of HTTP requests handled by the Rack application.',
+             labels: %i[code method path]
+           )
+           @durations = @registry.histogram(
+             :"#{@metrics_prefix}_request_duration_seconds",
+             docstring: 'The HTTP response duration of the Rack application.',
+             labels: %i[method path]
+           )
+         end
+
+         def init_exception_metrics
+           @exceptions = @registry.counter(
+             :"#{@metrics_prefix}_exceptions_total",
+             docstring: 'The total number of exceptions raised by the Rack application.',
+             labels: [:exception]
+           )
+         end
+
+         def trace(env)
+           response = nil
+           duration = Benchmark.realtime { response = yield }
+           record(env, response.first.to_s, duration)
+           response
+         rescue StandardError => e
+           @exceptions.increment(labels: { exception: e.class.name })
+           raise
+         end
+
+         def record(env, code, duration)
+           counter_labels = {
+             code: code,
+             method: env['REQUEST_METHOD'].downcase,
+             path: strip_hostnames_from_path(env['PATH_INFO'])
+           }
+
+           duration_labels = {
+             method: env['REQUEST_METHOD'].downcase,
+             path: strip_hostnames_from_path(env['PATH_INFO'])
+           }
+
+           @requests.increment(labels: counter_labels)
+           @durations.observe(duration, labels: duration_labels)
+         rescue # rubocop:disable Style/RescueStandardError
+           nil
+         end
+
+         def strip_hostnames_from_path(path)
+           # Custom for the /vm path - we just collect aggregate stats for all usage along this one
+           # path. Custom counters are then added for more specific endpoints in v1.rb.
+           # Since we aren't parsing UID/GIDs as in the original example, these are removed.
+           # Similarly, request IDs are also stripped from the /ondemand path.
+           path
+             .gsub(%r{/vm/.+$}, '/vm')
+             .gsub(%r{/ondemand/.+$}, '/ondemand')
+         end
+       end
+     end
+   end
+ end
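For orientation, here is a minimal sketch of how this middleware could be mounted in a Rack application. The `config.ru`, the require path, and the `vmpooler_http` prefix are illustrative assumptions rather than anything taken from the gem; `Prometheus::Middleware::Exporter` is the stock exporter shipped with prometheus-client.

```ruby
# config.ru - illustrative only; vmpooler wires the middleware into its own API stack.
require 'prometheus/middleware/exporter'
require 'vmpooler/metrics/promstats/collector_middleware' # assumed require path

# Count and time every request, then expose the collected metrics on /metrics.
use Vmpooler::Metrics::Promstats::CollectorMiddleware, metrics_prefix: 'vmpooler_http'
use Prometheus::Middleware::Exporter

run ->(_env) { [200, { 'Content-Type' => 'text/plain' }, ['OK']] }
```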
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ require 'rubygems' unless defined?(Gem)
+ require 'statsd'
+
+ module Vmpooler
+   class Metrics
+     class Statsd < Metrics
+       attr_reader :server, :port, :prefix
+
+       def initialize(logger, params = {})
+         raise ArgumentError, "Statsd server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
+
+         host = params['server']
+         @port = params['port'] || 8125
+         @prefix = params['prefix'] || 'vmpooler'
+         @server = ::Statsd.new(host, @port)
+         @logger = logger
+       end
+
+       def increment(label)
+         server.increment(prefix + '.' + label)
+       rescue StandardError => e
+         @logger.log('s', "[!] Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
+       end
+
+       def gauge(label, value)
+         server.gauge(prefix + '.' + label, value)
+       rescue StandardError => e
+         @logger.log('s', "[!] Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
+       end
+
+       def timing(label, duration)
+         server.timing(prefix + '.' + label, duration)
+       rescue StandardError => e
+         @logger.log('s', "[!] Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
+       end
+     end
+   end
+ end
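A short usage sketch for the wrapper above. The statsd host, the metric names, the require path, and the stand-in logger are all made-up for illustration; in vmpooler itself the logger and params come from the loaded configuration.

```ruby
require 'vmpooler/metrics/statsd' # assumed require path for the class above

# Any object responding to #log(level, message) will do for error reporting here.
class StdoutLogger
  def log(_level, message)
    puts message
  end
end

metrics = Vmpooler::Metrics::Statsd.new(
  StdoutLogger.new,
  'server' => 'statsd.example.com', # hypothetical statsd host
  'port'   => 8125,
  'prefix' => 'vmpooler'
)

metrics.increment('checkout.nonresponsive')                  # sends vmpooler.checkout.nonresponsive
metrics.gauge('ready.debian-10-x86_64', 5)                   # sends vmpooler.ready.debian-10-x86_64 = 5
metrics.timing('time_to_ready_state.debian-10-x86_64', 42.1) # sends a timing sample
```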
@@ -1,6 +1,7 @@
  # frozen_string_literal: true

  require 'vmpooler/providers'
+ require 'vmpooler/util/parsing'
  require 'spicy-proton'
  require 'resolv' # ruby standard lib

@@ -149,8 +150,11 @@ module Vmpooler
  redis.pipelined do
  redis.hset("vmpooler__active__#{pool}", vm, Time.now)
  redis.hset("vmpooler__vm__#{vm}", 'checkout', Time.now)
- redis.hset("vmpooler__vm__#{vm}", 'token:token', ondemandrequest_hash['token:token']) if ondemandrequest_hash['token:token']
- redis.hset("vmpooler__vm__#{vm}", 'token:user', ondemandrequest_hash['token:user']) if ondemandrequest_hash['token:user']
+ if ondemandrequest_hash['token:token']
+ redis.hset("vmpooler__vm__#{vm}", 'token:token', ondemandrequest_hash['token:token'])
+ redis.hset("vmpooler__vm__#{vm}", 'token:user', ondemandrequest_hash['token:user'])
+ redis.hset("vmpooler__vm__#{vm}", 'lifetime', $config[:config]['vm_lifetime_auth'].to_i)
+ end
  redis.sadd("vmpooler__#{request_id}__#{pool_alias}__#{pool}", vm)
  end
  move_vm_queue(pool, vm, 'pending', 'running', redis)
@@ -490,15 +494,21 @@ module Vmpooler
  return if checkout.nil?

  user ||= 'unauthenticated'
- unless jenkins_build_url
- user = user.gsub('.', '_')
- $metrics.increment("usage.#{user}.#{poolname}")
+ user = user.gsub('.', '_')
+ $metrics.increment("user.#{user}.#{poolname}")
+
+ return unless jenkins_build_url
+
+ if jenkins_build_url.include? 'litmus'
+ # Very simple filter for Litmus jobs - just count them coming through for the moment.
+ $metrics.increment("usage_litmus.#{user}.#{poolname}")
  return
  end

  url_parts = jenkins_build_url.split('/')[2..-1]
- instance = url_parts[0]
+ jenkins_instance = url_parts[0].gsub('.', '_')
  value_stream_parts = url_parts[2].split('_')
+ value_stream_parts = value_stream_parts.map { |s| s.gsub('.', '_') }
  value_stream = value_stream_parts.shift
  branch = value_stream_parts.pop
  project = value_stream_parts.shift
@@ -506,22 +516,9 @@ module Vmpooler
  build_metadata_parts = url_parts[3]
  component_to_test = component_to_test('RMM_COMPONENT_TO_TEST_NAME', build_metadata_parts)

- metric_parts = [
- 'usage',
- user,
- instance,
- value_stream,
- branch,
- project,
- job_name,
- component_to_test,
- poolname
- ]
-
- metric_parts = metric_parts.reject(&:nil?)
- metric_parts = metric_parts.map { |s| s.gsub('.', '_') }
-
- $metrics.increment(metric_parts.join('.'))
+ $metrics.increment("usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{poolname}")
+ $metrics.increment("usage_branch_project.#{branch}.#{project}.#{poolname}")
+ $metrics.increment("usage_job_component.#{job_name}.#{component_to_test}.#{poolname}")
  rescue StandardError => e
  $logger.log('d', "[!] [#{poolname}] failed while evaluating usage labels on '#{vm}' with an error: #{e}")
  raise
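To make the new metric layout concrete, here is a small illustration of how a Jenkins build URL maps onto the three usage counters incremented above. The URL, pool name and derived values are purely hypothetical; only the parsing steps are taken from the diff.

```ruby
# Hypothetical example of the values the code above would derive.
jenkins_build_url = 'https://jenkins.example.com/job/platform_puppet-agent_intn-van-sys_suite-daily-master/123/'
poolname          = 'redhat-8-x86_64'

url_parts          = jenkins_build_url.split('/')[2..-1]
jenkins_instance   = url_parts[0].gsub('.', '_')                          # "jenkins_example_com"
value_stream_parts = url_parts[2].split('_').map { |s| s.gsub('.', '_') } # ["platform", "puppet-agent", "intn-van-sys", "suite-daily-master"]
value_stream       = value_stream_parts.shift                             # "platform"
branch             = value_stream_parts.pop                               # "suite-daily-master"
project            = value_stream_parts.shift                             # "puppet-agent"

# With job_name and component_to_test taken from the remaining URL pieces, the
# counters sent to the metrics backend would look like:
#   usage_jenkins_instance.jenkins_example_com.platform.redhat-8-x86_64
#   usage_branch_project.suite-daily-master.puppet-agent.redhat-8-x86_64
#   usage_job_component.<job_name>.<component_to_test>.redhat-8-x86_64
```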
@@ -536,21 +533,20 @@ module Vmpooler
  next if value.nil?
  return value if key == match
  end
- nil
+ 'none'
  end

  def purge_unused_vms_and_folders
  global_purge = $config[:config]['purge_unconfigured_folders']
  providers = $config[:providers].keys
- providers.each do |provider|
- provider_purge = $config[:providers][provider]['purge_unconfigured_folders']
- provider_purge = global_purge if provider_purge.nil?
+ providers.each do |provider_key|
+ provider_purge = $config[:providers][provider_key]['purge_unconfigured_folders'] || global_purge
  if provider_purge
  Thread.new do
  begin
- purge_vms_and_folders($providers[provider.to_s])
+ purge_vms_and_folders(provider_key)
  rescue StandardError => e
- $logger.log('s', "[!] failed while purging provider #{provider} VMs and folders with an error: #{e}")
+ $logger.log('s', "[!] failed while purging provider #{provider_key} VMs and folders with an error: #{e}")
  end
  end
  end
@@ -559,14 +555,13 @@ module Vmpooler
  end

  # Return a list of pool folders
- def pool_folders(provider)
- provider_name = provider.name
+ def pool_folders(provider_name)
  folders = {}
  $config[:pools].each do |pool|
- next unless pool['provider'] == provider_name
+ next unless pool['provider'] == provider_name.to_s

  folder_parts = pool['folder'].split('/')
- datacenter = provider.get_target_datacenter_from_config(pool['name'])
+ datacenter = $providers[provider_name.to_s].get_target_datacenter_from_config(pool['name'])
  folders[folder_parts.pop] = "#{datacenter}/vm/#{folder_parts.join('/')}"
  end
  folders
@@ -580,8 +575,9 @@ module Vmpooler
  base.uniq
  end

- def purge_vms_and_folders(provider)
- configured_folders = pool_folders(provider)
+ def purge_vms_and_folders(provider_name)
+ provider = $providers[provider_name.to_s]
+ configured_folders = pool_folders(provider_name)
  base_folders = get_base_folders(configured_folders)
  whitelist = provider.provider_config['folder_whitelist']
  provider.purge_unconfigured_folders(base_folders, configured_folders, whitelist)
@@ -1500,9 +1496,7 @@ module Vmpooler
  def vms_ready?(request_id, redis)
  catch :request_not_ready do
  request_hash = redis.hgetall("vmpooler__odrequest__#{request_id}")
- requested_platforms = request_hash['requested'].split(',')
- requested_platforms.each do |platform|
- platform_alias, pool, count = platform.split(':')
+ Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, count|
  pools_filled = redis.scard("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
  throw :request_not_ready unless pools_filled.to_i == count.to_i
  end
@@ -1554,9 +1548,7 @@ module Vmpooler

  def remove_vms_for_failed_request(request_id, expiration_ttl, redis)
  request_hash = redis.hgetall("vmpooler__odrequest__#{request_id}")
- requested_platforms = request_hash['requested'].split(',')
- requested_platforms.each do |platform|
- platform_alias, pool, _count = platform.split(':')
+ Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, _count|
  pools_filled = redis.smembers("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
  redis.pipelined do
  pools_filled&.each do |vm|
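The new `Parsing.get_platform_pool_count` helper (pulled in by the `require 'vmpooler/util/parsing'` added above) is not itself shown in this diff. Based on the loop it replaces, a minimal sketch of the expected behaviour would look like the following; the module layout and file location are assumptions, not taken from the gem.

```ruby
# Sketch only - the real helper lives in lib/vmpooler/util/parsing.rb and may differ.
module Vmpooler
  module Parsing
    # 'requested' is a comma-separated list of "platform_alias:pool:count" entries,
    # e.g. "centos-8:centos-8-x86_64:2,debian-10:debian-10-x86_64:1".
    # Yields each triple to the caller's block, matching the inline loop it replaced.
    def self.get_platform_pool_count(requested)
      requested.split(',').each do |platform|
        platform_alias, pool, count = platform.split(':')
        yield platform_alias, pool, count
      end
    end
  end
end
```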
@@ -29,7 +29,8 @@ module Vmpooler
  logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
  @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
  metrics: metrics,
- metric_prefix: "#{name}_provider_connection_pool",
+ connpool_type: 'provider_connection_pool',
+ connpool_provider: name,
  size: connpool_size,
  timeout: connpool_timeout
  ) do
@@ -25,7 +25,8 @@ module Vmpooler
  logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
  @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
  metrics: metrics,
- metric_prefix: "#{name}_provider_connection_pool",
+ connpool_type: 'provider_connection_pool',
+ connpool_provider: name,
  size: connpool_size,
  timeout: connpool_timeout
  ) do
@@ -298,7 +299,6 @@ module Vmpooler
  template_path = pool['template']
  target_folder_path = pool['folder']
  target_datastore = pool['datastore']
- target_cluster_name = get_target_cluster_from_config(pool_name)
  target_datacenter_name = get_target_datacenter_from_config(pool_name)

  # Get the template VM object
@@ -320,31 +320,19 @@ module Vmpooler
  ]
  )

- # Put the VM in the specified folder and resource pool
- relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
- datastore: find_datastore(target_datastore, connection, target_datacenter_name),
- diskMoveType: get_disk_backing(pool)
- )
-
- manage_host_selection = @config[:config]['manage_host_selection'] if @config[:config].key?('manage_host_selection')
- if manage_host_selection
- run_select_hosts(pool_name, @provider_hosts)
- target_host = select_next_host(pool_name, @provider_hosts)
- host_object = find_host_by_dnsname(connection, target_host)
- relocate_spec.host = host_object
- else
- # Choose a cluster/host to place the new VM on
- target_cluster_object = find_cluster(target_cluster_name, connection, target_datacenter_name)
- relocate_spec.pool = target_cluster_object.resourcePool
+ # Check if alternate network configuration is specified and add configuration
+ if pool.key?('network')
+ template_vm_network_device = template_vm_object.config.hardware.device.grep(RbVmomi::VIM::VirtualEthernetCard).first
+ network_name = pool['network']
+ network_device = set_network_device(target_datacenter_name, template_vm_network_device, network_name, connection)
+ config_spec.deviceChange = [{ operation: 'edit', device: network_device }]
  end

+ # Put the VM in the specified folder and resource pool
+ relocate_spec = create_relocate_spec(target_datastore, target_datacenter_name, pool_name, connection)
+
  # Create a clone spec
- clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(
- location: relocate_spec,
- config: config_spec,
- powerOn: true,
- template: false
- )
+ clone_spec = create_clone_spec(relocate_spec, config_spec)

  begin
  vm_target_folder = find_vm_folder(pool_name, connection)
@@ -356,7 +344,7 @@ module Vmpooler
  raise
  end
  end
- raise ArgumentError, "Can not find the configured folder for #{pool_name} #{target_folder_path}" unless vm_target_folder
+ raise ArgumentError, "Cannot find the configured folder for #{pool_name} #{target_folder_path}" unless vm_target_folder

  # Create the new VM
  new_vm_object = template_vm_object.CloneVM_Task(
@@ -370,6 +358,81 @@ module Vmpooler
  vm_hash
  end

+ def create_relocate_spec(target_datastore, target_datacenter_name, pool_name, connection)
+ pool = pool_config(pool_name)
+ target_cluster_name = get_target_cluster_from_config(pool_name)
+
+ relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
+ datastore: find_datastore(target_datastore, connection, target_datacenter_name),
+ diskMoveType: get_disk_backing(pool)
+ )
+ manage_host_selection = @config[:config]['manage_host_selection'] if @config[:config].key?('manage_host_selection')
+ if manage_host_selection
+ run_select_hosts(pool_name, @provider_hosts)
+ target_host = select_next_host(pool_name, @provider_hosts)
+ host_object = find_host_by_dnsname(connection, target_host)
+ relocate_spec.host = host_object
+ else
+ # Choose a cluster/host to place the new VM on
+ target_cluster_object = find_cluster(target_cluster_name, connection, target_datacenter_name)
+ relocate_spec.pool = target_cluster_object.resourcePool
+ end
+ relocate_spec
+ end
+
+ def create_clone_spec(relocate_spec, config_spec)
+ RbVmomi::VIM.VirtualMachineCloneSpec(
+ location: relocate_spec,
+ config: config_spec,
+ powerOn: true,
+ template: false
+ )
+ end
+
+ def set_network_device(datacenter_name, template_vm_network_device, network_name, connection)
+ # Retrieve network object
+ datacenter = connection.serviceInstance.find_datacenter(datacenter_name)
+ new_network = datacenter.network.find { |n| n.name == network_name }
+
+ raise("Cannot find network #{network_name} in datacenter #{datacenter_name}") unless new_network
+
+ # Determine network device type
+ # All possible device type options here: https://vdc-download.vmware.com/vmwb-repository/dcr-public/98d63b35-d822-47fe-a87a-ddefd469df06/2e3c7b58-f2bd-486e-8bb1-a75eb0640bee/doc/vim.vm.device.VirtualEthernetCard.html
+ network_device =
+ if template_vm_network_device.is_a? RbVmomi::VIM::VirtualVmxnet2
+ RbVmomi::VIM.VirtualVmxnet2
+ elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualVmxnet3
+ RbVmomi::VIM.VirtualVmxnet3
+ elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualE1000
+ RbVmomi::VIM.VirtualE1000
+ elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualE1000e
+ RbVmomi::VIM.VirtualE1000e
+ elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualSriovEthernetCard
+ RbVmomi::VIM.VirtualSriovEthernetCard
+ else
+ RbVmomi::VIM.VirtualPCNet32
+ end
+
+ # Set up new network device attributes
+ network_device.key = template_vm_network_device.key
+ network_device.deviceInfo = RbVmomi::VIM.Description(
+ label: template_vm_network_device.deviceInfo.label,
+ summary: network_name
+ )
+ network_device.backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
+ deviceName: network_name,
+ network: new_network,
+ useAutoDetect: false
+ )
+ network_device.addressType = 'assigned'
+ network_device.connectable = RbVmomi::VIM.VirtualDeviceConnectInfo(
+ allowGuestControl: true,
+ startConnected: true,
+ connected: true
+ )
+ network_device
+ end
+
  def create_disk(pool_name, vm_name, disk_size)
  pool = pool_config(pool_name)
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?