vmpooler 0.14.0 → 0.14.5
- checksums.yaml +4 -4
- data/lib/vmpooler/api.rb +2 -2
- data/lib/vmpooler/api/v1.rb +35 -25
- data/lib/vmpooler/metrics/promstats.rb +124 -34
- data/lib/vmpooler/metrics/promstats/collector_middleware.rb +3 -0
- data/lib/vmpooler/pool_manager.rb +16 -14
- data/lib/vmpooler/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3a00a8643a9f748b6e4969c1704d609f43ea2e6fbcf733f75d80b9710f2e0887
+  data.tar.gz: 679f9ec5fda317ee478c7e23c39a81d95efd5f0ae961f7d071b94f7ef5977af2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: acc4f1a92edf4c6b4ee86bc337429ec23dc173ab74405986e82bdd096054aca3f372c8552fa2dda020f29281b9ec4627c02e4be7367a7a6ab124414b0c90273a
+  data.tar.gz: 20021a38a843d2b4e82c46ad4206119692023a42e67f207d688402ac140fe8496cc27be157575bb2d785cf2cc28af0d4368654d73f7f2575af789075b7dbc3e7
data/lib/vmpooler/api.rb
CHANGED
@@ -38,8 +38,8 @@ module Vmpooler
       # Using customised collector that filters out hostnames on API paths
       require 'vmpooler/metrics/promstats/collector_middleware'
       require 'prometheus/middleware/exporter'
-      use Vmpooler::Metrics::Promstats::CollectorMiddleware, metrics_prefix: "#{metrics.
-      use Prometheus::Middleware::Exporter, path: metrics.
+      use Vmpooler::Metrics::Promstats::CollectorMiddleware, metrics_prefix: "#{metrics.prometheus_prefix}_http"
+      use Prometheus::Middleware::Exporter, path: metrics.prometheus_endpoint
     end

     if torun.include? :api
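Note on the hunk above: the corrected lines read the metric prefix and scrape path from the Promstats object (prometheus_prefix and prometheus_endpoint, added in promstats.rb below) instead of truncated hard-coded fragments. For reference only, a minimal standalone Rack config wired the same way with the stock prometheus-client middleware might look like this (a sketch: it uses the generic Prometheus::Middleware::Collector rather than vmpooler's customised CollectorMiddleware, and the '/prometheus' path and 'vmpooler_http' prefix simply mirror the values used above):

  # config.ru -- illustrative only, not vmpooler's actual rack setup
  require 'prometheus/middleware/collector'
  require 'prometheus/middleware/exporter'

  # Collector records request counts/durations into the default registry,
  # prefixing its metric names with the supplied string.
  use Prometheus::Middleware::Collector, metrics_prefix: 'vmpooler_http'

  # Exporter serves the registry in Prometheus text format at the given path.
  use Prometheus::Middleware::Exporter, path: '/prometheus'

  run ->(_env) { [200, { 'Content-Type' => 'text/plain' }, ['ok']] }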
data/lib/vmpooler/api/v1.rb
CHANGED
@@ -89,18 +89,16 @@ module Vmpooler
       template_backends += aliases
       weighted_pools = get_pool_weights(template_backends)

-
-
-
+      if weighted_pools.count > 1 && weighted_pools.count == template_backends.count
+        pickup = Pickup.new(weighted_pools)
+        count.to_i.times do
           selection << pickup.pick
-
+        end
+      else
+        count.to_i.times do
           selection << template_backends.sample
         end
       end
-      else
-        count.to_i.times do
-          selection << template
-        end
       end

       count_selection(selection)
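For background on the hunk above: the rewritten branch only does weighted selection when every template backend has a weight (weighted_pools.count == template_backends.count) and otherwise falls back to a uniform template_backends.sample. The Pickup gem picks hash keys in proportion to their weights, roughly as in this sketch (pool names and weights here are hypothetical):

  require 'pickup'

  # Hypothetical weights: the first pool is picked about three times as often.
  weighted_pools = { 'centos-8-x86_64' => 3, 'debian-10-x86_64' => 1 }

  pickup = Pickup.new(weighted_pools)
  selection = []
  5.times { selection << pickup.pick }
  # selection => e.g. ["centos-8-x86_64", "centos-8-x86_64", "debian-10-x86_64", ...]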
@@ -809,7 +807,7 @@ module Vmpooler

     post "#{api_prefix}/ondemandvm/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.post.ondemand.requestid')

       need_token! if Vmpooler::API.settings.config[:auth]

@@ -847,7 +845,7 @@ module Vmpooler
     post "#{api_prefix}/ondemandvm/:template/?" do
       content_type :json
       result = { 'ok' => false }
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.delete.ondemand.template')

       need_token! if Vmpooler::API.settings.config[:auth]

@@ -874,7 +872,7 @@ module Vmpooler

     get "#{api_prefix}/ondemandvm/:requestid/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.get.ondemand.request')

       status 404
       result = check_ondemand_request(params[:requestid])
@@ -885,7 +883,7 @@ module Vmpooler
     delete "#{api_prefix}/ondemandvm/:requestid/?" do
       content_type :json
       need_token! if Vmpooler::API.settings.config[:auth]
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.delete.ondemand.request')

       status 404
       result = delete_ondemand_request(params[:requestid])
@@ -896,7 +894,7 @@ module Vmpooler
     post "#{api_prefix}/vm/?" do
       content_type :json
       result = { 'ok' => false }
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.post.vm.checkout')

       payload = JSON.parse(request.body.read)

@@ -986,7 +984,12 @@ module Vmpooler
         result['ready'] = true
         Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, _count|
           instances = backend.smembers("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
-
+
+          if result.key?(platform_alias)
+            result[platform_alias][:hostname] = result[platform_alias][:hostname] + instances
+          else
+            result[platform_alias] = { 'hostname': instances }
+          end
         end
         result['domain'] = config['domain'] if config['domain']
         status 200
@@ -999,10 +1002,17 @@ module Vmpooler
       else
         Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, count|
           instance_count = backend.scard("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
-
-
-
-
+          instances_pending = count.to_i - instance_count.to_i
+
+          if result.key?(platform_alias) && result[platform_alias].key?(:ready)
+            result[platform_alias][:ready] = (result[platform_alias][:ready].to_i + instance_count).to_s
+            result[platform_alias][:pending] = (result[platform_alias][:pending].to_i + instances_pending).to_s
+          else
+            result[platform_alias] = {
+              'ready': instance_count.to_s,
+              'pending': instances_pending.to_s
+            }
+          end
         end
       end

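The two hunks above change the ondemand status payload so that pools sharing a platform alias are folded into a single entry instead of overwriting each other. A quick worked illustration with made-up numbers: a request for two VMs from each of two pools behind the alias 'centos', with 1 and 2 instances already created, should report ready: "3" and pending: "1". A hedged sketch of that accumulation (hypothetical data, simplified keys):

  # alias => { pool => requested count }; instances already created per pool
  requested = { 'centos' => { 'pool-a' => 2, 'pool-b' => 2 } }
  created   = { 'pool-a' => 1, 'pool-b' => 2 }

  result = {}
  requested['centos'].each do |pool, count|
    ready   = created[pool]
    pending = count - ready
    if result.key?('centos')
      result['centos'][:ready]   = (result['centos'][:ready].to_i + ready).to_s
      result['centos'][:pending] = (result['centos'][:pending].to_i + pending).to_s
    else
      result['centos'] = { ready: ready.to_s, pending: pending.to_s }
    end
  end
  # result => { "centos" => { :ready => "3", :pending => "1" } }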
@@ -1039,7 +1049,7 @@ module Vmpooler
     post "#{api_prefix}/vm/:template/?" do
       content_type :json
       result = { 'ok' => false }
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.get.vm.template')

       payload = extract_templates_from_query_params(params[:template])

@@ -1063,7 +1073,7 @@ module Vmpooler

     get "#{api_prefix}/vm/:hostname/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.get.vm.hostname')

       result = {}

@@ -1136,7 +1146,7 @@ module Vmpooler

     delete "#{api_prefix}/vm/:hostname/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.delete.vm.hostname')

       result = {}

@@ -1165,7 +1175,7 @@ module Vmpooler

     put "#{api_prefix}/vm/:hostname/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.put.vm.modify')

       status 404
       result = { 'ok' => false }
@@ -1242,7 +1252,7 @@ module Vmpooler

     post "#{api_prefix}/vm/:hostname/disk/:size/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.post.vm.disksize')

       need_token! if Vmpooler::API.settings.config[:auth]

@@ -1266,7 +1276,7 @@ module Vmpooler

     post "#{api_prefix}/vm/:hostname/snapshot/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.post.vm.snapshot')

       need_token! if Vmpooler::API.settings.config[:auth]

@@ -1292,7 +1302,7 @@ module Vmpooler

     post "#{api_prefix}/vm/:hostname/snapshot/:snapshot/?" do
       content_type :json
-      metrics.increment('
+      metrics.increment('http_requests_vm_total.post.vm.disksize')

       need_token! if Vmpooler::API.settings.config[:auth]

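All of the metrics.increment calls restored above pass dotted names of the form http_requests_vm_total.<method>.<subpath>.<operation>. Under the Promstats backend (see the http_requests_vm_total entry and its param_labels in promstats.rb below), the first segment selects the registered metric and the trailing segments become label values, so on a best-guess reading a call such as

  metrics.increment('http_requests_vm_total.post.vm.checkout')

would surface on the scrape endpoint roughly as

  vmpooler_http_requests_vm_total{vmpooler_instance="vmpooler",method="post",subpath="vm",operation="checkout"} 1.0

assuming the default 'vmpooler' prometheus_prefix; the exact label set is an assumption based on the sample output quoted in the promstats documentation block.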
data/lib/vmpooler/metrics/promstats.rb
CHANGED
@@ -5,7 +5,7 @@ require 'prometheus/client'
 module Vmpooler
   class Metrics
     class Promstats < Metrics
-      attr_reader :prefix, :
+      attr_reader :prefix, :prometheus_endpoint, :prometheus_prefix

       # Constants for Metric Types
       M_COUNTER = 1
@@ -24,22 +24,135 @@ module Vmpooler

     def initialize(logger, params = {})
       @prefix = params['prefix'] || 'vmpooler'
-      @
-      @
+      @prometheus_prefix = params['prometheus_prefix'] || 'vmpooler'
+      @prometheus_endpoint = params['prometheus_endpoint'] || '/prometheus'
       @logger = logger

       # Setup up prometheus registry and data structures
       @prometheus = Prometheus::Client.registry
     end

-
+=begin # rubocop:disable Style/BlockComments
+    The Metrics table is used to register metrics and translate/interpret the incoming metrics.
+
+    This table describes all of the prometheus metrics that are recognised by the application.
+    The background documentation for defining metrics is at: https://prometheus.io/docs/introduction/
+    In particular, the naming practices should be adhered to: https://prometheus.io/docs/practices/naming/
+    The Ruby Client docs are also useful: https://github.com/prometheus/client_ruby
+
+    The table here allows the currently used stats definitions to be translated correctly for Prometheus.
+    The current format is of the form A.B.C, where the final fields may be actual values (e.g. poolname).
+    Prometheus metrics cannot use the '.' as a character, so this is either translated into '_' or
+    variable parameters are expressed as labels accompanying the metric.
+
+    Sample statistics are:
+      # Example showing hostnames (FQDN)
+      migrate_from.pix-jj26-chassis1-2.ops.puppetlabs.net
+      migrate_to.pix-jj26-chassis1-8.ops.puppetlabs.net
+
+      # Example showing poolname as a parameter
+      poolreset.invalid.centos-8-x86_64
+
+      # Examples showing similar sub-typed checkout stats
+      checkout.empty.centos-8-x86_64
+      checkout.invalid.centos-8-x86_64
+      checkout.invalid.unknown
+      checkout.success.centos-8-x86_64
+
+      # Stats without any final parameter.
+      connect.fail
+      connect.open
+      delete.failed
+      delete.success
+
+      # Stats with multiple param_labels
+      vmpooler_user.debian-8-x86_64-pixa4.john
+
+    The metrics implementation here preserves the existing framework which will continue to support
+    graphite and statsd (since vmpooler is used outside of puppet). Some rationalisation and renaming
+    of the actual metrics was done to get a more usable model to fit within the prometheus framework.
+    This particularly applies to the user stats collected once individual machines are terminated as
+    this would have challenged prometheus' ability due to multiple (8) parameters being collected
+    in a single measure (which has a very high cardinality).
+
+    Prometheus requires all metrics to be pre-registered (which is the primary reason for this
+    table) and also uses labels to differentiate the characteristics of the measurement. This
+    is used throughout to capture information such as poolnames. So for example, this is a sample
+    of the prometheus metrics generated for the "vmpooler_ready" measurement:
+
+      # TYPE vmpooler_ready gauge
+      # HELP vmpooler_ready vmpooler number of machines in ready State
+      vmpooler_ready{vmpooler_instance="vmpooler",poolname="win-10-ent-x86_64-pixa4"} 2.0
+      vmpooler_ready{vmpooler_instance="vmpooler",poolname="debian-8-x86_64-pixa4"} 2.0
+      vmpooler_ready{vmpooler_instance="vmpooler",poolname="centos-8-x86_64-pixa4"} 2.0
+
+    Prometheus supports the following metric types:
+    (see https://prometheus.io/docs/concepts/metric_types/)
+
+    Counter (increment):
+      A counter is a cumulative metric that represents a single monotonically increasing counter whose
+      value can only increase or be reset to zero on restart
+
+    Gauge:
+      A gauge is a metric that represents a single numerical value that can arbitrarily go up and down.
+
+    Histogram:
+      A histogram samples observations (usually things like request durations or response sizes) and
+      counts them in configurable buckets. It also provides a sum of all observed values.
+      This replaces the timer metric supported by statsd
+
+    Summary :
+      Summary provides a total count of observations and a sum of all observed values, it calculates
+      configurable quantiles over a sliding time window.
+      (Summary is not used in vmpooler)
+
+    vmpooler_metrics_table is a table of hashes, where the hash key represents the first part of the
+    metric name, e.g. for the metric 'delete.*' (see above) the key would be 'delete:'. "Sub-metrics",
+    are supported, again for the 'delete.*' example, this can be subbed into '.failed' and '.success'
+
+    The entries within the hash as are follows:
+
+    mtype:
+      Metric type, which is one of the following constants:
+        M_COUNTER = 1
+        M_GAUGE = 2
+        M_SUMMARY = 3
+        M_HISTOGRAM = 4
+
+    torun:
+      Indicates which process the metric is for - within vmpooler this is either ':api' or ':manager'
+      (there is a suggestion that we change this to two separate tables).
+
+    docstring:
+      Documentation string for the metric - this is displayed as HELP text by the endpoint.
+
+    metric_suffixes:
+      Array of sub-metrics of the form 'sub-metric: "doc-string for sub-metric"'. This supports
+      the generation of individual sub-metrics for all elements in the array.
+
+    param_labels:
+      This is an optional array of symbols for the final labels in a metric. It should not be
+      specified if there are no additional parameters.
+
+      If it specified, it can either be a single symbol, or two or more symbols. The treatment
+      differs if there is only one symbol given as all of the remainder of the metric string
+      supplied is collected into a label with the symbol name. This allows the handling of
+      node names (FQDN).
+
+      To illustrate:
+        1. In the 'connect.*' or 'delete.*' example above, it should not be specified.
+        2. For the 'migrate_from.*' example above, the remainder of the measure is collected
+           as the 'host_name' label.
+        3. For the 'vmpooler_user' example above, the first parameter is treated as the pool
+           name, and the second as the username.
+
+=end
     def vmpooler_metrics_table
       {
         errors: {
           mtype: M_COUNTER,
           torun: %i[manager],
           docstring: 'Count of errors for pool',
-          prom_metric_prefix: "#{@metrics_prefix}_errors",
           metric_suffixes: {
             markedasfailed: 'timeout waiting for instance to initialise',
             duplicatehostname: 'unable to create instance due to duplicate hostname',
@@ -51,42 +164,36 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'Number of pool instances this user created created',
-          prom_metric_prefix: "#{@metrics_prefix}_user",
          param_labels: %i[user poolname]
        },
        usage_litmus: {
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'Pools by Litmus job usage',
-          prom_metric_prefix: "#{@metrics_prefix}_usage_litmus",
          param_labels: %i[user poolname]
        },
        usage_jenkins_instance: {
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'Pools by Jenkins instance usage',
-          prom_metric_prefix: "#{@metrics_prefix}_usage_jenkins_instance",
          param_labels: %i[jenkins_instance value_stream poolname]
        },
        usage_branch_project: {
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'Pools by branch/project usage',
-          prom_metric_prefix: "#{@metrics_prefix}_usage_branch_project",
          param_labels: %i[branch project poolname]
        },
        usage_job_component: {
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'Pools by job/component usage',
-          prom_metric_prefix: "#{@metrics_prefix}_usage_job_component",
          param_labels: %i[job_name component_to_test poolname]
        },
        checkout: {
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'Pool checkout counts',
-          prom_metric_prefix: "#{@metrics_prefix}_checkout",
          metric_suffixes: {
            nonresponsive: 'checkout failed - non responsive machine',
            empty: 'checkout failed - no machine',
@@ -99,7 +206,6 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'Delete machine',
-          prom_metric_prefix: "#{@metrics_prefix}_delete",
          metric_suffixes: {
            success: 'succeeded',
            failed: 'failed'
@@ -110,7 +216,6 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'Ondemand request',
-          prom_metric_prefix: "#{@metrics_prefix}_ondemandrequest_generate",
          metric_suffixes: {
            duplicaterequests: 'failed duplicate request',
            success: 'succeeded'
@@ -121,7 +226,6 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'Ondemand request failure',
-          prom_metric_prefix: "#{@metrics_prefix}_ondemandrequest_fail",
          metric_suffixes: {
            toomanyrequests: 'too many requests',
            invalid: 'invalid poolname'
@@ -132,7 +236,6 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'vmpooler pool configuration request',
-          prom_metric_prefix: "#{@metrics_prefix}_config",
          metric_suffixes: { invalid: 'Invalid' },
          param_labels: %i[poolname]
        },
@@ -140,7 +243,6 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'Pool reset counter',
-          prom_metric_prefix: "#{@metrics_prefix}_poolreset",
          metric_suffixes: { invalid: 'Invalid Pool' },
          param_labels: %i[poolname]
        },
@@ -148,7 +250,6 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'vmpooler connect (to vSphere)',
-          prom_metric_prefix: "#{@metrics_prefix}_connect",
          metric_suffixes: {
            open: 'Connect Succeeded',
            fail: 'Connect Failed'
@@ -159,42 +260,36 @@ module Vmpooler
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'vmpooler machine migrated from',
-          prom_metric_prefix: "#{@metrics_prefix}_migrate_from",
          param_labels: %i[host_name]
        },
        migrate_to: {
          mtype: M_COUNTER,
          torun: %i[manager],
          docstring: 'vmpooler machine migrated to',
-          prom_metric_prefix: "#{@metrics_prefix}_migrate_to",
          param_labels: %i[host_name]
        },
-
+        http_requests_vm_total: {
          mtype: M_COUNTER,
          torun: %i[api],
          docstring: 'Total number of HTTP request/sub-operations handled by the Rack application under the /vm endpoint',
-          prom_metric_prefix: "#{@metrics_prefix}_http_requests_vm_total",
          param_labels: %i[method subpath operation]
        },
        ready: {
          mtype: M_GAUGE,
          torun: %i[manager],
          docstring: 'vmpooler number of machines in ready State',
-          prom_metric_prefix: "#{@metrics_prefix}_ready",
          param_labels: %i[poolname]
        },
        running: {
          mtype: M_GAUGE,
          torun: %i[manager],
          docstring: 'vmpooler number of machines running',
-          prom_metric_prefix: "#{@metrics_prefix}_running",
          param_labels: %i[poolname]
        },
        connection_available: {
          mtype: M_GAUGE,
          torun: %i[manager],
          docstring: 'vmpooler redis connections available',
-          prom_metric_prefix: "#{@metrics_prefix}_connection_available",
          param_labels: %i[type provider]
        },
        time_to_ready_state: {
@@ -202,7 +297,6 @@ module Vmpooler
          torun: %i[manager],
          buckets: POOLER_READY_TIME_BUCKETS,
          docstring: 'Time taken for machine to read ready state for pool',
-          prom_metric_prefix: "#{@metrics_prefix}_time_to_ready_state",
          param_labels: %i[poolname]
        },
        migrate: {
@@ -210,7 +304,6 @@ module Vmpooler
          torun: %i[manager],
          buckets: POOLER_CLONE_TIME_BUCKETS,
          docstring: 'vmpooler time taken to migrate machine for pool',
-          prom_metric_prefix: "#{@metrics_prefix}_migrate",
          param_labels: %i[poolname]
        },
        clone: {
@@ -218,7 +311,6 @@ module Vmpooler
          torun: %i[manager],
          buckets: POOLER_CLONE_TIME_BUCKETS,
          docstring: 'vmpooler time taken to clone machine',
-          prom_metric_prefix: "#{@metrics_prefix}_clone",
          param_labels: %i[poolname]
        },
        destroy: {
@@ -226,7 +318,6 @@ module Vmpooler
          torun: %i[manager],
          buckets: POOLER_CLONE_TIME_BUCKETS,
          docstring: 'vmpooler time taken to destroy machine',
-          prom_metric_prefix: "#{@metrics_prefix}_destroy",
          param_labels: %i[poolname]
        },
        connection_waited: {
@@ -234,7 +325,6 @@ module Vmpooler
          torun: %i[manager],
          buckets: REDIS_CONNECT_BUCKETS,
          docstring: 'vmpooler redis connection wait time',
-          prom_metric_prefix: "#{@metrics_prefix}_connection_waited",
          param_labels: %i[type provider]
        }
      }
@@ -279,7 +369,7 @@ module Vmpooler

     def setup_prometheus_metrics(torun)
       @p_metrics = vmpooler_metrics_table
-      @p_metrics.each do |
+      @p_metrics.each do |name, metric_spec|
         # Only register metrics appropriate to api or manager
         next if (torun & metric_spec[:torun]).empty?

@@ -288,7 +378,7 @@ module Vmpooler
           metric_spec[:metric_suffixes].each do |metric_suffix|
             add_prometheus_metric(
               metric_spec,
-              "#{
+              "#{@prometheus_prefix}_#{name}_#{metric_suffix[0]}",
               "#{metric_spec[:docstring]} #{metric_suffix[1]}"
             )
           end
@@ -296,7 +386,7 @@ module Vmpooler
           # No Additional counter suffixes so register this as metric.
           add_prometheus_metric(
             metric_spec,
-
+            "#{@prometheus_prefix}_#{name}",
             metric_spec[:docstring]
           )
         end
@@ -315,9 +405,9 @@ module Vmpooler
         metric_subkey = sublabels.shift.to_sym
         raise("Invalid Metric #{metric_key}_#{metric_subkey} for #{label}") unless metric[:metric_suffixes].key? metric_subkey.to_sym

-        metric[:metric_name] = "#{
+        metric[:metric_name] = "#{@prometheus_prefix}_#{metric_key}_#{metric_subkey}"
       else
-        metric[:metric_name] =
+        metric[:metric_name] = "#{@prometheus_prefix}_#{metric_key}"
       end

       # Check if we are looking for a parameter value at last element.
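The net effect of the promstats.rb changes above: the per-entry prom_metric_prefix fields are gone, and metric names are now built from @prometheus_prefix plus the table key (and suffix, if any), with the remaining dotted segments carried as labels. In plain prometheus-client terms, registering and bumping one such generated counter looks roughly like the sketch below (an illustration of the idea, not the gem's actual add_prometheus_metric implementation):

  require 'prometheus/client'

  registry = Prometheus::Client.registry

  # "#{prometheus_prefix}_#{name}_#{metric_suffix}" => e.g. :vmpooler_checkout_success
  counter = Prometheus::Client::Counter.new(
    :vmpooler_checkout_success,
    docstring: 'Pool checkout counts succeeded',
    labels: [:vmpooler_instance, :poolname]
  )
  registry.register(counter)

  # An incoming stat such as 'checkout.success.centos-8-x86_64' then becomes:
  counter.increment(labels: { vmpooler_instance: 'vmpooler', poolname: 'centos-8-x86_64' })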
data/lib/vmpooler/pool_manager.rb
CHANGED
@@ -150,8 +150,11 @@ module Vmpooler
       redis.pipelined do
         redis.hset("vmpooler__active__#{pool}", vm, Time.now)
         redis.hset("vmpooler__vm__#{vm}", 'checkout', Time.now)
-
-
+        if ondemandrequest_hash['token:token']
+          redis.hset("vmpooler__vm__#{vm}", 'token:token', ondemandrequest_hash['token:token'])
+          redis.hset("vmpooler__vm__#{vm}", 'token:user', ondemandrequest_hash['token:user'])
+          redis.hset("vmpooler__vm__#{vm}", 'lifetime', $config[:config]['vm_lifetime_auth'].to_i)
+        end
         redis.sadd("vmpooler__#{request_id}__#{pool_alias}__#{pool}", vm)
       end
       move_vm_queue(pool, vm, 'pending', 'running', redis)
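The hunk above batches the HSET/SADD writes inside redis.pipelined so they are sent in a single round trip, and the new conditional only records token and lifetime details when the ondemand request carried an auth token. A minimal sketch of the same pipelining pattern with redis-rb, in the same block style the existing code uses (keys and values here are hypothetical):

  require 'redis'

  redis = Redis.new

  # Queue several writes and flush them to the server in one round trip.
  redis.pipelined do
    redis.hset('vmpooler__vm__example-vm', 'checkout', Time.now)
    redis.hset('vmpooler__vm__example-vm', 'lifetime', 12)
    redis.sadd('vmpooler__example-request__alias__pool', 'example-vm')
  end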
@@ -365,7 +368,7 @@ module Vmpooler
           $metrics.increment("errors.duplicatehostname.#{pool_name}")
           $logger.log('s', "[!] [#{pool_name}] Generated hostname #{hostname} was not unique (attempt \##{hostname_retries} of #{max_hostname_retries})")
         elsif !dns_available
-          $metrics.increment("errors.staledns.#{
+          $metrics.increment("errors.staledns.#{pool_name}")
           $logger.log('s', "[!] [#{pool_name}] Generated hostname #{hostname} already exists in DNS records (#{dns_ip}), stale DNS")
         end
       end
@@ -536,15 +539,14 @@ module Vmpooler
     def purge_unused_vms_and_folders
       global_purge = $config[:config]['purge_unconfigured_folders']
       providers = $config[:providers].keys
-      providers.each do |
-        provider_purge = $config[:providers][
-        provider_purge = global_purge if provider_purge.nil?
+      providers.each do |provider_key|
+        provider_purge = $config[:providers][provider_key]['purge_unconfigured_folders'] || global_purge
         if provider_purge
           Thread.new do
             begin
-              purge_vms_and_folders(
+              purge_vms_and_folders(provider_key)
             rescue StandardError => e
-              $logger.log('s', "[!] failed while purging provider #{
+              $logger.log('s', "[!] failed while purging provider #{provider_key} VMs and folders with an error: #{e}")
             end
           end
         end
@@ -553,14 +555,13 @@ module Vmpooler
     end

     # Return a list of pool folders
-    def pool_folders(
-      provider_name = provider.name
+    def pool_folders(provider_name)
       folders = {}
       $config[:pools].each do |pool|
-        next unless pool['provider'] == provider_name
+        next unless pool['provider'] == provider_name.to_s

         folder_parts = pool['folder'].split('/')
-        datacenter =
+        datacenter = $providers[provider_name.to_s].get_target_datacenter_from_config(pool['name'])
         folders[folder_parts.pop] = "#{datacenter}/vm/#{folder_parts.join('/')}"
       end
       folders
@@ -574,8 +575,9 @@ module Vmpooler
       base.uniq
     end

-    def purge_vms_and_folders(
-
+    def purge_vms_and_folders(provider_name)
+      provider = $providers[provider_name.to_s]
+      configured_folders = pool_folders(provider_name)
       base_folders = get_base_folders(configured_folders)
       whitelist = provider.provider_config['folder_whitelist']
       provider.purge_unconfigured_folders(base_folders, configured_folders, whitelist)
data/lib/vmpooler/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: vmpooler
 version: !ruby/object:Gem::Version
-  version: 0.14.
+  version: 0.14.5
 platform: ruby
 authors:
 - Puppet
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-08-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: pickup